diff --git a/.gitignore b/.gitignore index 5a66ff5..4f1959b 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,4 @@ _pycache_/ -yolov7/ -yolov5/ -ultralytics/ wandb/ .idea/ diff --git a/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/bug-report.yml b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000..371439b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,85 @@ +name: 🐛 Bug Report +# title: " " +description: Problems with YOLOv8 +labels: [bug, triage] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv8 🐛 Bug Report! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar bug report already exists. + options: + - label: > + I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar bug report. + required: true + + - type: dropdown + attributes: + label: YOLOv8 Component + description: | + Please select the part of YOLOv8 where you found the bug. + multiple: true + options: + - "Training" + - "Validation" + - "Detection" + - "Export" + - "PyTorch Hub" + - "Multi-GPU" + - "Evolution" + - "Integrations" + - "Other" + validations: + required: false + + - type: textarea + attributes: + label: Bug + description: Provide console output with error messages and/or screenshots of the bug. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Environment + description: Please specify the software and hardware you used to produce the bug. + placeholder: | + - YOLO: Ultralytics YOLOv8.0.21 🚀 Python-3.8.10 torch-1.13.1+cu117 CUDA:0 (A100-SXM-80GB, 81251MiB) + - OS: Ubuntu 20.04 + - Python: 3.8.10 + validations: + required: false + + - type: textarea + attributes: + label: Minimal Reproducible Example + description: > + When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. + This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + placeholder: | + ``` + # Code to reproduce your issue here + ``` + validations: + required: false + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLOv8 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv8 [Contributing Guide](https://github.com/ultralytics/ultralytics/blob/main/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! 
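The bug-report template above asks reporters for a minimal reproducible example. As a rough illustration only (the weight file and image path below are placeholder choices, not part of the template), such a snippet might look like:

```python
# Hypothetical minimal reproducible example for a YOLOv8 bug report:
# keep only the lines needed to trigger the error being reported.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")           # small pretrained detection model (placeholder)
results = model.predict("bus.jpg")   # replace with the exact input that triggers the bug
print(results)                       # include the full console output / traceback in the report
```

Together with the Environment details requested above (Python, torch and CUDA versions), a snippet this small is usually enough for maintainers to reproduce the problem.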
diff --git a/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/config.yml b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..4504611 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,11 @@ +blank_issues_enabled: true +contact_links: + - name: 📄Docs + url: https://docs.ultralytics.com/ + about: Full Ultralytics YOLOv8 Documentation + - name: 💬 Forum + url: https://community.ultralytics.com/ + about: Ask on Ultralytics Community Forum + - name: Stack Overflow + url: https://stackoverflow.com/search?q=YOLOv8 + about: Ask on Stack Overflow with 'YOLOv8' tag diff --git a/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/feature-request.yml b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000..627ffb1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,50 @@ +name: 🚀 Feature Request +description: Suggest a YOLOv8 idea +# title: " " +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv8 🚀 Feature Request! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/ultralytics/issues) to see if a similar feature request already exists. + options: + - label: > + I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and found no similar feature requests. + required: true + + - type: textarea + attributes: + label: Description + description: A short description of your feature. + placeholder: | + What new feature would you like to see in YOLOv8? + validations: + required: true + + - type: textarea + attributes: + label: Use case + description: | + Describe the use case of your feature request. It will help us understand and prioritize the feature request. + placeholder: | + How would this feature be used, and who would use it? + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/ultralytics/pulls) (PR) to help improve YOLOv8 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv8 [Contributing Guide](https://github.com/ultralytics/ultralytics/blob/main/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! diff --git a/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/question.yml b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 0000000..258e26b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,33 @@ +name: ❓ Question +description: Ask a YOLOv8 question +# title: " " +labels: [question] +body: + - type: markdown + attributes: + value: | + Thank you for asking a YOLOv8 ❓ Question! 
+ + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/ultralytics/ultralytics/discussions) to see if a similar question already exists. + options: + - label: > + I have searched the YOLOv8 [issues](https://github.com/ultralytics/ultralytics/issues) and [discussions](https://github.com/ultralytics/ultralytics/discussions) and found no similar questions. + required: true + + - type: textarea + attributes: + label: Question + description: What is your question? + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? diff --git a/src/train_utils/train_models/models/ultralytics/.github/dependabot.yml b/src/train_utils/train_models/models/ultralytics/.github/dependabot.yml new file mode 100644 index 0000000..ab54fc7 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/dependabot.yml @@ -0,0 +1,28 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 5 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/src/train_utils/train_models/models/ultralytics/.github/translate-readme.yml b/src/train_utils/train_models/models/ultralytics/.github/translate-readme.yml new file mode 100644 index 0000000..e3dbace --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/translate-readme.yml @@ -0,0 +1,26 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md + +name: Translate README + +on: + push: + branches: + - translate_readme # replace with 'main' to enable action + paths: + - README.md + +jobs: + Translate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: 16 + # ISO Language Codes: https://cloud.google.com/translate/docs/languages + - name: Adding README - Chinese Simplified + uses: dephraiim/translate-readme@main + with: + LANG: zh-CN diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/ci.yaml b/src/train_utils/train_models/models/ultralytics/.github/workflows/ci.yaml new file mode 100644 index 0000000..e236ff5 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/ci.yaml @@ -0,0 +1,179 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLO Continuous Integration (CI) GitHub Actions tests + +name: Ultralytics CI + +on: + push: + branches: [main] + pull_request: + branches: [main, updates] + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + HUB: + if: github.repository == 'ultralytics/ultralytics' && (github.event_name 
== 'schedule' || github.event_name == 'push') + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ['3.10'] + model: [yolov5n] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' # caching pip dependencies + - name: Install requirements + shell: bash # for Windows compatibility + run: | + python -m pip install --upgrade pip wheel + pip install -e . --extra-index-url https://download.pytorch.org/whl/cpu + - name: Check environment + run: | + echo "RUNNER_OS is ${{ runner.os }}" + echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" + echo "GITHUB_WORKFLOW is ${{ github.workflow }}" + echo "GITHUB_ACTOR is ${{ github.actor }}" + echo "GITHUB_REPOSITORY is ${{ github.repository }}" + echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" + python --version + pip --version + pip list + - name: Test HUB training + shell: python + env: + APIKEY: ${{ secrets.ULTRALYTICS_HUB_APIKEY }} + run: | + import os + from ultralytics import hub + key = os.environ['APIKEY'] + hub.reset_model(key) + hub.start(key) + + Benchmarks: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ['3.10'] + model: [yolov8n] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' # caching pip dependencies + - name: Install requirements + shell: bash # for Windows compatibility + run: | + python -m pip install --upgrade pip wheel + if [ "${{ matrix.os }}" == "macos-latest" ]; then + pip install -e . coremltools openvino-dev tensorflow-macos --extra-index-url https://download.pytorch.org/whl/cpu + else + pip install -e . 
coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu + fi + yolo export format=tflite + - name: Check environment + run: | + echo "RUNNER_OS is ${{ runner.os }}" + echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" + echo "GITHUB_WORKFLOW is ${{ github.workflow }}" + echo "GITHUB_ACTOR is ${{ github.actor }}" + echo "GITHUB_REPOSITORY is ${{ github.repository }}" + echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" + python --version + pip --version + pip list + - name: Benchmark DetectionModel + shell: python + run: | + from ultralytics.yolo.utils.benchmarks import benchmark + benchmark(model='${{ matrix.model }}.pt', imgsz=160, half=False, hard_fail=0.20) + - name: Benchmark SegmentationModel + shell: python + run: | + from ultralytics.yolo.utils.benchmarks import benchmark + benchmark(model='${{ matrix.model }}-seg.pt', imgsz=160, half=False, hard_fail=0.14) + - name: Benchmark ClassificationModel + shell: python + run: | + from ultralytics.yolo.utils.benchmarks import benchmark + benchmark(model='${{ matrix.model }}-cls.pt', imgsz=160, half=False, hard_fail=0.61) + - name: Benchmark Summary + run: | + cat benchmarks.log + echo "$(cat benchmarks.log)" >> $GITHUB_STEP_SUMMARY + + Tests: + timeout-minutes: 60 + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest] + python-version: ['3.7', '3.8', '3.9', '3.10'] + model: [yolov8n] + torch: [latest] + include: + - os: ubuntu-latest + python-version: '3.8' # torch 1.7.0 requires python >=3.6, <=3.8 + model: yolov8n + torch: '1.8.0' # min torch version CI https://pypi.org/project/torchvision/ + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' # caching pip dependencies + - name: Install requirements + shell: bash # for Windows compatibility + run: | + python -m pip install --upgrade pip wheel + if [ "${{ matrix.torch }}" == "1.8.0" ]; then + pip install -e . torch==1.8.0 torchvision==0.9.0 pytest --extra-index-url https://download.pytorch.org/whl/cpu + else + pip install -e . 
pytest --extra-index-url https://download.pytorch.org/whl/cpu + fi + - name: Check environment + run: | + echo "RUNNER_OS is ${{ runner.os }}" + echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" + echo "GITHUB_WORKFLOW is ${{ github.workflow }}" + echo "GITHUB_ACTOR is ${{ github.actor }}" + echo "GITHUB_REPOSITORY is ${{ github.repository }}" + echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" + python --version + pip --version + pip list + - name: Test detection + shell: bash # for Windows compatibility + run: | + yolo task=detect mode=train data=coco8.yaml model=yolov8n.yaml epochs=1 imgsz=32 + yolo task=detect mode=train data=coco8.yaml model=yolov8n.pt epochs=1 imgsz=32 + yolo task=detect mode=val data=coco8.yaml model=runs/detect/train/weights/last.pt imgsz=32 + yolo task=detect mode=predict model=runs/detect/train/weights/last.pt imgsz=32 source=ultralytics/assets/bus.jpg + yolo mode=export model=runs/detect/train/weights/last.pt imgsz=32 format=torchscript + - name: Test segmentation + shell: bash # for Windows compatibility + run: | + yolo task=segment mode=train data=coco8-seg.yaml model=yolov8n-seg.yaml epochs=1 imgsz=32 + yolo task=segment mode=train data=coco8-seg.yaml model=yolov8n-seg.pt epochs=1 imgsz=32 + yolo task=segment mode=val data=coco8-seg.yaml model=runs/segment/train/weights/last.pt imgsz=32 + yolo task=segment mode=predict model=runs/segment/train/weights/last.pt imgsz=32 source=ultralytics/assets/bus.jpg + yolo mode=export model=runs/segment/train/weights/last.pt imgsz=32 format=torchscript + - name: Test classification + shell: bash # for Windows compatibility + run: | + yolo task=classify mode=train data=imagenet10 model=yolov8n-cls.yaml epochs=1 imgsz=32 + yolo task=classify mode=train data=imagenet10 model=yolov8n-cls.pt epochs=1 imgsz=32 + yolo task=classify mode=val data=imagenet10 model=runs/classify/train/weights/last.pt imgsz=32 + yolo task=classify mode=predict model=runs/classify/train/weights/last.pt imgsz=32 source=ultralytics/assets/bus.jpg + yolo mode=export model=runs/classify/train/weights/last.pt imgsz=32 format=torchscript + - name: Pytest tests + shell: bash # for Windows compatibility + run: pytest tests diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/cla.yml b/src/train_utils/train_models/models/ultralytics/.github/workflows/cla.yml new file mode 100644 index 0000000..e6000a1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/cla.yml @@ -0,0 +1,37 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +name: CLA Assistant +on: + issue_comment: + types: + - created + pull_request_target: + types: + - reopened + - opened + - synchronize + +jobs: + CLA: + if: github.repository == 'ultralytics/ultralytics' + runs-on: ubuntu-latest + steps: + - name: CLA Assistant + if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I sign the CLA') || github.event_name == 'pull_request_target' + uses: contributor-assistant/github-action@v2.3.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # must be repository secret token + PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + with: + path-to-signatures: 'signatures/version1/cla.json' + path-to-document: 'https://github.com/ultralytics/assets/blob/main/documents/CLA.md' # CLA document + # branch should not be protected + branch: 'main' + allowlist: dependabot[bot],github-actions,[pre-commit*,pre-commit*,bot* + + remote-organization-name: ultralytics + 
remote-repository-name: cla + custom-pr-sign-comment: 'I have read the CLA Document and I sign the CLA' + custom-allsigned-prcomment: All Contributors have signed the CLA. ✅ + #custom-notsigned-prcomment: 'pull request comment with Introductory message to ask new contributors to sign' diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/docker.yaml b/src/train_utils/train_models/models/ultralytics/.github/workflows/docker.yaml new file mode 100644 index 0000000..1d1d012 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/docker.yaml @@ -0,0 +1,57 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Builds ultralytics/ultralytics:latest images on DockerHub https://hub.docker.com/r/ultralytics + +name: Publish Docker Images + +on: + push: + branches: [main] + +jobs: + docker: + if: github.repository == 'ultralytics/ultralytics' + name: Push Docker image to Docker Hub + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v3 + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push arm64 image + uses: docker/build-push-action@v4 + continue-on-error: true + with: + context: . + platforms: linux/arm64 + file: docker/Dockerfile-arm64 + push: true + tags: ultralytics/ultralytics:latest-arm64 + + - name: Build and push CPU image + uses: docker/build-push-action@v4 + continue-on-error: true + with: + context: . + file: docker/Dockerfile-cpu + push: true + tags: ultralytics/ultralytics:latest-cpu + + - name: Build and push GPU image + uses: docker/build-push-action@v4 + continue-on-error: true + with: + context: . + file: docker/Dockerfile + push: true + tags: ultralytics/ultralytics:latest diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/greetings.yml b/src/train_utils/train_models/models/ultralytics/.github/workflows/greetings.yml new file mode 100644 index 0000000..e3e2e06 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/greetings.yml @@ -0,0 +1,56 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +name: Greetings + +on: + pull_request_target: + types: [opened] + issues: + types: [opened] + +jobs: + greeting: + runs-on: ubuntu-latest + steps: + - uses: actions/first-interaction@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + pr-message: | + 👋 Hello @${{ github.actor }}, thank you for submitting a YOLOv8 🚀 PR! To allow your work to be integrated as seamlessly as possible, we advise you to: + + - ✅ Verify your PR is **up-to-date** with `ultralytics/ultralytics` `main` branch. If your PR is behind you can update your code by clicking the 'Update branch' button or by running `git pull` and `git merge main` locally. + - ✅ Verify all YOLOv8 Continuous Integration (CI) **checks are passing**. + - ✅ Update YOLOv8 [Docs](https://docs.ultralytics.com) for any new or updated features. + - ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee + + See our [Contributing Guide](https://github.com/ultralytics/ultralytics/blob/main/CONTRIBUTING.md) for details and let us know if you have any questions! 
+ + issue-message: | + 👋 Hello @${{ github.actor }}, thank you for your interest in YOLOv8 🚀! We recommend a visit to the [YOLOv8 Docs](https://docs.ultralytics.com) for new users where you can find many [Python](https://docs.ultralytics.com/usage/python/) and [CLI](https://docs.ultralytics.com/usage/cli/) usage examples and where many of the most common questions may already be answered. + + If this is a 🐛 Bug Report, please provide a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us debug it. + + If this is a custom training ❓ Question, please provide as much information as possible, including dataset image examples and training logs, and verify you are following our [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results). + + ## Install + + Pip install the `ultralytics` package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) in a [**Python>=3.7**](https://www.python.org/) environment with [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + + ```bash + pip install ultralytics + ``` + + ## Environments + + YOLOv8 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) + - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + ## Status + + Ultralytics CI + + If this badge is green, all [Ultralytics CI](https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml?query=event%3Aschedule) tests are currently passing. CI tests verify correct operation of all YOLOv8 [Modes](https://docs.ultralytics.com/modes/) and [Tasks](https://docs.ultralytics.com/tasks/) on macOS, Windows, and Ubuntu every 24 hours and on every commit. 
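The greeting above tells new users to `pip install ultralytics` into a Python>=3.7 / PyTorch>=1.7 environment. As a minimal sketch of exercising the package after installation (the nano weights and the tiny `coco8.yaml` demo dataset are used here purely for illustration and download automatically on first use):

```python
# Minimal smoke test of the installed ultralytics package.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")                          # load pretrained nano detection model
model.train(data="coco8.yaml", epochs=1, imgsz=64)  # tiny training run on the 8-image demo dataset
metrics = model.val()                               # evaluate on the validation split
print(metrics.box.map)                              # mAP50-95
```

The CLI equivalent used throughout the CI workflow above is `yolo task=detect mode=train data=coco8.yaml model=yolov8n.pt epochs=1 imgsz=32`.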
diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/links.yml b/src/train_utils/train_models/models/ultralytics/.github/workflows/links.yml new file mode 100644 index 0000000..2baea63 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/links.yml @@ -0,0 +1,38 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLO Continuous Integration (CI) GitHub Actions tests + +name: Check Broken links + +on: + push: + branches: [main] + pull_request: + branches: [main] + workflow_dispatch: + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + Links: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + + - name: Test Markdown and HTML links + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com --verbose --no-progress './**/*.md' './**/*.html' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} + + - name: Test Markdown, HTML, YAML, Python and Notebook links + if: github.event_name == 'workflow_dispatch' + uses: lycheeverse/lychee-action@v1.6.1 + with: + fail: true + # accept 429(Instagram, 'too many requests'), 999(LinkedIn, 'unknown status code'), Timeout(Twitter) + args: --accept 429,999 --exclude-loopback --exclude twitter.com,url.com --verbose --no-progress './**/*.md' './**/*.html' './**/*.yml' './**/*.yaml' './**/*.py' './**/*.ipynb' + env: + GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/publish.yml b/src/train_utils/train_models/models/ultralytics/.github/workflows/publish.yml new file mode 100644 index 0000000..15c13bf --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/publish.yml @@ -0,0 +1,69 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Publish pip package to PyPI https://pypi.org/project/ultralytics/ and Docs to https://docs.ultralytics.com + +name: Publish to PyPI and Deploy Docs + +on: + push: + branches: [main] + workflow_dispatch: + inputs: + pypi: + type: boolean + description: Publish to PyPI + docs: + type: boolean + description: Deploy Docs + +jobs: + publish: + if: github.repository == 'ultralytics/ultralytics' && github.actor == 'glenn-jocher' + name: Publish + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Set up Python environment + uses: actions/setup-python@v4 + with: + python-version: '3.10' + cache: 'pip' # caching pip dependencies + - name: Install dependencies + run: | + python -m pip install --upgrade pip wheel build twine + pip install -e '.[dev]' --extra-index-url https://download.pytorch.org/whl/cpu + - name: Check PyPI version + shell: python + run: | + import os + import pkg_resources as pkg + import ultralytics + from ultralytics.yolo.utils.checks import check_latest_pypi_version + + v_local = pkg.parse_version(ultralytics.__version__).release + v_pypi = pkg.parse_version(check_latest_pypi_version()).release + print(f'Local version is {v_local}') + print(f'PyPI version is {v_pypi}') + d = [a - b for a, b in zip(v_local, v_pypi)] # diff + increment = (d[0] == d[1] == 0) and d[2] == 1 # only publish if patch version increments by 1 + os.system(f'echo "increment={increment}" >> $GITHUB_OUTPUT') + if increment: + print('Local version is higher than PyPI version. 
Publishing new version to PyPI ✅.') + id: check_pypi + - name: Publish to PyPI + continue-on-error: true + if: (github.event_name == 'push' || github.event.inputs.pypi == 'true') && steps.check_pypi.outputs.increment == 'True' + env: + PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} + run: | + python -m build + python -m twine upload dist/* -u __token__ -p $PYPI_TOKEN + - name: Deploy Docs + continue-on-error: true + if: (github.event_name == 'push' && steps.check_pypi.outputs.increment == 'True') || github.event.inputs.docs == 'true' + env: + PERSONAL_ACCESS_TOKEN: ${{ secrets.PERSONAL_ACCESS_TOKEN }} + run: | + mkdocs gh-deploy --force || true + # git checkout gh-pages + # git push https://$PERSONAL_ACCESS_TOKEN@github.com/ultralytics/docs gh-pages --force diff --git a/src/train_utils/train_models/models/ultralytics/.github/workflows/stale.yml b/src/train_utils/train_models/models/ultralytics/.github/workflows/stale.yml new file mode 100644 index 0000000..82c881c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.github/workflows/stale.yml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +name: Close stale issues +on: + schedule: + - cron: '0 0 * * *' # Runs at 00:00 UTC every day + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v7 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + + stale-issue-message: | + 👋 Hello there! We wanted to give you a friendly reminder that this issue has not had any recent activity and may be closed soon, but don't worry - you can always reopen it if needed. If you still have any questions or concerns, please feel free to let us know how we can help. + + For additional resources and information, please see the links below: + + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ + + stale-pr-message: | + 👋 Hello there! We wanted to let you know that we've decided to close this pull request due to inactivity. We appreciate the effort you put into contributing to our project, but unfortunately, not all contributions are suitable or aligned with our product roadmap. + + We hope you understand our decision, and please don't let it discourage you from contributing to open source projects in the future. We value all of our community members and their contributions, and we encourage you to keep exploring new projects and ways to get involved. + + For additional resources and information, please see the links below: + + - **Docs**: https://docs.ultralytics.com + - **HUB**: https://hub.ultralytics.com + - **Community**: https://community.ultralytics.com + + Thank you for your contributions to YOLO 🚀 and Vision AI ⭐ + + days-before-issue-stale: 30 + days-before-issue-close: 10 + days-before-pr-stale: 90 + days-before-pr-close: 30 + exempt-issue-labels: 'documentation,tutorial,TODO' + operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting. 
diff --git a/src/train_utils/train_models/models/ultralytics/.gitignore b/src/train_utils/train_models/models/ultralytics/.gitignore new file mode 100644 index 0000000..e61f79a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.gitignore @@ -0,0 +1,155 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# Profiling +*.pclprof + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# datasets and projects +datasets/ +runs/ +wandb/ + +.DS_Store + +# Neural Network weights ----------------------------------------------------------------------------------------------- +weights/ +*.weights +*.pt +*.pb +*.onnx +*.engine +*.mlmodel +*.torchscript +*.tflite +*.h5 +*_saved_model/ +*_web_model/ +*_openvino_model/ +*_paddle_model/ diff --git a/src/train_utils/train_models/models/ultralytics/.pre-commit-config.yaml b/src/train_utils/train_models/models/ultralytics/.pre-commit-config.yaml new file mode 100644 index 0000000..54a733a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/.pre-commit-config.yaml @@ -0,0 +1,73 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md + +exclude: 'docs/' +# Define bot property if installed via https://github.com/marketplace/pre-commit-ci +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: monthly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key + + - repo: https://github.com/asottile/pyupgrade + rev: v3.3.1 + hooks: + - id: pyupgrade + name: Upgrade code + + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports + + - repo: https://github.com/google/yapf + rev: v0.32.0 + hooks: + - id: yapf + name: YAPF formatting + + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.16 + hooks: + - id: mdformat + name: MD formatting + additional_dependencies: + - mdformat-gfm + - mdformat-black + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" + + - repo: https://github.com/PyCQA/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + name: PEP8 + + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota + +# - repo: https://github.com/asottile/yesqa +# rev: v1.4.0 +# hooks: +# - id: yesqa + +# - repo: https://github.com/asottile/dead +# rev: v1.5.0 +# hooks: +# - id: dead diff --git a/src/train_utils/train_models/models/ultralytics/CITATION.cff b/src/train_utils/train_models/models/ultralytics/CITATION.cff new file mode 100644 index 0000000..fd26c57 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/CITATION.cff @@ -0,0 +1,20 @@ +cff-version: 1.2.0 +preferred-citation: + type: software + message: If you use this software, please cite it as below. + authors: + - family-names: Jocher + given-names: Glenn + orcid: "https://orcid.org/0000-0001-5950-6979" + - family-names: Chaurasia + given-names: Ayush + orcid: "https://orcid.org/0000-0002-7603-6750" + - family-names: Qiu + given-names: Jing + orcid: "https://orcid.org/0000-0003-3783-7069" + title: "YOLO by Ultralytics" + version: 8.0.0 + # doi: 10.5281/zenodo.3908559 # TODO + date-released: 2023-1-10 + license: GPL-3.0 + url: "https://github.com/ultralytics/ultralytics" diff --git a/src/train_utils/train_models/models/ultralytics/CONTRIBUTING.md b/src/train_utils/train_models/models/ultralytics/CONTRIBUTING.md new file mode 100644 index 0000000..dd05788 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/CONTRIBUTING.md @@ -0,0 +1,115 @@ +## Contributing to YOLOv8 🚀 + +We love your input! We want to make contributing to YOLOv8 as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing a new feature +- Becoming a maintainer + +YOLOv8 works so well due to our combined community effort, and for every small improvement you contribute you will be +helping push the frontiers of what's possible in AI 😃! + +## Submitting a Pull Request (PR) 🛠️ + +Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: + +### 1. Select File to Update + +Select `requirements.txt` to update by clicking on it in GitHub. + +
+[screenshot: PR_step1]
+
+### 2. Click 'Edit this file'
+
+Button is in top-right corner.
+
+[screenshot: PR_step2]
+
+### 3. Make Changes
+
+Change `matplotlib` version from `3.2.2` to `3.3`.
+
+[screenshot: PR_step3]
+
+### 4. Preview Changes and Submit PR
+
+Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch**
+for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose
+changes** button. All done, your PR is now submitted to YOLOv8 for review and approval 😃!
+
+[screenshot: PR_step4]
+
+### PR recommendations
+
+To allow your work to be integrated as seamlessly as possible, we advise you to:
+
+- ✅ Verify your PR is **up-to-date** with `ultralytics/ultralytics` `main` branch. If your PR is behind you can update
+  your code by clicking the 'Update branch' button or by running `git pull` and `git merge main` locally.
+
+[Screenshot 2022-08-29 at 22 47 15]
+
+- ✅ Verify all YOLOv8 Continuous Integration (CI) **checks are passing**.
+
+[Screenshot 2022-08-29 at 22 47 03]
+ +- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase + but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee + +### Docstrings + +Not all functions or classes require docstrings but when they do, we +follow [google-style docstrings format](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings). +Here is an example: + +```python +""" + What the function does. Performs NMS on given detection predictions. + + Args: + arg1: The description of the 1st argument + arg2: The description of the 2nd argument + + Returns: + What the function returns. Empty if nothing is returned. + + Raises: + Exception Class: When and why this exception can be raised by the function. +""" +``` + +## Submitting a Bug Report 🐛 + +If you spot a problem with YOLOv8 please submit a Bug Report! + +For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few +short guidelines below to help users provide what we need in order to get started. + +When asking a question, people will be better able to provide help if you provide **code** that they can easily +understand and use to **reproduce** the problem. This is referred to by community members as creating +a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +the problem should be: + +- ✅ **Minimal** – Use as little code as possible that still produces the same problem +- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem + +In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code +should be: + +- ✅ **Current** – Verify that your code is up-to-date with current + GitHub [main](https://github.com/ultralytics/ultralytics/tree/main) branch, and if necessary `git pull` or `git clone` + a new copy to ensure your problem has not already been resolved by previous commits. +- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this + repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. + +If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 +**Bug Report** [template](https://github.com/ultralytics/ultralytics/issues/new/choose) and providing +a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +understand and diagnose your problem. + +## License + +By contributing, you agree that your contributions will be licensed under +the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/src/train_utils/train_models/models/ultralytics/LICENSE b/src/train_utils/train_models/models/ultralytics/LICENSE new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/LICENSE @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+ + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. 
You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. 
+ + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. 
+ + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. 
+ + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. 
+ + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". 
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs.  If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.  But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/src/train_utils/train_models/models/ultralytics/MANIFEST.in b/src/train_utils/train_models/models/ultralytics/MANIFEST.in
new file mode 100644
index 0000000..def1ad3
--- /dev/null
+++ b/src/train_utils/train_models/models/ultralytics/MANIFEST.in
@@ -0,0 +1,8 @@
+include *.md
+include requirements.txt
+include LICENSE
+include setup.py
+include ultralytics/assets/bus.jpg
+include ultralytics/assets/zidane.jpg
+recursive-include ultralytics *.yaml
+recursive-exclude __pycache__ *
diff --git a/src/train_utils/train_models/models/ultralytics/README.md b/src/train_utils/train_models/models/ultralytics/README.md
new file mode 100644
index 0000000..c1683e4
--- /dev/null
+++ b/src/train_utils/train_models/models/ultralytics/README.md
@@ -0,0 +1,259 @@
+
+

+ + +

+ +[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
+ Ultralytics CI + YOLOv8 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics), developed by [Ultralytics](https://ultralytics.com), +is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces +new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and +easy to use, making it an excellent choice for a wide range of object detection, image segmentation and image +classification tasks. + +To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license). + + + +
+ + + + + + + + + + + + + + + + + +
+
+
+## Documentation
+ +See below for a quickstart installation and usage example, and see the [YOLOv8 Docs](https://docs.ultralytics.com) for +full documentation on training, validation, prediction and deployment. + +
+Install + +Pip install the ultralytics package including +all [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) in a +[**Python>=3.7**](https://www.python.org/) environment with +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +pip install ultralytics +``` + +
+ +
+Usage + +#### CLI + +YOLOv8 may be used directly in the Command Line Interface (CLI) with a `yolo` command: + +```bash +yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +``` + +`yolo` can be used for a variety of tasks and modes and accepts additional arguments, i.e. `imgsz=640`. See the YOLOv8 +[CLI Docs](https://docs.ultralytics.com/usage/cli) for examples. + +#### Python + +YOLOv8 may also be used directly in a Python environment, and accepts the +same [arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: + +```python +from ultralytics import YOLO + +# Load a model +model = YOLO("yolov8n.yaml") # build a new model from scratch +model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + +# Use the model +model.train(data="coco128.yaml", epochs=3) # train the model +metrics = model.val() # evaluate model performance on the validation set +results = model("https://ultralytics.com/images/bus.jpg") # predict on an image +success = model.export(format="onnx") # export the model to ONNX format +``` + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases). See +YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more examples. + +
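+The `results` returned by the Python API can be inspected directly. The snippet below is a minimal sketch of one way to read the predicted boxes; it assumes the `Results.boxes` attributes (`xyxy`, `conf`, `cls`) described in the YOLOv8 Predict docs.
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")  # load a pretrained detection model
+results = model("https://ultralytics.com/images/bus.jpg")  # run prediction
+
+for r in results:  # one Results object per image
+    print(r.boxes.xyxy)  # predicted boxes in (x1, y1, x2, y2) format
+    print(r.boxes.conf)  # confidence scores
+    print(r.boxes.cls)   # class indices
+```
+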
+
+## Models
+ +All YOLOv8 pretrained models are available here. Detect, Segment and Pose models are pretrained on the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify models are pretrained on the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +
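+As a minimal sketch, any of the pretrained weights listed in the tables below can be loaded by name with the same Python API shown above, and the weights download automatically on first use:
+
+```python
+from ultralytics import YOLO
+
+det_model = YOLO("yolov8n.pt")      # detection weights (COCO-pretrained)
+seg_model = YOLO("yolov8n-seg.pt")  # segmentation weights
+cls_model = YOLO("yolov8n-cls.pt")  # classification weights (ImageNet-pretrained)
+
+metrics = det_model.val(data="coco128.yaml")  # example: validate the detection model
+```
+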
Detection
+
+See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models.
+
+| Model | size (pixels) | mAPval 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
+| --- | --- | --- | --- | --- | --- | --- |
+| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
+| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
+| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
+| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
+| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
+
+- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
+
Reproduce by `yolo val detect data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val detect data=coco128.yaml batch=1 device=0|cpu` + +
+ +
Segmentation
+
+See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models.
+
+| Model | size (pixels) | mAPbox 50-95 | mAPmask 50-95 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
+| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
+| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
+| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
+| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+
+- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
+
Reproduce by `yolo val segment data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val segment data=coco128-seg.yaml batch=1 device=0|cpu` + +
+ +
Classification
+
+See [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples with these models.
+
+| Model | size (pixels) | acc top1 | acc top5 | Speed CPU ONNX (ms) | Speed A100 TensorRT (ms) | params (M) | FLOPs (B) at 640 |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
+| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
+| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
+| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
+| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
+
+- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
+
Reproduce by `yolo val classify data=path/to/ImageNet device=0` +- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` + +
+
+## Integrations
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+
+| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW |
+| :---: | :---: | :---: | :---: |
+| Label and export your custom datasets directly to YOLOv8 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv8 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv8 models, resume training, and interactively visualize and debug predictions | Run YOLOv8 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |
+
+## Ultralytics HUB
+
+Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 (coming soon) 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+
+
+
+
+## Contribute
+
+We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
+
+
+
+
+
+## License
+
+YOLOv8 is available under two different licenses:
+
+- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for details.
+- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).
+
+## Contact
+ +For YOLOv8 bug reports and feature requests please +visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues) or +the [Ultralytics Community Forum](https://community.ultralytics.com/). + +
+
+ + + + + + + + + + + + + + + + + +
diff --git a/src/train_utils/train_models/models/ultralytics/README.zh-CN.md b/src/train_utils/train_models/models/ultralytics/README.zh-CN.md new file mode 100644 index 0000000..81e568c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/README.zh-CN.md @@ -0,0 +1,244 @@ +
+

+ + +

+ +[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
+ Ultralytics CI + YOLOv8 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) 是由 [Ultralytics](https://ultralytics.com) 开发的一个前沿的 +SOTA 模型。它在以前成功的 YOLO 版本基础上,引入了新的功能和改进,进一步提升了其性能和灵活性。YOLOv8 +基于快速、准确和易于使用的设计理念,使其成为广泛的目标检测、图像分割和图像分类任务的绝佳选择。 + +如果要申请企业许可证,请填写 [Ultralytics 许可](https://ultralytics.com/license)。 + +
+ + + + + + + + + + + + + + + + + +
+
+ +##
文档
+ +有关训练、测试和部署的完整文档见[YOLOv8 Docs](https://docs.ultralytics.com)。请参阅下面的快速入门示例。 + +
+安装 + +Pip 安装包含所有 [requirements](https://github.com/ultralytics/ultralytics/blob/main/requirements.txt) 的 +ultralytics 包,环境要求 [**Python>=3.7**](https://www.python.org/),且 [\*\*PyTorch>=1.7 +\*\*](https://pytorch.org/get-started/locally/)。 + +```bash +pip install ultralytics +``` + +
+ +
+使用方法 + +YOLOv8 可以直接在命令行界面(CLI)中使用 `yolo` 命令运行: + +```bash +yolo predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' +``` + +`yolo`可以用于各种任务和模式,并接受额外的参数,例如 `imgsz=640`。参见 YOLOv8 [文档](https://docs.ultralytics.com) +中可用`yolo`[参数](https://docs.ultralytics.com/usage/cfg/)的完整列表。 + +```bash +yolo task=detect mode=train model=yolov8n.pt args... + classify predict yolov8n-cls.yaml args... + segment val yolov8n-seg.yaml args... + export yolov8n.pt format=onnx args... +``` + +YOLOv8 也可以在 Python 环境中直接使用,并接受与上面 CLI 例子中相同的[参数](https://docs.ultralytics.com/usage/cfg/): + +```python +from ultralytics import YOLO + +# 加载模型 +model = YOLO("yolov8n.yaml") # 从头开始构建新模型 +model = YOLO("yolov8n.pt") # 加载预训练模型(推荐用于训练) + +# Use the model +results = model.train(data="coco128.yaml", epochs=3) # 训练模型 +results = model.val() # 在验证集上评估模型性能 +results = model("https://ultralytics.com/images/bus.jpg") # 预测图像 +success = model.export(format="onnx") # 将模型导出为 ONNX 格式 +``` + +[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从 +Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。 + +
+ +##
模型
+ +所有 YOLOv8 的预训练模型都可以在这里找到。目标检测和分割模型是在 COCO 数据集上预训练的,而分类模型是在 ImageNet 数据集上预训练的。 + +第一次使用时,[模型](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) 会从 +Ultralytics [发布页](https://github.com/ultralytics/ultralytics/releases) 自动下载。 + +
目标检测 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | 推理速度
CPU ONNX
(ms) | 推理速度
A100 TensorRT
(ms) | 参数量
(M) | FLOPs
(B) | +| ------------------------------------------------------------------------------------ | --------------- | -------------------- | ----------------------------- | ---------------------------------- | --------------- | ----------------- | +| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | +| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | +| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | +| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | +| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | + +- **mAPval** 结果都在 [COCO val2017](http://cocodataset.org) 数据集上,使用单模型单尺度测试得到。 +
复现命令 `yolo val detect data=coco.yaml device=0` +- **推理速度**使用 COCO + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +
复现命令 `yolo val detect data=coco128.yaml batch=1 device=0|cpu` + +
+ +
实例分割 + +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 推理速度
CPU ONNX
(ms) | 推理速度
A100 TensorRT
(ms) | 参数量
(M) | FLOPs
(B) | +| -------------------------------------------------------------------------------------------- | --------------- | -------------------- | --------------------- | ----------------------------- | ---------------------------------- | --------------- | ----------------- | +| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | +| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | +| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | +| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | +| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | + +- **mAPval** 结果都在 [COCO val2017](http://cocodataset.org) 数据集上,使用单模型单尺度测试得到。 +
复现命令 `yolo val segment data=coco.yaml device=0` +- **推理速度**使用 COCO + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +
复现命令 `yolo val segment data=coco128-seg.yaml batch=1 device=0|cpu` + +
+ +
分类 + +| 模型 | 尺寸
(像素) | acc
top1 | acc
top5 | 推理速度
CPU ONNX
(ms) | 推理速度
A100 TensorRT
(ms) | 参数量
(M) | FLOPs
(B) at 640 | +| -------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ----------------------------- | ---------------------------------- | --------------- | ------------------------ | +| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 | +| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 | +| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 | +| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 | +| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 | + +- **acc** 都在 [ImageNet](https://www.image-net.org/) 数据集上,使用单模型单尺度测试得到。 +
复现命令 `yolo val classify data=path/to/ImageNet device=0` +- **推理速度**使用 ImageNet + 验证集图片推理时间进行平均得到,测试环境使用 [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) 实例。 +
复现命令 `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` + +
+ +##
模块集成
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv8 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv8 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv8 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv8 推理的速度最高可提高6倍 | + +##
Ultralytics HUB
+ +[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们⭐ **新**的无代码解决方案,用于可视化数据集,训练 YOLOv8🚀 +模型,并以无缝体验方式部署到现实世界。现在开始**免费**! +还可以通过下载 [Ultralytics App](https://ultralytics.com/app_install) 在你的 iOS 或 Android 设备上运行 YOLOv8 模型! + + + + +##
贡献
+ +我们喜欢您的意见或建议!我们希望尽可能简单和透明地为 YOLOv8 做出贡献。请看我们的 [贡献指南](CONTRIBUTING.md) +,并填写 [调查问卷](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) +向我们发送您的体验反馈。感谢我们所有的贡献者! + + + + + + +##
License
+ +YOLOv8 在两种不同的 License 下可用: + +- **GPL-3.0 License**: 查看 [License](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) 文件的详细信息。 +- **企业License**:在没有 GPL-3.0 开源要求的情况下为商业产品开发提供更大的灵活性。典型用例是将 Ultralytics 软件和 AI + 模型嵌入到商业产品和应用程序中。在以下位置申请企业许可证 [Ultralytics 许可](https://ultralytics.com/license) 。 + +##
联系我们
+ +请访问 [GitHub Issues](https://github.com/ultralytics/ultralytics/issues) +或 [Ultralytics Community Forum](https://community.ultralytics.com) 以报告 YOLOv8 错误和请求功能。 + +
+
+ + + + + + + + + + + + + + + + + +
diff --git a/src/train_utils/train_models/models/ultralytics/docker/Dockerfile b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile new file mode 100644 index 0000000..0db9152 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile @@ -0,0 +1,71 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Builds ultralytics/ultralytics:latest image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics +# Image is CUDA-optimized for YOLOv8 single/multi-GPU training and inference + +# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ +# RUN alias python=python3 + +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl tar + +# Create working directory +RUN mkdir -p /usr/src/ultralytics +WORKDIR /usr/src/ultralytics + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics + +# Install pip packages +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache '.[export]' albumentations comet gsutil notebook + +# Set environment variables +ENV OMP_NUM_THREADS=1 + +# Cleanup +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/ultralytics:latest && sudo docker build -f docker/Dockerfile -t $t . && sudo docker push $t + +# Pull and Run +# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t + +# Pull and Run with local directory access +# t=ultralytics/ultralytics:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t + +# Kill all +# sudo docker kill $(sudo docker ps -q) + +# Kill all image-based +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/ultralytics:latest) + +# DockerHub tag update +# t=ultralytics/ultralytics:latest tnew=ultralytics/ultralytics:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew + +# Clean up +# sudo docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/ultralytics:latest diff --git a/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-arm64 b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-arm64 new file mode 100644 index 0000000..82ff4a9 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-arm64 @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Builds ultralytics/ultralytics:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics +# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. 
Jetson Nano and Raspberry Pi + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM arm64v8/ubuntu:rolling + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev +# RUN alias python=python3 + +# Create working directory +RUN mkdir -p /usr/src/ultralytics +WORKDIR /usr/src/ultralytics + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics + +# Install pip packages +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache . albumentations gsutil notebook + +# Cleanup +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/ultralytics:latest-arm64 && sudo docker build --platform linux/arm64 -f docker/Dockerfile-arm64 -t $t . && sudo docker push $t + +# Pull and Run +# t=ultralytics/ultralytics:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-cpu b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-cpu new file mode 100644 index 0000000..364aaf4 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docker/Dockerfile-cpu @@ -0,0 +1,41 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Builds ultralytics/ultralytics:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/ultralytics +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv8 deployments + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM ubuntu:rolling + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg g++ +# RUN alias python=python3 + +# Create working directory +RUN mkdir -p /usr/src/ultralytics +WORKDIR /usr/src/ultralytics + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/ultralytics /usr/src/ultralytics + +# Install pip packages +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache '.[export]' albumentations gsutil notebook \ + --extra-index-url https://download.pytorch.org/whl/cpu + +# Cleanup +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/ultralytics:latest-cpu && sudo docker build -f docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/ultralytics:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/train_utils/train_models/models/ultralytics/docs/CNAME b/src/train_utils/train_models/models/ultralytics/docs/CNAME new file mode 100644 index 0000000..773aac8 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/CNAME @@ -0,0 +1 @@ +docs.ultralytics.com \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/README.md b/src/train_utils/train_models/models/ultralytics/docs/README.md new file mode 100644 index 0000000..3b3e306 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/README.md @@ -0,0 +1,85 @@ +# Ultralytics Docs + +Ultralytics Docs are deployed to [https://docs.ultralytics.com](https://docs.ultralytics.com). + +### Install Ultralytics package + +To install the ultralytics package in developer mode, you will need to have Git and Python 3 installed on your system. +Then, follow these steps: + +1. Clone the ultralytics repository to your local machine using Git: + +```bash +git clone https://github.com/ultralytics/ultralytics.git +``` + +2. Navigate to the root directory of the repository: + +```bash +cd ultralytics +``` + +3. Install the package in developer mode using pip: + +```bash +pip install -e '.[dev]' +``` + +This will install the ultralytics package and its dependencies in developer mode, allowing you to make changes to the +package code and have them reflected immediately in your Python environment. + +Note that you may need to use the pip3 command instead of pip if you have multiple versions of Python installed on your +system. + +### Building and Serving Locally + +The `mkdocs serve` command is used to build and serve a local version of the MkDocs documentation site. It is typically +used during the development and testing phase of a documentation project. + +```bash +mkdocs serve +``` + +Here is a breakdown of what this command does: + +- `mkdocs`: This is the command-line interface (CLI) for the MkDocs static site generator. It is used to build and serve + MkDocs sites. +- `serve`: This is a subcommand of the `mkdocs` CLI that tells it to build and serve the documentation site locally. +- `-a`: This flag specifies the hostname and port number to bind the server to. The default value is `localhost:8000`. +- `-t`: This flag specifies the theme to use for the documentation site. The default value is `mkdocs`. +- `-s`: This flag tells the `serve` command to serve the site in silent mode, which means it will not display any log + messages or progress updates. + When you run the `mkdocs serve` command, it will build the documentation site using the files in the `docs/` directory + and serve it at the specified hostname and port number. You can then view the site by going to the URL in your web + browser. + +While the site is being served, you can make changes to the documentation files and see them reflected in the live site +immediately. This is useful for testing and debugging your documentation before deploying it to a live server. + +To stop the serve command and terminate the local server, you can use the `CTRL+C` keyboard shortcut. + +### Deploying Your Documentation Site + +To deploy your MkDocs documentation site, you will need to choose a hosting provider and a deployment method. Some +popular options include GitHub Pages, GitLab Pages, and Amazon S3. 
+ +Before you can deploy your site, you will need to configure your `mkdocs.yml` file to specify the remote host and any +other necessary deployment settings. + +Once you have configured your `mkdocs.yml` file, you can use the `mkdocs deploy` command to build and deploy your site. +This command will build the documentation site using the files in the `docs/` directory and the specified configuration +file and theme, and then deploy the site to the specified remote host. + +For example, to deploy your site to GitHub Pages using the gh-deploy plugin, you can use the following command: + +```bash +mkdocs gh-deploy +``` + +If you are using GitHub Pages, you can set a custom domain for your documentation site by going to the "Settings" page +for your repository and updating the "Custom domain" field in the "GitHub Pages" section. + +![196814117-fc16e711-d2be-4722-9536-b7c6d78fd167](https://user-images.githubusercontent.com/26833433/210150206-9e86dcd7-10af-43e4-9eb2-9518b3799eac.png) + +For more information on deploying your MkDocs documentation site, see +the [MkDocs documentation](https://www.mkdocs.org/user-guide/deploying-your-docs/). diff --git a/src/train_utils/train_models/models/ultralytics/docs/SECURITY.md b/src/train_utils/train_models/models/ultralytics/docs/SECURITY.md new file mode 100644 index 0000000..c00e145 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/SECURITY.md @@ -0,0 +1,26 @@ +At [Ultralytics](https://ultralytics.com), the security of our users' data and systems is of utmost importance. To +ensure the safety and security of our [open-source projects](https://github.com/ultralytics), we have implemented +several measures to detect and prevent security vulnerabilities. + +[![ultralytics](https://snyk.io/advisor/python/ultralytics/badge.svg)](https://snyk.io/advisor/python/ultralytics) + +## Snyk Scanning + +We use [Snyk](https://snyk.io/advisor/python/ultralytics) to regularly scan the YOLOv8 repository for vulnerabilities +and security issues. Our goal is to identify and remediate any potential threats as soon as possible, to minimize any +risks to our users. + +## GitHub CodeQL Scanning + +In addition to our Snyk scans, we also use +GitHub's [CodeQL](https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/about-code-scanning-with-codeql) +scans to proactively identify and address security vulnerabilities. + +## Reporting Security Issues + +If you suspect or discover a security vulnerability in the YOLOv8 repository, please let us know immediately. You can +reach out to us directly via our [contact form](https://ultralytics.com/contact) or +via [security@ultralytics.com](mailto:security@ultralytics.com). Our security team will investigate and respond as soon +as possible. + +We appreciate your help in keeping the YOLOv8 repository secure and safe for everyone. diff --git a/src/train_utils/train_models/models/ultralytics/docs/app.md b/src/train_utils/train_models/models/ultralytics/docs/app.md new file mode 100644 index 0000000..8aaf686 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/app.md @@ -0,0 +1,48 @@ +# Ultralytics HUB App for YOLOv8 + + + +
+
+ + + + + + + + + + + + + + + + + +
+
+ +   + + +
+ +Welcome to the Ultralytics HUB app, which is designed to demonstrate the power and capabilities of the YOLOv5 and YOLOv8 +models. This app is available for download on +the [Apple App Store](https://apps.apple.com/xk/app/ultralytics/id1583935240) and +the [Google Play Store](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app). + +**To install the app, simply scan the QR code provided above**. At the moment, the app features YOLOv5 models, with +YOLOv8 models set to be available soon. + +With the YOLOv5 model, you can easily detect and classify objects in images and videos with high accuracy and speed. The +model has been trained on a vast dataset and can recognize a wide range of objects, including pedestrians, traffic +signs, and cars. + +Using this app, you can try out YOLOv5 on your images and videos, and observe how the model works in real-time. +Additionally, you can learn more about YOLOv5's functionality and how it can be integrated into real-world applications. + +We are confident that you will enjoy using YOLOv5 and be amazed at its capabilities. Thank you for choosing Ultralytics +for your AI solutions. \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/assets/favicon.ico b/src/train_utils/train_models/models/ultralytics/docs/assets/favicon.ico new file mode 100644 index 0000000..b71e7ec Binary files /dev/null and b/src/train_utils/train_models/models/ultralytics/docs/assets/favicon.ico differ diff --git a/src/train_utils/train_models/models/ultralytics/docs/hub.md b/src/train_utils/train_models/models/ultralytics/docs/hub.md new file mode 100644 index 0000000..199fa63 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/hub.md @@ -0,0 +1,112 @@ +# Ultralytics HUB + + + +
+
+ + + + + + + + + + + + + + + + + +
+
+ + CI CPU + + Open In Colab +
+ + +[Ultralytics HUB](https://hub.ultralytics.com) is a new no-code online tool developed +by [Ultralytics](https://ultralytics.com), the creators of the popular [YOLOv5](https://github.com/ultralytics/yolov5) +object detection and image segmentation models. With Ultralytics HUB, users can easily train and deploy YOLO models +without any coding or technical expertise. + +Ultralytics HUB is designed to be user-friendly and intuitive, with a drag-and-drop interface that allows users to +easily upload their data and select their model configurations. It also offers a range of pre-trained models and +templates to choose from, making it easy for users to get started with training their own models. Once a model is +trained, it can be easily deployed and used for real-time object detection and image segmentation tasks. Overall, +Ultralytics HUB is an essential tool for anyone looking to use YOLO for their object detection and image segmentation +projects. + +**[Get started now](https://hub.ultralytics.com)** and experience the power and simplicity of Ultralytics HUB for +yourself. Sign up for a free account and start building, training, and deploying YOLOv5 and YOLOv8 models today. + +## 1. Upload a Dataset + +Ultralytics HUB datasets are just like YOLOv5 🚀 datasets, they use the same structure and the same label formats to keep +everything simple. + +When you upload a dataset to Ultralytics HUB, make sure to **place your dataset YAML inside the dataset root directory** +as in the example shown below, and then zip for upload to https://hub.ultralytics.com/. Your **dataset YAML, directory +and zip** should all share the same name. For example, if your dataset is called 'coco6' as in our +example [ultralytics/hub/coco6.zip](https://github.com/ultralytics/hub/blob/master/coco6.zip), then you should have a +coco6.yaml inside your coco6/ directory, which should zip to create coco6.zip for upload: + +```bash +zip -r coco6.zip coco6 +``` + +The example [coco6.zip](https://github.com/ultralytics/hub/blob/master/coco6.zip) dataset in this repository can be +downloaded and unzipped to see exactly how to structure your custom dataset. + +
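+As an illustrative sketch (the `coco6` name and the `images/train`, `images/val` folders simply mirror the example above and the YAML shown below), the key point is that the YAML, the dataset directory and the zip all share one name, with the YAML at the dataset root:
+
+```python
+from pathlib import Path
+import shutil
+
+root = Path("coco6")  # dataset directory, YAML and zip all share this name
+(root / "images/train").mkdir(parents=True, exist_ok=True)
+(root / "images/val").mkdir(parents=True, exist_ok=True)
+
+# dataset YAML lives inside the dataset root directory
+(root / "coco6.yaml").write_text(
+    "path:\ntrain: images/train\nval: images/val\nnames:\n  0: person\n"
+)
+
+# creates coco6.zip containing the coco6/ folder, ready for upload to HUB
+shutil.make_archive("coco6", "zip", root_dir=".", base_dir="coco6")
+```
+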

+ +

+ +The dataset YAML is the same standard YOLOv5 YAML format. See +the [YOLOv5 Train Custom Data tutorial](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data) for full details. + +```yaml +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: # dataset root dir (leave empty for HUB) +train: images/train # train images (relative to 'path') 8 images +val: images/val # val images (relative to 'path') 8 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + ... +``` + +After zipping your dataset, sign in to [Ultralytics HUB](https://bit.ly/ultralytics_hub) and click the Datasets tab. +Click 'Upload Dataset' to upload, scan and visualize your new dataset before training new YOLOv5 models on it! + +HUB Dataset Upload + +## 2. Train a Model + +Connect to the Ultralytics HUB notebook and use your model API key to begin training! + + +Open In Colab + +## 3. Deploy to Real World + +Export your model to 13 different formats, including TensorFlow, ONNX, OpenVINO, CoreML, Paddle and many others. Run +models directly on your [iOS](https://apps.apple.com/xk/app/ultralytics/id1583935240) or +[Android](https://play.google.com/store/apps/details?id=com.ultralytics.ultralytics_app) mobile device by downloading +the [Ultralytics App](https://ultralytics.com/app_install)! + +## ❓ Issues + +If you are a new [Ultralytics HUB](https://bit.ly/ultralytics_hub) user and have questions or comments, you are in the +right place! Please raise a [New Issue](https://github.com/ultralytics/hub/issues/new/choose) and let us know what we +can do to make your life better 😃! diff --git a/src/train_utils/train_models/models/ultralytics/docs/index.md b/src/train_utils/train_models/models/ultralytics/docs/index.md new file mode 100644 index 0000000..3fa3b5f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/index.md @@ -0,0 +1,74 @@ +
+ + +
+ Ultralytics CI + YOLOv8 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+ +Welcome to the Ultralytics YOLOv8 documentation landing +page! [Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of the YOLO (You Only Look +Once) object detection and image segmentation model developed by [Ultralytics](https://ultralytics.com). This page +serves as the starting point for exploring the various resources available to help you get started with YOLOv8 and +understand its features and capabilities. + +The YOLOv8 model is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection and image segmentation tasks. It can be trained on large datasets and is capable of running on a +variety of hardware platforms, from CPUs to GPUs. + +Whether you are a seasoned machine learning practitioner or new to the field, we hope that the resources on this page +will help you get the most out of YOLOv8. For any bugs and feature requests please +visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). For professional support +please [Contact Us](https://ultralytics.com/contact). + +## A Brief History of YOLO + +YOLO (You Only Look Once) is a popular object detection and image segmentation model developed by Joseph Redmon and Ali +Farhadi at the University of Washington. The first version of YOLO was released in 2015 and quickly gained popularity +due to its high speed and accuracy. + +YOLOv2 was released in 2016 and improved upon the original model by incorporating batch normalization, anchor boxes, and +dimension clusters. YOLOv3 was released in 2018 and further improved the model's performance by using a more efficient +backbone network, adding a feature pyramid, and making use of focal loss. + +In 2020, YOLOv4 was released which introduced a number of innovations such as the use of Mosaic data augmentation, a new +anchor-free detection head, and a new loss function. + +In 2021, Ultralytics released [YOLOv5](https://github.com/ultralytics/yolov5), which further improved the model's +performance and added new features such as support for panoptic segmentation and object tracking. + +YOLO has been widely used in a variety of applications, including autonomous vehicles, security and surveillance, and +medical imaging. It has also been used to win several competitions, such as the COCO Object Detection Challenge and the +DOTA Object Detection Challenge. + +For more information about the history and development of YOLO, you can refer to the following references: + +- Redmon, J., & Farhadi, A. (2015). You only look once: Unified, real-time object detection. In Proceedings of the IEEE + conference on computer vision and pattern recognition (pp. 779-788). +- Redmon, J., & Farhadi, A. (2016). YOLO9000: Better, faster, stronger. In Proceedings + +## Ultralytics YOLOv8 + +[Ultralytics YOLOv8](https://github.com/ultralytics/ultralytics) is the latest version of the YOLO object detection and +image segmentation model developed by Ultralytics. YOLOv8 is a cutting-edge, state-of-the-art (SOTA) model that builds +upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and +flexibility. + +One key feature of YOLOv8 is its extensibility. It is designed as a framework that supports all previous versions of +YOLO, making it easy to switch between different versions and compare their performance. 
This makes YOLOv8 an ideal +choice for users who want to take advantage of the latest YOLO technology while still being able to use their existing +YOLO models. + +In addition to its extensibility, YOLOv8 includes a number of other innovations that make it an appealing choice for a +wide range of object detection and image segmentation tasks. These include a new backbone network, a new anchor-free +detection head, and a new loss function. YOLOv8 is also highly efficient and can be run on a variety of hardware +platforms, from CPUs to GPUs. + +Overall, YOLOv8 is a powerful and flexible tool for object detection and image segmentation that offers the best of both +worlds: the latest SOTA technology and the ability to use and compare all previous YOLO versions. diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/benchmark.md b/src/train_utils/train_models/models/ultralytics/docs/modes/benchmark.md new file mode 100644 index 0000000..b57e093 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/benchmark.md @@ -0,0 +1,65 @@ + + +**Benchmark mode** is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks +provide information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) +or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export +formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for +their specific use case based on their requirements for speed and accuracy. + +!!! tip "Tip" + + * Export to ONNX or OpenVINO for up to 3x CPU speedup. + * Export to TensorRT for up to 5x GPU speedup. + +## Usage Examples + +Run YOLOv8n benchmarks on all supported export formats including ONNX, TensorRT etc. See Arguments section below for a +full list of export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics.yolo.utils.benchmarks import benchmark + + # Benchmark + benchmark(model='yolov8n.pt', imgsz=640, half=False, device=0) + ``` + === "CLI" + + ```bash + yolo benchmark model=yolov8n.pt imgsz=640 half=False device=0 + ``` + +## Arguments + +Arguments such as `model`, `imgsz`, `half`, `device`, and `hard_fail` provide users with the flexibility to fine-tune +the benchmarks to their specific needs and compare the performance of different export formats with ease. + +| Key | Value | Description | +|-------------|---------|----------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `half` | `False` | FP16 quantization | +| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu | +| `hard_fail` | `False` | do not continue on error (bool), or val floor threshold (float) | + +## Export Formats + +Benchmarks will attempt to run automatically on all possible export formats below. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/export.md b/src/train_utils/train_models/models/ultralytics/docs/modes/export.md new file mode 100644 index 0000000..f454466 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/export.md @@ -0,0 +1,81 @@ + + +**Export mode** is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the +model is converted to a format that can be used by other software applications or hardware devices. This mode is useful +when deploying the model to production environments. + +!!! tip "Tip" + + * Export to ONNX or OpenVINO for up to 3x CPU speedup. + * Export to TensorRT for up to 5x GPU speedup. + +## Usage Examples + +Export a YOLOv8n model to a different format like ONNX or TensorRT. See Arguments section below for a full list of +export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +## Arguments + +Export settings for YOLO models refer to the various configurations and options used to save or +export the model for use in other environments or platforms. These settings can affect the model's performance, size, +and compatibility with different systems. Some common YOLO export settings include the format of the exported model +file (e.g. ONNX, TensorFlow SavedModel), the device on which the model will be run (e.g. CPU, GPU), and the presence of +additional features such as masks or multiple labels per box. Other factors that may affect the export process include +the specific task the model is being used for and the requirements or constraints of the target environment or platform. +It is important to carefully consider and configure these settings to ensure that the exported model is optimized for +the intended use case and can be used effectively in the target environment. 
+ +| Key | Value | Description | +|-------------|-----------------|------------------------------------------------------| +| `format` | `'torchscript'` | format to export to | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `keras` | `False` | use Keras for TF SavedModel export | +| `optimize` | `False` | TorchScript: optimize for mobile | +| `half` | `False` | FP16 quantization | +| `int8` | `False` | INT8 quantization | +| `dynamic` | `False` | ONNX/TF/TensorRT: dynamic axes | +| `simplify` | `False` | ONNX: simplify model | +| `opset` | `None` | ONNX: opset version (optional, defaults to latest) | +| `workspace` | `4` | TensorRT: workspace size (GB) | +| `nms` | `False` | CoreML: add NMS | + +## Export Formats + +Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, +i.e. `format='onnx'` or `format='engine'`. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/index.md b/src/train_utils/train_models/models/ultralytics/docs/modes/index.md new file mode 100644 index 0000000..14e2d85 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/index.md @@ -0,0 +1,62 @@ +# YOLOv8 Modes + + + +Ultralytics YOLOv8 supports several **modes** that can be used to perform different tasks. These modes are: + +**Train**: For training a YOLOv8 model on a custom dataset. +**Val**: For validating a YOLOv8 model after it has been trained. +**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. +**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. +**Track**: For tracking objects in real-time using a YOLOv8 model. +**Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. + +## [Train](train.md) + +Train mode is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the +specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can +accurately predict the classes and locations of objects in an image. 
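For orientation, a minimal Train-mode call is sketched below; it mirrors the Python examples used elsewhere in these docs (pretrained `yolov8n.pt` weights and the small `coco128.yaml` demo dataset), and the full argument list is covered in the Train examples linked underneath.

```python
from ultralytics import YOLO

# Load a pretrained detection model and fine-tune it on a custom dataset
model = YOLO('yolov8n.pt')
model.train(data='coco128.yaml', epochs=100, imgsz=640)
```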
+ +[Train Examples](train.md){ .md-button .md-button--primary} + +## [Val](val.md) + +Val mode is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a +validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters +of the model to improve its performance. + +[Val Examples](val.md){ .md-button .md-button--primary} + +## [Predict](predict.md) + +Predict mode is used for making predictions using a trained YOLOv8 model on new images or videos. In this mode, the +model is loaded from a checkpoint file, and the user can provide images or videos to perform inference. The model +predicts the classes and locations of objects in the input images or videos. + +[Predict Examples](predict.md){ .md-button .md-button--primary} + +## [Export](export.md) + +Export mode is used for exporting a YOLOv8 model to a format that can be used for deployment. In this mode, the model is +converted to a format that can be used by other software applications or hardware devices. This mode is useful when +deploying the model to production environments. + +[Export Examples](export.md){ .md-button .md-button--primary} + +## [Track](track.md) + +Track mode is used for tracking objects in real-time using a YOLOv8 model. In this mode, the model is loaded from a +checkpoint file, and the user can provide a live video stream to perform real-time object tracking. This mode is useful +for applications such as surveillance systems or self-driving cars. + +[Track Examples](track.md){ .md-button .md-button--primary} + +## [Benchmark](benchmark.md) + +Benchmark mode is used to profile the speed and accuracy of various export formats for YOLOv8. The benchmarks provide +information on the size of the exported format, its `mAP50-95` metrics (for object detection and segmentation) +or `accuracy_top5` metrics (for classification), and the inference time in milliseconds per image across various export +formats like ONNX, OpenVINO, TensorRT and others. This information can help users choose the optimal export format for +their specific use case based on their requirements for speed and accuracy. + +[Benchmark Examples](benchmark.md){ .md-button .md-button--primary} diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/predict.md b/src/train_utils/train_models/models/ultralytics/docs/modes/predict.md new file mode 100644 index 0000000..ffc8722 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/predict.md @@ -0,0 +1,180 @@ + + +Inference or prediction of a task returns a list of `Results` objects. Alternatively, in the streaming mode, it returns +a generator of `Results` objects which is memory efficient. Streaming mode can be enabled by passing `stream=True` in +predictor's call method. + +!!! 
example "Predict" + + === "Return a List" + + ```python + inputs = [img, img] # list of np arrays + results = model(inputs) # List of Results objects + + for result in results: + boxes = result.boxes # Boxes object for bbox outputs + masks = result.masks # Masks object for segmenation masks outputs + probs = result.probs # Class probabilities for classification outputs + ``` + + === "Return a Generator" + + ```python + inputs = [img, img] # list of numpy arrays + results = model(inputs, stream=True) # generator of Results objects + + for r in results: + boxes = r.boxes # Boxes object for bbox outputs + masks = r.masks # Masks object for segmenation masks outputs + probs = r.probs # Class probabilities for classification outputs + ``` + +## Sources + +YOLOv8 can run inference on a variety of sources. The table below lists the various sources that can be used as input +for YOLOv8, along with the required format and notes. Sources include images, URLs, PIL images, OpenCV, numpy arrays, +torch tensors, CSV files, videos, directories, globs, YouTube videos, and streams. The table also indicates whether each +source can be used as a stream and the model argument required for that source. + +| source | stream | model(arg) | type | notes | +|------------|---------|--------------------------------------------|----------------|------------------| +| image | | `'im.jpg'` | `str`, `Path` | | +| URL | | `'https://ultralytics.com/images/bus.jpg'` | `str` | | +| screenshot | | `'screen'` | `str` | | +| PIL | | `Image.open('im.jpg')` | `PIL.Image` | HWC, RGB | +| OpenCV | | `cv2.imread('im.jpg')[:,:,::-1]` | `np.ndarray` | HWC, BGR to RGB | +| numpy | | `np.zeros((640,1280,3))` | `np.ndarray` | HWC | +| torch | | `torch.zeros(16,3,320,640)` | `torch.Tensor` | BCHW, RGB | +| CSV | | `'sources.csv'` | `str`, `Path` | RTSP, RTMP, HTTP | +| video | ✓ | `'vid.mp4'` | `str`, `Path` | | +| directory | ✓ | `'path/'` | `str`, `Path` | | +| glob | ✓ | `'path/*.jpg'` | `str` | Use `*` operator | +| YouTube | ✓ | `'https://youtu.be/Zgi9g1ksQHc'` | `str` | | +| stream | ✓ | `'rtsp://example.com/media.mp4'` | `str` | RTSP, RTMP, HTTP | + +## Image Formats + +For images, YOLOv8 supports a variety of image formats defined +in [yolo/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/data/utils.py). 
The +following suffixes are valid for images: + +| Image Suffixes | Example Predict Command | Reference | +|----------------|----------------------------------|-------------------------------------------------------------------------------| +| .bmp | `yolo predict source=image.bmp` | [Microsoft BMP File Format](https://en.wikipedia.org/wiki/BMP_file_format) | +| .dng | `yolo predict source=image.dng` | [Adobe DNG](https://www.adobe.com/products/photoshop/extend.displayTab2.html) | +| .jpeg | `yolo predict source=image.jpeg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| .jpg | `yolo predict source=image.jpg` | [JPEG](https://en.wikipedia.org/wiki/JPEG) | +| .mpo | `yolo predict source=image.mpo` | [Multi Picture Object](https://fileinfo.com/extension/mpo) | +| .png | `yolo predict source=image.png` | [Portable Network Graphics](https://en.wikipedia.org/wiki/PNG) | +| .tif | `yolo predict source=image.tif` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| .tiff | `yolo predict source=image.tiff` | [Tag Image File Format](https://en.wikipedia.org/wiki/TIFF) | +| .webp | `yolo predict source=image.webp` | [WebP](https://en.wikipedia.org/wiki/WebP) | +| .pfm | `yolo predict source=image.pfm` | [Portable FloatMap](https://en.wikipedia.org/wiki/Netpbm#File_formats) | + +## Video Formats + +For videos, YOLOv8 also supports a variety of video formats defined +in [yolo/data/utils.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/data/utils.py). The +following suffixes are valid for videos: + +| Video Suffixes | Example Predict Command | Reference | +|----------------|----------------------------------|----------------------------------------------------------------------------------| +| .asf | `yolo predict source=video.asf` | [Advanced Systems Format](https://en.wikipedia.org/wiki/Advanced_Systems_Format) | +| .avi | `yolo predict source=video.avi` | [Audio Video Interleave](https://en.wikipedia.org/wiki/Audio_Video_Interleave) | +| .gif | `yolo predict source=video.gif` | [Graphics Interchange Format](https://en.wikipedia.org/wiki/GIF) | +| .m4v | `yolo predict source=video.m4v` | [MPEG-4 Part 14](https://en.wikipedia.org/wiki/M4V) | +| .mkv | `yolo predict source=video.mkv` | [Matroska](https://en.wikipedia.org/wiki/Matroska) | +| .mov | `yolo predict source=video.mov` | [QuickTime File Format](https://en.wikipedia.org/wiki/QuickTime_File_Format) | +| .mp4 | `yolo predict source=video.mp4` | [MPEG-4 Part 14 - Wikipedia](https://en.wikipedia.org/wiki/MPEG-4_Part_14) | +| .mpeg | `yolo predict source=video.mpeg` | [MPEG-1 Part 2](https://en.wikipedia.org/wiki/MPEG-1) | +| .mpg | `yolo predict source=video.mpg` | [MPEG-1 Part 2](https://en.wikipedia.org/wiki/MPEG-1) | +| .ts | `yolo predict source=video.ts` | [MPEG Transport Stream](https://en.wikipedia.org/wiki/MPEG_transport_stream) | +| .wmv | `yolo predict source=video.wmv` | [Windows Media Video](https://en.wikipedia.org/wiki/Windows_Media_Video) | +| .webm | `yolo predict source=video.webm` | [WebM Project](https://en.wikipedia.org/wiki/WebM) | + +## Working with Results + +Results object consists of these component objects: + +- `Results.boxes`: `Boxes` object with properties and methods for manipulating bboxes +- `Results.masks`: `Masks` object used to index masks or to get segment coordinates. +- `Results.probs`: `torch.Tensor` containing the class probabilities/logits. +- `Results.orig_img`: Original image loaded in memory. 
+- `Results.path`: `Path` containing the path to input image + +Each result is composed of torch.Tensor by default, in which you can easily use following functionality: + +```python +results = results.cuda() +results = results.cpu() +results = results.to("cpu") +results = results.numpy() +``` + +### Boxes + +`Boxes` object can be used index, manipulate and convert bboxes to different formats. The box format conversion +operations are cached, which means they're only calculated once per object and those values are reused for future calls. + +- Indexing a `Boxes` objects returns a `Boxes` object + +```python +results = model(inputs) +boxes = results[0].boxes +box = boxes[0] # returns one box +box.xyxy +``` + +- Properties and conversions + +```python +boxes.xyxy # box with xyxy format, (N, 4) +boxes.xywh # box with xywh format, (N, 4) +boxes.xyxyn # box with xyxy format but normalized, (N, 4) +boxes.xywhn # box with xywh format but normalized, (N, 4) +boxes.conf # confidence score, (N, 1) +boxes.cls # cls, (N, 1) +boxes.data # raw bboxes tensor, (N, 6) or boxes.boxes . +``` + +### Masks + +`Masks` object can be used index, manipulate and convert masks to segments. The segment conversion operation is cached. + +```python +results = model(inputs) +masks = results[0].masks # Masks object +masks.segments # bounding coordinates of masks, List[segment] * N +masks.data # raw masks tensor, (N, H, W) or masks.masks +``` + +### probs + +`probs` attribute of `Results` class is a `Tensor` containing class probabilities of a classification operation. + +```python +results = model(inputs) +results[0].probs # cls prob, (num_class, ) +``` + +Class reference documentation for `Results` module and its components can be found [here](../reference/results.md) + +## Plotting results + +You can use `plot()` function of `Result` object to plot results on in image object. It plots all components(boxes, +masks, classification logits, etc.) found in the results object + +```python +res = model(img) +res_plotted = res[0].plot() +cv2.imshow("result", res_plotted) +``` + +!!! example "`plot()` arguments" + + `show_conf (bool)`: Show confidence + + `line_width (Float)`: The line width of boxes. Automatically scaled to img size if not provided + + `font_size (Float)`: The font size of . Automatically scaled to img size if not provided diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/track.md b/src/train_utils/train_models/models/ultralytics/docs/modes/track.md new file mode 100644 index 0000000..8058f38 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/track.md @@ -0,0 +1,96 @@ + + +Object tracking is a task that involves identifying the location and class of objects, then assigning a unique ID to +that detection in video streams. + +The output of tracker is the same as detection with an added object ID. + +## Available Trackers + +The following tracking algorithms have been implemented and can be enabled by passing `tracker=tracker_type.yaml` + +* [BoT-SORT](https://github.com/NirAharon/BoT-SORT) - `botsort.yaml` +* [ByteTrack](https://github.com/ifzhang/ByteTrack) - `bytetrack.yaml` + +The default tracker is BoT-SORT. + +## Tracking + +Use a trained YOLOv8n/YOLOv8n-seg model to run tracker on video streams. + +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official detection model + model = YOLO('yolov8n-seg.pt') # load an official segmentation model + model = YOLO('path/to/best.pt') # load a custom model + + # Track with the model + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True) + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", show=True, tracker="bytetrack.yaml") + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" # official detection model + yolo track model=yolov8n-seg.pt source=... # official segmentation model + yolo track model=path/to/best.pt source=... # custom model + yolo track model=path/to/best.pt tracker="bytetrack.yaml" # bytetrack tracker + + ``` + +As in the above usage, we support both the detection and segmentation models for tracking and the only thing you need to +do is loading the corresponding (detection or segmentation) model. + +## Configuration + +### Tracking + +Tracking shares the configuration with predict, i.e `conf`, `iou`, `show`. More configurations please refer +to [predict page](https://docs.ultralytics.com/modes/predict/). +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", conf=0.3, iou=0.5, show=True) + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" conf=0.3, iou=0.5 show + + ``` + +### Tracker + +We also support using a modified tracker config file, just copy a config file i.e `custom_tracker.yaml` +from [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) and modify +any configurations(expect the `tracker_type`) you need to. +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') + results = model.track(source="https://youtu.be/Zgi9g1ksQHc", tracker='custom_tracker.yaml') + ``` + === "CLI" + + ```bash + yolo track model=yolov8n.pt source="https://youtu.be/Zgi9g1ksQHc" tracker='custom_tracker.yaml' + ``` + +Please refer to [ultralytics/tracker/cfg](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/tracker/cfg) +page + diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/train.md b/src/train_utils/train_models/models/ultralytics/docs/modes/train.md new file mode 100644 index 0000000..7b551fe --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/train.md @@ -0,0 +1,97 @@ + + +**Train mode** is used for training a YOLOv8 model on a custom dataset. In this mode, the model is trained using the +specified dataset and hyperparameters. The training process involves optimizing the model's parameters so that it can +accurately predict the classes and locations of objects in an image. + +!!! tip "Tip" + + * YOLOv8 datasets like COCO, VOC, ImageNet and many others automatically download on first use, i.e. `yolo train data=coco.yaml` + +## Usage Examples + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. See Arguments section below for a full list of +training arguments. + +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from YAML + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + ``` + +## Arguments + +Training settings for YOLO models refer to the various hyperparameters and configurations used to train the model on a +dataset. These settings can affect the model's performance, speed, and accuracy. Some common YOLO training settings +include the batch size, learning rate, momentum, and weight decay. Other factors that may affect the training process +include the choice of optimizer, the choice of loss function, and the size and composition of the training dataset. It +is important to carefully tune and experiment with these settings to achieve the best possible performance for a given +task. + +| Key | Value | Description | +|-------------------|----------|-----------------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `epochs` | `100` | number of epochs to train for | +| `patience` | `50` | epochs to wait for no observable improvement for early stopping of training | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `imgsz` | `640` | size of input images as integer or w,h | +| `save` | `True` | save train checkpoints and predict results | +| `save_period` | `-1` | Save checkpoint every x epochs (disabled if < 1) | +| `cache` | `False` | True/ram, disk or False. Use cache for data loading | +| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu | +| `workers` | `8` | number of worker threads for data loading (per RANK if DDP) | +| `project` | `None` | project name | +| `name` | `None` | experiment name | +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `pretrained` | `False` | whether to use a pretrained model | +| `optimizer` | `'SGD'` | optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp'] | +| `verbose` | `False` | whether to print verbose output | +| `seed` | `0` | random seed for reproducibility | +| `deterministic` | `True` | whether to enable deterministic mode | +| `single_cls` | `False` | train multi-class data as single-class | +| `image_weights` | `False` | use weighted image selection for training | +| `rect` | `False` | support rectangular training | +| `cos_lr` | `False` | use cosine learning rate scheduler | +| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs | +| `resume` | `False` | resume training from last checkpoint | +| `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] | +| `lr0` | `0.01` | initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3) | +| `lrf` | `0.01` | final learning rate (lr0 * lrf) | +| `momentum` | `0.937` | SGD momentum/Adam beta1 | +| `weight_decay` | `0.0005` | optimizer weight decay 5e-4 | +| `warmup_epochs` | `3.0` | warmup epochs (fractions ok) | +| `warmup_momentum` | `0.8` | warmup initial momentum | +| `warmup_bias_lr` | `0.1` | warmup initial bias lr | +| `box` | `7.5` | box loss gain | +| `cls` | `0.5` | cls loss gain (scale with pixels) | +| `dfl` | `1.5` | dfl loss gain | +| `fl_gamma` | `0.0` | focal loss gamma (efficientDet default gamma=1.5) | +| `label_smoothing` | `0.0` | label smoothing (fraction) | +| `nbs` | `64` | nominal batch size | +| `overlap_mask` | `True` | masks should overlap during training (segment train only) | +| `mask_ratio` | `4` | mask downsample ratio (segment train only) | +| `dropout` | `0.0` | use dropout regularization (classify train only) | +| `val` | `True` | validate/test during training | diff --git a/src/train_utils/train_models/models/ultralytics/docs/modes/val.md b/src/train_utils/train_models/models/ultralytics/docs/modes/val.md new file mode 100644 index 0000000..da5e7f1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/modes/val.md @@ -0,0 +1,86 @@ + + +**Val mode** is used for validating a YOLOv8 model after it has been trained. In this mode, the model is evaluated on a +validation set to measure its accuracy and generalization performance. This mode can be used to tune the hyperparameters +of the model to improve its performance. + +!!! tip "Tip" + + * YOLOv8 models automatically remember their training settings, so you can validate a model at the same image size and on the original dataset easily with just `yolo val model=yolov8n.pt` or `model('yolov8n.pt').val()` + +## Usage Examples + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. See Arguments section below for a full list of export arguments. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Arguments + +Validation settings for YOLO models refer to the various hyperparameters and configurations used to +evaluate the model's performance on a validation dataset. These settings can affect the model's performance, speed, and +accuracy. Some common YOLO validation settings include the batch size, the frequency with which validation is performed +during training, and the metrics used to evaluate the model's performance. Other factors that may affect the validation +process include the size and composition of the validation dataset and the specific task the model is being used for. It +is important to carefully tune and experiment with these settings to ensure that the model is performing well on the +validation dataset and to detect and prevent overfitting. 
+ +| Key | Value | Description | +|---------------|---------|--------------------------------------------------------------------| +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `save_json` | `False` | save results to JSON file | +| `save_hybrid` | `False` | save hybrid version of labels (labels + additional predictions) | +| `conf` | `0.001` | object confidence threshold for detection | +| `iou` | `0.6` | intersection over union (IoU) threshold for NMS | +| `max_det` | `300` | maximum number of detections per image | +| `half` | `True` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `dnn` | `False` | use OpenCV DNN for ONNX inference | +| `plots` | `False` | show plots during training | +| `rect` | `False` | support rectangular evaluation | +| `split` | `val` | dataset split to use for validation, i.e. 'val', 'test' or 'train' | + +## Export Formats + +Available YOLOv8 export formats are in the table below. You can export to any format using the `format` argument, +i.e. `format='onnx'` or `format='engine'`. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/src/train_utils/train_models/models/ultralytics/docs/quickstart.md b/src/train_utils/train_models/models/ultralytics/docs/quickstart.md new file mode 100644 index 0000000..3eb4443 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/quickstart.md @@ -0,0 +1,73 @@ +## Install + +Install YOLOv8 via the `ultralytics` pip package for the latest stable release or by cloning +the [https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) repository for the most +up-to-date version. + +!!! example "Pip install method (recommended)" + + ```bash + pip install ultralytics + ``` + +!!! example "Git clone method (for development)" + + ```bash + git clone https://github.com/ultralytics/ultralytics + cd ultralytics + pip install -e '.[dev]' + ``` + See contributing section to know more about contributing to the project + +## Use with CLI + +The YOLO command line interface (CLI) lets you simply train, validate or infer models on various tasks and versions. 
+CLI requires no customization or code. You can simply run all tasks from the terminal with the `yolo` command. + +!!! example + + === "Syntax" + ```bash + yolo task=detect mode=train model=yolov8n.yaml args... + classify predict yolov8n-cls.yaml args... + segment val yolov8n-seg.yaml args... + export yolov8n.pt format=onnx args... + ``` + + === "Example training" + ```bash + yolo detect train model=yolov8n.pt data=coco128.yaml device=0 + ``` + === "Example Multi-GPU training" + ```bash + yolo detect train model=yolov8n.pt data=coco128.yaml device=\'0,1,2,3\' + ``` + +[CLI Guide](usage/cli.md){ .md-button .md-button--primary} + +## Use with Python + +Python usage allows users to easily use YOLOv8 inside their Python projects. It provides functions for loading and +running the model, as well as for processing the model's output. The interface is designed to be easy to use, so that +users can quickly implement object detection in their projects. + +Overall, the Python interface is a useful tool for anyone looking to incorporate object detection, segmentation or +classification into their Python projects using YOLOv8. + +!!! example + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from scratch + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + + # Use the model + results = model.train(data='coco128.yaml', epochs=3) # train the model + results = model.val() # evaluate model performance on the validation set + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + success = model.export(format='onnx') # export the model to ONNX format + ``` + +[Python Guide](usage/python.md){.md-button .md-button--primary} diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/base_pred.md b/src/train_utils/train_models/models/ultralytics/docs/reference/base_pred.md new file mode 100644 index 0000000..5a61c50 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/base_pred.md @@ -0,0 +1,8 @@ +All task Predictors are inherited from `BasePredictors` class that contains the model validation routine boilerplate. +You can override any function of these Trainers to suit your needs. + +--- + +### BasePredictor API Reference + +:::ultralytics.yolo.engine.predictor.BasePredictor \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/base_trainer.md b/src/train_utils/train_models/models/ultralytics/docs/reference/base_trainer.md new file mode 100644 index 0000000..a93af69 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/base_trainer.md @@ -0,0 +1,8 @@ +All task Trainers are inherited from `BaseTrainer` class that contains the model training and optimization routine +boilerplate. You can override any function of these Trainers to suit your needs. + +--- + +### BaseTrainer API Reference + +:::ultralytics.yolo.engine.trainer.BaseTrainer \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/base_val.md b/src/train_utils/train_models/models/ultralytics/docs/reference/base_val.md new file mode 100644 index 0000000..37b7d9c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/base_val.md @@ -0,0 +1,8 @@ +All task Validators are inherited from `BaseValidator` class that contains the model validation routine boilerplate. You +can override any function of these Trainers to suit your needs. 
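As an illustration of that extension point, a hedged sketch of a custom validator is shown below; `preprocess` is used only as an example of a method you might override, so confirm the exact hook names against the API reference that follows.

```python
from ultralytics.yolo.engine.validator import BaseValidator


class CustomValidator(BaseValidator):
    """Example validator that customizes one step of the validation loop."""

    def preprocess(self, batch):  # example hook only; check the reference below for available methods
        batch = super().preprocess(batch)
        # add custom batch handling here, e.g. extra filtering or logging
        return batch
```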
+ +--- + +### BaseValidator API Reference + +:::ultralytics.yolo.engine.validator.BaseValidator \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/exporter.md b/src/train_utils/train_models/models/ultralytics/docs/reference/exporter.md new file mode 100644 index 0000000..4ce31e1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/exporter.md @@ -0,0 +1,3 @@ +### Exporter API Reference + +:::ultralytics.yolo.engine.exporter.Exporter \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/model.md b/src/train_utils/train_models/models/ultralytics/docs/reference/model.md new file mode 100644 index 0000000..6edc97b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/model.md @@ -0,0 +1 @@ +::: ultralytics.yolo.engine.model diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/nn.md b/src/train_utils/train_models/models/ultralytics/docs/reference/nn.md new file mode 100644 index 0000000..0c7b1a8 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/nn.md @@ -0,0 +1,19 @@ +# nn Module + +Ultralytics nn module contains 3 main components: + +1. **AutoBackend**: A module that can run inference on all popular model formats +2. **BaseModel**: `BaseModel` class defines the operations supported by tasks like Detection and Segmentation +3. **modules**: Optimized and reusable neural network blocks built on PyTorch. + +## AutoBackend + +:::ultralytics.nn.autobackend.AutoBackend + +## BaseModel + +:::ultralytics.nn.tasks.BaseModel + +## Modules + +TODO \ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/ops.md b/src/train_utils/train_models/models/ultralytics/docs/reference/ops.md new file mode 100644 index 0000000..8c4f1b7 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/ops.md @@ -0,0 +1,208 @@ +This module contains optimized deep learning related operations used in the Ultralytics YOLO framework + +## Non-max suppression + +:::ultralytics.yolo.utils.ops.non_max_suppression +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## Scale boxes + +:::ultralytics.yolo.utils.ops.scale_boxes +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## Scale image + +:::ultralytics.yolo.utils.ops.scale_image +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## clip boxes + +:::ultralytics.yolo.utils.ops.clip_boxes +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +# Box Format Conversion + +## xyxy2xywh + +:::ultralytics.yolo.utils.ops.xyxy2xywh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywh2xyxy + +:::ultralytics.yolo.utils.ops.xywh2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywhn2xyxy + +:::ultralytics.yolo.utils.ops.xywhn2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xyxy2xywhn + +:::ultralytics.yolo.utils.ops.xyxy2xywhn +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xyn2xy + +:::ultralytics.yolo.utils.ops.xyn2xy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## xywh2ltwh + +:::ultralytics.yolo.utils.ops.xywh2ltwh +handler: python +options: +show_source: false +show_root_toc_entry: false 
+--- + +## xyxy2ltwh + +:::ultralytics.yolo.utils.ops.xyxy2ltwh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## ltwh2xywh + +:::ultralytics.yolo.utils.ops.ltwh2xywh +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## ltwh2xyxy + +:::ultralytics.yolo.utils.ops.ltwh2xyxy +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## segment2box + +:::ultralytics.yolo.utils.ops.segment2box +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +# Mask Operations + +## resample_segments + +:::ultralytics.yolo.utils.ops.resample_segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## crop_mask + +:::ultralytics.yolo.utils.ops.crop_mask +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask_upsample + +:::ultralytics.yolo.utils.ops.process_mask_upsample +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask + +:::ultralytics.yolo.utils.ops.process_mask +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## process_mask_native + +:::ultralytics.yolo.utils.ops.process_mask_native +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## scale_segments + +:::ultralytics.yolo.utils.ops.scale_segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## masks2segments + +:::ultralytics.yolo.utils.ops.masks2segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + +## clip_segments + +:::ultralytics.yolo.utils.ops.clip_segments +handler: python +options: +show_source: false +show_root_toc_entry: false +--- + + + + + diff --git a/src/train_utils/train_models/models/ultralytics/docs/reference/results.md b/src/train_utils/train_models/models/ultralytics/docs/reference/results.md new file mode 100644 index 0000000..e222ec4 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/reference/results.md @@ -0,0 +1,11 @@ +### Results API Reference + +:::ultralytics.yolo.engine.results.Results + +### Boxes API Reference + +:::ultralytics.yolo.engine.results.Boxes + +### Masks API Reference + +:::ultralytics.yolo.engine.results.Masks diff --git a/src/train_utils/train_models/models/ultralytics/docs/stylesheets/style.css b/src/train_utils/train_models/models/ultralytics/docs/stylesheets/style.css new file mode 100644 index 0000000..4bed4e1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/stylesheets/style.css @@ -0,0 +1,14 @@ +th, td { + border: 0.5px solid var(--md-typeset-table-color); + border-spacing: 0px; + border-bottom: none; + border-left: none; + border-top: none; +} +.md-typeset__table { + min-width: 100%; + line-height: 1; +} +.md-typeset table:not([class]) { + display: table; +} diff --git a/src/train_utils/train_models/models/ultralytics/docs/tasks/classify.md b/src/train_utils/train_models/models/ultralytics/docs/tasks/classify.md new file mode 100644 index 0000000..3c8cb6f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/tasks/classify.md @@ -0,0 +1,169 @@ +Image classification is the simplest of the three tasks and involves classifying an entire image into one of a set of +predefined classes. + + + +The output of an image classifier is a single class label and a confidence score. 
Image classification is useful when you need to know only what class an image belongs to and don't need to know where
objects of that class are located or what their exact shape is.

!!! tip "Tip"

    YOLOv8 Classify models use the `-cls` suffix, i.e. `yolov8n-cls.pt` and are pretrained on [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml).

## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8)

YOLOv8 pretrained Classify models are shown here. Detect, Segment and Pose models are pretrained on
the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify
models are pretrained on
the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset.

[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest
Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

| Model                                                                                        | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) at 640 |
|----------------------------------------------------------------------------------------------|------------------|-------------|-------------|---------------------------|--------------------------------|---------------|---------------------|
| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224              | 66.6        | 87.0        | 12.9                      | 0.31                           | 2.7           | 4.3                 |
| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224              | 72.3        | 91.1        | 23.4                      | 0.35                           | 6.4           | 13.5                |
| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224              | 76.4        | 93.2        | 85.4                      | 0.62                           | 17.0          | 42.7                |
| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224              | 78.0        | 94.1        | 163.0                     | 0.87                           | 37.5          | 99.7                |
| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224              | 78.4        | 94.3        | 232.0                     | 1.01                           | 57.4          | 154.8               |

- **acc** values are model accuracies on the [ImageNet](https://www.image-net.org/) dataset validation set.
  <br>Reproduce by `yolo val classify data=path/to/ImageNet device=0`
- **Speed** averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/)
  instance.
  <br>
Reproduce by `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu` + +## Train + +Train YOLOv8n-cls on the MNIST160 dataset for 100 epochs at image size 64. For a full list of available arguments +see the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.yaml') # build a new model from YAML + model = YOLO('yolov8n-cls.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n-cls.yaml').load('yolov8n-cls.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='mnist160', epochs=100, imgsz=64) + ``` + + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo classify train data=mnist160 model=yolov8n-cls.yaml epochs=100 imgsz=64 + + # Start training from a pretrained *.pt model + yolo classify train data=mnist160 model=yolov8n-cls.pt epochs=100 imgsz=64 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo classify train data=mnist160 model=yolov8n-cls.yaml pretrained=yolov8n-cls.pt epochs=100 imgsz=64 + ``` + +## Val + +Validate trained YOLOv8n-cls model accuracy on the MNIST160 dataset. No argument need to passed as the `model` retains +it's training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.top1 # top1 accuracy + metrics.top5 # top5 accuracy + ``` + === "CLI" + + ```bash + yolo classify val model=yolov8n-cls.pt # val official model + yolo classify val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n-cls model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo classify predict model=yolov8n-cls.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo classify predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n-cls model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-cls.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n-cls.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-cls export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-cls.onnx`. 
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|-------------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n-cls.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-cls.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-cls.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-cls_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-cls.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-cls.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-cls_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-cls.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-cls.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-cls_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-cls_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-cls_paddle_model/` | ✅ | + diff --git a/src/train_utils/train_models/models/ultralytics/docs/tasks/detect.md b/src/train_utils/train_models/models/ultralytics/docs/tasks/detect.md new file mode 100644 index 0000000..34b580d --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/tasks/detect.md @@ -0,0 +1,169 @@ +Object detection is a task that involves identifying the location and class of objects in an image or video stream. + + + +The output of an object detector is a set of bounding boxes that enclose the objects in the image, along with class +labels +and confidence scores for each box. Object detection is a good choice when you need to identify objects of interest in a +scene, but don't need to know exactly where the object is or its exact shape. + +!!! tip "Tip" + + YOLOv8 Detect models are the default YOLOv8 models, i.e. `yolov8n.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml). + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Detect models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | +|--------------------------------------------------------------------------------------|-----------------------|----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 | +| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 | +| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 | +| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 | +| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 | + +- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. +
Reproduce by `yolo val detect data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val detect data=coco128.yaml batch=1 device=0|cpu` + +## Train + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments see +the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from YAML + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + ``` + +## Val + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8 export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n.onnx`. 
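As a short sketch, the exported detection model can be loaded and queried exactly like the original `.pt` weights, and the returned `Results` expose the same box attributes described in the Val and Predict sections above:

```python
from ultralytics import YOLO

# Load the exported ONNX model and run inference on it directly
onnx_model = YOLO('yolov8n.onnx')
results = onnx_model('https://ultralytics.com/images/bus.jpg')

for result in results:
    print(result.boxes.xyxy)  # bounding boxes in xyxy format
    print(result.boxes.conf)  # confidence scores
    print(result.boxes.cls)   # class indices
```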
+ +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/src/train_utils/train_models/models/ultralytics/docs/tasks/index.md b/src/train_utils/train_models/models/ultralytics/docs/tasks/index.md new file mode 100644 index 0000000..3276d53 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/tasks/index.md @@ -0,0 +1,46 @@ +# Ultralytics YOLOv8 Tasks + +YOLOv8 is an AI framework that supports multiple computer vision **tasks**. The framework can be used to +perform [detection](detect.md), [segmentation](segment.md), [classification](classify.md), +and [keypoints](keypoints.md) detection. Each of these tasks has a different objective and use case. + + + +## [Detection](detect.md) + +Detection is the primary task supported by YOLOv8. It involves detecting objects in an image or video frame and drawing +bounding boxes around them. The detected objects are classified into different categories based on their features. +YOLOv8 can detect multiple objects in a single image or video frame with high accuracy and speed. + +[Detection Examples](detect.md){ .md-button .md-button--primary} + +## [Segmentation](segment.md) + +Segmentation is a task that involves segmenting an image into different regions based on the content of the image. Each +region is assigned a label based on its content. This task is useful in applications such as image segmentation and +medical imaging. YOLOv8 uses a variant of the U-Net architecture to perform segmentation. + +[Segmentation Examples](segment.md){ .md-button .md-button--primary} + +## [Classification](classify.md) + +Classification is a task that involves classifying an image into different categories. YOLOv8 can be used to classify +images based on their content. It uses a variant of the EfficientNet architecture to perform classification. + +[Classification Examples](classify.md){ .md-button .md-button--primary} + + + +## Conclusion + +YOLOv8 supports multiple tasks, including detection, segmentation, classification, and keypoints detection. Each of +these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose +the appropriate task for your computer vision application. 
\ No newline at end of file diff --git a/src/train_utils/train_models/models/ultralytics/docs/tasks/keypoints.md b/src/train_utils/train_models/models/ultralytics/docs/tasks/keypoints.md new file mode 100644 index 0000000..d9f2484 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/tasks/keypoints.md @@ -0,0 +1,149 @@ +Key Point Estimation is a task that involves identifying the location of specific points in an image, usually referred +to as keypoints. The keypoints can represent various parts of the object such as joints, landmarks, or other distinctive +features. The locations of the keypoints are usually represented as a set of 2D `[x, y]` or 3D `[x, y, visible]` +coordinates. + + + +The output of a keypoint detector is a set of points that represent the keypoints on the object in the image, usually +along with the confidence scores for each point. Keypoint estimation is a good choice when you need to identify specific +parts of an object in a scene, and their location in relation to each other. + +!!! tip "Tip" + + YOLOv8 _keypoints_ models use the `-kpts` suffix, i.e. `yolov8n-kpts.pt`. These models are trained on the COCO dataset and are suitable for a variety of keypoint estimation tasks. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8){ .md-button .md-button--primary} + +## Train TODO + +Train an OpenPose model on a custom dataset of keypoints using the OpenPose framework. For more information on how to +train an OpenPose model on a custom dataset, see the OpenPose Training page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from YAML + model = YOLO('yolov8n.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo detect train data=coco128.yaml model=yolov8n.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo detect train data=coco128.yaml model=yolov8n.yaml pretrained=yolov8n.pt epochs=100 imgsz=640 + ``` + +## Val TODO + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95 + metrics.box.map50 # map50 + metrics.box.map75 # map75 + metrics.box.maps # a list contains map50-95 of each category + ``` + === "CLI" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Predict TODO + +Use a trained YOLOv8n model to run predictions on images. + +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export TODO + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-pose export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-pose.onnx`. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|---------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | ✅ | diff --git a/src/train_utils/train_models/models/ultralytics/docs/tasks/segment.md b/src/train_utils/train_models/models/ultralytics/docs/tasks/segment.md new file mode 100644 index 0000000..2ff7f58 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/tasks/segment.md @@ -0,0 +1,175 @@ +Instance segmentation goes a step further than object detection and involves identifying individual objects in an image +and segmenting them from the rest of the image. + + + +The output of an instance segmentation model is a set of masks or +contours that outline each object in the image, along with class labels and confidence scores for each object. Instance +segmentation is useful when you need to know not only where objects are in an image, but also what their exact shape is. + +!!! tip "Tip" + + YOLOv8 Segment models use the `-seg` suffix, i.e. 
`yolov8n-seg.pt` and are pretrained on [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml). + +## [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models/v8) + +YOLOv8 pretrained Segment models are shown here. Detect, Segment and Pose models are pretrained on +the [COCO](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/coco.yaml) dataset, while Classify +models are pretrained on +the [ImageNet](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/datasets/ImageNet.yaml) dataset. + +[Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/models) download automatically from the latest +Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use. + +| Model | size
<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>
(B) | +|----------------------------------------------------------------------------------------------|-----------------------|----------------------|-----------------------|--------------------------------|-------------------------------------|--------------------|-------------------| +| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 | +| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 | +| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 | +| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 | +| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 | + +- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset. +
Reproduce by `yolo val segment data=coco.yaml device=0` +- **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) + instance. +
Reproduce by `yolo val segment data=coco128-seg.yaml batch=1 device=0|cpu` + +## Train + +Train YOLOv8n-seg on the COCO128-seg dataset for 100 epochs at image size 640. For a full list of available +arguments see the [Configuration](../usage/cfg.md) page. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.yaml') # build a new model from YAML + model = YOLO('yolov8n-seg.pt') # load a pretrained model (recommended for training) + model = YOLO('yolov8n-seg.yaml').load('yolov8n.pt') # build from YAML and transfer weights + + # Train the model + model.train(data='coco128-seg.yaml', epochs=100, imgsz=640) + ``` + === "CLI" + + ```bash + # Build a new model from YAML and start training from scratch + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.yaml epochs=100 imgsz=640 + + # Start training from a pretrained *.pt model + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.pt epochs=100 imgsz=640 + + # Build a new model from YAML, transfer pretrained weights to it and start training + yolo segment train data=coco128-seg.yaml model=yolov8n-seg.yaml pretrained=yolov8n-seg.pt epochs=100 imgsz=640 + ``` + +## Val + +Validate trained YOLOv8n-seg model accuracy on the COCO128-seg dataset. No argument need to passed as the `model` +retains it's training `data` and arguments as model attributes. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Validate the model + metrics = model.val() # no arguments needed, dataset and settings remembered + metrics.box.map # map50-95(B) + metrics.box.map50 # map50(B) + metrics.box.map75 # map75(B) + metrics.box.maps # a list contains map50-95(B) of each category + metrics.seg.map # map50-95(M) + metrics.seg.map50 # map50(M) + metrics.seg.map75 # map75(M) + metrics.seg.maps # a list contains map50-95(M) of each category + ``` + === "CLI" + + ```bash + yolo segment val model=yolov8n-seg.pt # val official model + yolo segment val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n-seg model to run predictions on images. + +!!! example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom model + + # Predict with the model + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + ``` + === "CLI" + + ```bash + yolo segment predict model=yolov8n-seg.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo segment predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +Read more details of `predict` in our [Predict](https://docs.ultralytics.com/modes/predict/) page. + +## Export + +Export a YOLOv8n-seg model to a different format like ONNX, CoreML, etc. + +!!! 
example "" + + === "Python" + + ```python + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n-seg.pt') # load an official model + model = YOLO('path/to/best.pt') # load a custom trained + + # Export the model + model.export(format='onnx') + ``` + === "CLI" + + ```bash + yolo export model=yolov8n-seg.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + +Available YOLOv8-seg export formats are in the table below. You can predict or validate directly on exported models, +i.e. `yolo predict model=yolov8n-seg.onnx`. + +| Format | `format` Argument | Model | Metadata | +|--------------------------------------------------------------------|-------------------|-------------------------------|----------| +| [PyTorch](https://pytorch.org/) | - | `yolov8n-seg.pt` | ✅ | +| [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n-seg.torchscript` | ✅ | +| [ONNX](https://onnx.ai/) | `onnx` | `yolov8n-seg.onnx` | ✅ | +| [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n-seg_openvino_model/` | ✅ | +| [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n-seg.engine` | ✅ | +| [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n-seg.mlmodel` | ✅ | +| [TF SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n-seg_saved_model/` | ✅ | +| [TF GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n-seg.pb` | ❌ | +| [TF Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n-seg.tflite` | ✅ | +| [TF Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n-seg_edgetpu.tflite` | ✅ | +| [TF.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n-seg_web_model/` | ✅ | +| [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n-seg_paddle_model/` | ✅ | + + diff --git a/src/train_utils/train_models/models/ultralytics/docs/usage/callbacks.md b/src/train_utils/train_models/models/ultralytics/docs/usage/callbacks.md new file mode 100644 index 0000000..b505df2 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/usage/callbacks.md @@ -0,0 +1,86 @@ +## Callbacks + +Ultralytics framework supports callbacks as entry points in strategic stages of train, val, export, and predict modes. +Each callback accepts a `Trainer`, `Validator`, or `Predictor` object depending on the operation type. All properties of +these objects can be found in Reference section of the docs. + +## Examples + +### Returning additional information with Prediction + +In this example, we want to return the original frame with each result object. Here's how we can do that + +```python +def on_predict_batch_end(predictor): + # results -> List[batch_size] + _, _, im0s, _, _ = predictor.batch + im0s = im0s if isinstance(im0s, list) else [im0s] + predictor.results = zip(predictor.results, im0s) + +model = YOLO(f'yolov8n.pt') +model.add_callback("on_predict_batch_end", on_predict_batch_end) +for (result, frame) in model.track/predict(): + pass +``` + +## All callbacks + +Here are all supported callbacks. 
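For reference, a runnable sketch of the prediction-callback example above, with the import included and `model.predict()` called explicitly in place of the `model.track/predict()` shorthand; the full list of supported callbacks follows.

```python
from ultralytics import YOLO

def on_predict_batch_end(predictor):
    # pair each Results object with the original input frame it came from
    _, _, im0s, _, _ = predictor.batch
    im0s = im0s if isinstance(im0s, list) else [im0s]
    predictor.results = zip(predictor.results, im0s)

model = YOLO('yolov8n.pt')
model.add_callback("on_predict_batch_end", on_predict_batch_end)

for result, frame in model.predict():  # or model.track()
    pass  # use each (Results, frame) pair here
```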
+ +### Trainer + +`on_pretrain_routine_start` + +`on_pretrain_routine_end` + +`on_train_start` + +`on_train_epoch_start` + +`on_train_batch_start` + +`optimizer_step` + +`on_before_zero_grad` + +`on_train_batch_end` + +`on_train_epoch_end` + +`on_fit_epoch_end` + +`on_model_save` + +`on_train_end` + +`on_params_update` + +`teardown` + +### Validator + +`on_val_start` + +`on_val_batch_start` + +`on_val_batch_end` + +`on_val_end` + +### Predictor + +`on_predict_start` + +`on_predict_batch_start` + +`on_predict_postprocess_end` + +`on_predict_batch_end` + +`on_predict_end` + +### Exporter + +`on_export_start` + +`on_export_end` diff --git a/src/train_utils/train_models/models/ultralytics/docs/usage/cfg.md b/src/train_utils/train_models/models/ultralytics/docs/usage/cfg.md new file mode 100644 index 0000000..2c7388c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/usage/cfg.md @@ -0,0 +1,246 @@ +YOLO settings and hyperparameters play a critical role in the model's performance, speed, and accuracy. These settings +and hyperparameters can affect the model's behavior at various stages of the model development process, including +training, validation, and prediction. + +YOLOv8 'yolo' CLI commands use the following syntax: + +!!! example "" + + === "CLI" + + ```bash + yolo TASK MODE ARGS + ``` + +Where: + +- `TASK` (optional) is one of `[detect, segment, classify, pose]`. If it is not passed explicitly YOLOv8 will try to + guess + the `TASK` from the model type. +- `MODE` (required) is one of `[train, val, predict, export, track, benchmark]` +- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. + For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` + GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml). + +#### Tasks + +YOLO models can be used for a variety of tasks, including detection, segmentation, classification and pose. These tasks +differ in the type of output they produce and the specific problem they are designed to solve. + +**Detect**: For identifying and localizing objects or regions of interest in an image or video. +**Segment**: For dividing an image or video into regions or pixels that correspond to different objects or classes. +**Classify**: For predicting the class label of an input image. +**Pose**: For identifying objects and estimating their keypoints in an image or video. + +| Key | Value | Description | +|--------|------------|-------------------------------------------------| +| `task` | `'detect'` | YOLO task, i.e. detect, segment, classify, pose | + +#### Modes + +YOLO models can be used in different modes depending on the specific problem you are trying to solve. These modes +include: + +**Train**: For training a YOLOv8 model on a custom dataset. +**Val**: For validating a YOLOv8 model after it has been trained. +**Predict**: For making predictions using a trained YOLOv8 model on new images or videos. +**Export**: For exporting a YOLOv8 model to a format that can be used for deployment. +**Track**: For tracking objects in real-time using a YOLOv8 model. +**Benchmark**: For benchmarking YOLOv8 exports (ONNX, TensorRT, etc.) speed and accuracy. + +| Key | Value | Description | +|--------|-----------|---------------------------------------------------------------| +| `mode` | `'train'` | YOLO mode, i.e. 
train, val, predict, export, track, benchmark | + +### Training + +Training settings for YOLO models refer to the various hyperparameters and configurations used to train the model on a +dataset. These settings can affect the model's performance, speed, and accuracy. Some common YOLO training settings +include the batch size, learning rate, momentum, and weight decay. Other factors that may affect the training process +include the choice of optimizer, the choice of loss function, and the size and composition of the training dataset. It +is important to carefully tune and experiment with these settings to achieve the best possible performance for a given +task. + +| Key | Value | Description | +|-------------------|----------|-----------------------------------------------------------------------------| +| `model` | `None` | path to model file, i.e. yolov8n.pt, yolov8n.yaml | +| `data` | `None` | path to data file, i.e. coco128.yaml | +| `epochs` | `100` | number of epochs to train for | +| `patience` | `50` | epochs to wait for no observable improvement for early stopping of training | +| `batch` | `16` | number of images per batch (-1 for AutoBatch) | +| `imgsz` | `640` | size of input images as integer or w,h | +| `save` | `True` | save train checkpoints and predict results | +| `save_period` | `-1` | Save checkpoint every x epochs (disabled if < 1) | +| `cache` | `False` | True/ram, disk or False. Use cache for data loading | +| `device` | `None` | device to run on, i.e. cuda device=0 or device=0,1,2,3 or device=cpu | +| `workers` | `8` | number of worker threads for data loading (per RANK if DDP) | +| `project` | `None` | project name | +| `name` | `None` | experiment name | +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `pretrained` | `False` | whether to use a pretrained model | +| `optimizer` | `'SGD'` | optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp'] | +| `verbose` | `False` | whether to print verbose output | +| `seed` | `0` | random seed for reproducibility | +| `deterministic` | `True` | whether to enable deterministic mode | +| `single_cls` | `False` | train multi-class data as single-class | +| `image_weights` | `False` | use weighted image selection for training | +| `rect` | `False` | support rectangular training | +| `cos_lr` | `False` | use cosine learning rate scheduler | +| `close_mosaic` | `10` | disable mosaic augmentation for final 10 epochs | +| `resume` | `False` | resume training from last checkpoint | +| `amp` | `True` | Automatic Mixed Precision (AMP) training, choices=[True, False] | +| `lr0` | `0.01` | initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3) | +| `lrf` | `0.01` | final learning rate (lr0 * lrf) | +| `momentum` | `0.937` | SGD momentum/Adam beta1 | +| `weight_decay` | `0.0005` | optimizer weight decay 5e-4 | +| `warmup_epochs` | `3.0` | warmup epochs (fractions ok) | +| `warmup_momentum` | `0.8` | warmup initial momentum | +| `warmup_bias_lr` | `0.1` | warmup initial bias lr | +| `box` | `7.5` | box loss gain | +| `cls` | `0.5` | cls loss gain (scale with pixels) | +| `dfl` | `1.5` | dfl loss gain | +| `fl_gamma` | `0.0` | focal loss gamma (efficientDet default gamma=1.5) | +| `label_smoothing` | `0.0` | label smoothing (fraction) | +| `nbs` | `64` | nominal batch size | +| `overlap_mask` | `True` | masks should overlap during training (segment train only) | +| `mask_ratio` | `4` | mask downsample ratio (segment train only) | +| `dropout` | `0.0` | use dropout regularization (classify train only) | +| `val` | `True` | validate/test during training | + +### Prediction + +Prediction settings for YOLO models refer to the various hyperparameters and configurations used to make predictions +with the model on new data. These settings can affect the model's performance, speed, and accuracy. Some common YOLO +prediction settings include the confidence threshold, non-maximum suppression (NMS) threshold, and the number of classes +to consider. Other factors that may affect the prediction process include the size and format of the input data, the +presence of additional features such as masks or multiple labels per box, and the specific task the model is being used +for. It is important to carefully tune and experiment with these settings to achieve the best possible performance for a +given task. + +| Key | Value | Description | +|------------------|------------------------|----------------------------------------------------------| +| `source` | `'ultralytics/assets'` | source directory for images or videos | +| `conf` | `0.25` | object confidence threshold for detection | +| `iou` | `0.7` | intersection over union (IoU) threshold for NMS | +| `half` | `False` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `show` | `False` | show results if possible | +| `save` | `False` | save images with results | +| `save_txt` | `False` | save results as .txt file | +| `save_conf` | `False` | save results with confidence scores | +| `save_crop` | `False` | save cropped images with results | +| `hide_labels` | `False` | hide labels | +| `hide_conf` | `False` | hide confidence scores | +| `max_det` | `300` | maximum number of detections per image | +| `vid_stride` | `False` | video frame-rate stride | +| `line_thickness` | `3` | bounding box thickness (pixels) | +| `visualize` | `False` | visualize model features | +| `augment` | `False` | apply image augmentation to prediction sources | +| `agnostic_nms` | `False` | class-agnostic NMS | +| `retina_masks` | `False` | use high-resolution segmentation masks | +| `classes` | `None` | filter results by class, i.e. class=0, or class=[0,2,3] | +| `boxes` | `True` | Show boxes in segmentation predictions | + +### Validation + +Validation settings for YOLO models refer to the various hyperparameters and configurations used to +evaluate the model's performance on a validation dataset. These settings can affect the model's performance, speed, and +accuracy. 
Some common YOLO validation settings include the batch size, the frequency with which validation is performed +during training, and the metrics used to evaluate the model's performance. Other factors that may affect the validation +process include the size and composition of the validation dataset and the specific task the model is being used for. It +is important to carefully tune and experiment with these settings to ensure that the model is performing well on the +validation dataset and to detect and prevent overfitting. + +| Key | Value | Description | +|---------------|---------|--------------------------------------------------------------------| +| `save_json` | `False` | save results to JSON file | +| `save_hybrid` | `False` | save hybrid version of labels (labels + additional predictions) | +| `conf` | `0.001` | object confidence threshold for detection | +| `iou` | `0.6` | intersection over union (IoU) threshold for NMS | +| `max_det` | `300` | maximum number of detections per image | +| `half` | `True` | use half precision (FP16) | +| `device` | `None` | device to run on, i.e. cuda device=0/1/2/3 or device=cpu | +| `dnn` | `False` | use OpenCV DNN for ONNX inference | +| `plots` | `False` | show plots during training | +| `rect` | `False` | support rectangular evaluation | +| `split` | `val` | dataset split to use for validation, i.e. 'val', 'test' or 'train' | + +### Export + +Export settings for YOLO models refer to the various configurations and options used to save or +export the model for use in other environments or platforms. These settings can affect the model's performance, size, +and compatibility with different systems. Some common YOLO export settings include the format of the exported model +file (e.g. ONNX, TensorFlow SavedModel), the device on which the model will be run (e.g. CPU, GPU), and the presence of +additional features such as masks or multiple labels per box. Other factors that may affect the export process include +the specific task the model is being used for and the requirements or constraints of the target environment or platform. +It is important to carefully consider and configure these settings to ensure that the exported model is optimized for +the intended use case and can be used effectively in the target environment. + +| Key | Value | Description | +|-------------|-----------------|------------------------------------------------------| +| `format` | `'torchscript'` | format to export to | +| `imgsz` | `640` | image size as scalar or (h, w) list, i.e. (640, 480) | +| `keras` | `False` | use Keras for TF SavedModel export | +| `optimize` | `False` | TorchScript: optimize for mobile | +| `half` | `False` | FP16 quantization | +| `int8` | `False` | INT8 quantization | +| `dynamic` | `False` | ONNX/TF/TensorRT: dynamic axes | +| `simplify` | `False` | ONNX: simplify model | +| `opset` | `None` | ONNX: opset version (optional, defaults to latest) | +| `workspace` | `4` | TensorRT: workspace size (GB) | +| `nms` | `False` | CoreML: add NMS | + +### Augmentation + +Augmentation settings for YOLO models refer to the various transformations and modifications +applied to the training data to increase the diversity and size of the dataset. These settings can affect the model's +performance, speed, and accuracy. Some common YOLO augmentation settings include the type and intensity of the +transformations applied (e.g. 
random flips, rotations, cropping, color changes), the probability with which each +transformation is applied, and the presence of additional features such as masks or multiple labels per box. Other +factors that may affect the augmentation process include the size and composition of the original dataset and the +specific task the model is being used for. It is important to carefully tune and experiment with these settings to +ensure that the augmented dataset is diverse and representative enough to train a high-performing model. + +| Key | Value | Description | +|---------------|-------|-------------------------------------------------| +| `hsv_h` | 0.015 | image HSV-Hue augmentation (fraction) | +| `hsv_s` | 0.7 | image HSV-Saturation augmentation (fraction) | +| `hsv_v` | 0.4 | image HSV-Value augmentation (fraction) | +| `degrees` | 0.0 | image rotation (+/- deg) | +| `translate` | 0.1 | image translation (+/- fraction) | +| `scale` | 0.5 | image scale (+/- gain) | +| `shear` | 0.0 | image shear (+/- deg) | +| `perspective` | 0.0 | image perspective (+/- fraction), range 0-0.001 | +| `flipud` | 0.0 | image flip up-down (probability) | +| `fliplr` | 0.5 | image flip left-right (probability) | +| `mosaic` | 1.0 | image mosaic (probability) | +| `mixup` | 0.0 | image mixup (probability) | +| `copy_paste` | 0.0 | segment copy-paste (probability) | + +### Logging, checkpoints, plotting and file management + +Logging, checkpoints, plotting, and file management are important considerations when training a YOLO model. + +- Logging: It is often helpful to log various metrics and statistics during training to track the model's progress and + diagnose any issues that may arise. This can be done using a logging library such as TensorBoard or by writing log + messages to a file. +- Checkpoints: It is a good practice to save checkpoints of the model at regular intervals during training. This allows + you to resume training from a previous point if the training process is interrupted or if you want to experiment with + different training configurations. +- Plotting: Visualizing the model's performance and training progress can be helpful for understanding how the model is + behaving and identifying potential issues. This can be done using a plotting library such as matplotlib or by + generating plots using a logging library such as TensorBoard. +- File management: Managing the various files generated during the training process, such as model checkpoints, log + files, and plots, can be challenging. It is important to have a clear and organized file structure to keep track of + these files and make it easy to access and analyze them as needed. + +Effective logging, checkpointing, plotting, and file management can help you keep track of the model's progress and make +it easier to debug and optimize the training process. + +| Key | Value | Description | +|------------|----------|------------------------------------------------------------------------------------------------| +| `project` | `'runs'` | project name | +| `name` | `'exp'` | experiment name. `exp` gets automatically incremented if not specified, i.e, `exp`, `exp2` ... 
| +| `exist_ok` | `False` | whether to overwrite existing experiment | +| `plots` | `False` | save plots during train/val | +| `save` | `False` | save train checkpoints and predict results | diff --git a/src/train_utils/train_models/models/ultralytics/docs/usage/cli.md b/src/train_utils/train_models/models/ultralytics/docs/usage/cli.md new file mode 100644 index 0000000..20ada7c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/usage/cli.md @@ -0,0 +1,138 @@ +The YOLO Command Line Interface (CLI) is the easiest way to get started training, validating, predicting and exporting +YOLOv8 models. + +The `yolo` command is used for all actions: + +!!! example "" + + === "CLI" + + ```bash + yolo TASK MODE ARGS + ``` + +Where: + +- `TASK` (optional) is one of `[detect, segment, classify]`. If it is not passed explicitly YOLOv8 will try to guess + the `TASK` from the model type. +- `MODE` (required) is one of `[train, val, predict, export]` +- `ARGS` (optional) are any number of custom `arg=value` pairs like `imgsz=320` that override defaults. + For a full list of available `ARGS` see the [Configuration](cfg.md) page and `defaults.yaml` + GitHub [source](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml). + +!!! note "" + + Note: Arguments MUST be passed as `arg=val` with an equals sign and a space between `arg=val` pairs + + - `yolo predict model=yolov8n.pt imgsz=640 conf=0.25`   ✅ + - `yolo predict model yolov8n.pt imgsz 640 conf 0.25`   ❌ + - `yolo predict --model yolov8n.pt --imgsz 640 --conf 0.25`   ❌ + +## Train + +Train YOLOv8n on the COCO128 dataset for 100 epochs at image size 640. For a full list of available arguments see +the [Configuration](cfg.md) page. + +!!! example "" + + ```bash + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=100 imgsz=640 + yolo detect train resume model=last.pt # resume training + ``` + +## Val + +Validate trained YOLOv8n model accuracy on the COCO128 dataset. No argument need to passed as the `model` retains it's +training `data` and arguments as model attributes. + +!!! example "" + + ```bash + yolo detect val model=yolov8n.pt # val official model + yolo detect val model=path/to/best.pt # val custom model + ``` + +## Predict + +Use a trained YOLOv8n model to run predictions on images. + +!!! example "" + + ```bash + yolo detect predict model=yolov8n.pt source='https://ultralytics.com/images/bus.jpg' # predict with official model + yolo detect predict model=path/to/best.pt source='https://ultralytics.com/images/bus.jpg' # predict with custom model + ``` + +## Export + +Export a YOLOv8n model to a different format like ONNX, CoreML, etc. + +!!! 
example "" + + ```bash + yolo export model=yolov8n.pt format=onnx # export official model + yolo export model=path/to/best.pt format=onnx # export custom trained model + ``` + + Available YOLOv8 export formats include: + + | Format | `format=` | Model | + |----------------------------------------------------------------------------|--------------------|---------------------------| + | [PyTorch](https://pytorch.org/) | - | `yolov8n.pt` | + | [TorchScript](https://pytorch.org/docs/stable/jit.html) | `torchscript` | `yolov8n.torchscript` | + | [ONNX](https://onnx.ai/) | `onnx` | `yolov8n.onnx` | + | [OpenVINO](https://docs.openvino.ai/latest/index.html) | `openvino` | `yolov8n_openvino_model/` | + | [TensorRT](https://developer.nvidia.com/tensorrt) | `engine` | `yolov8n.engine` | + | [CoreML](https://github.com/apple/coremltools) | `coreml` | `yolov8n.mlmodel` | + | [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) | `saved_model` | `yolov8n_saved_model/` | + | [TensorFlow GraphDef](https://www.tensorflow.org/api_docs/python/tf/Graph) | `pb` | `yolov8n.pb` | + | [TensorFlow Lite](https://www.tensorflow.org/lite) | `tflite` | `yolov8n.tflite` | + | [TensorFlow Edge TPU](https://coral.ai/docs/edgetpu/models-intro/) | `edgetpu` | `yolov8n_edgetpu.tflite` | + | [TensorFlow.js](https://www.tensorflow.org/js) | `tfjs` | `yolov8n_web_model/` | + | [PaddlePaddle](https://github.com/PaddlePaddle) | `paddle` | `yolov8n_paddle_model/` | + +--- + +## Overriding default arguments + +Default arguments can be overridden by simply passing them as arguments in the CLI in `arg=value` pairs. + +!!! tip "" + + === "Example 1" + Train a detection model for `10 epochs` with `learning_rate` of `0.01` + ```bash + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + ``` + + === "Example 2" + Predict a YouTube video using a pretrained segmentation model at image size 320: + ```bash + yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + ``` + + === "Example 3" + Validate a pretrained detection model at batch-size 1 and image size 640: + ```bash + yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + ``` + +--- + +## Overriding default config file + +You can override the `default.yaml` config file entirely by passing a new file with the `cfg` arguments, +i.e. `cfg=custom.yaml`. + +To do this first create a copy of `default.yaml` in your current working dir with the `yolo copy-cfg` command. + +This will create `default_copy.yaml`, which you can then pass as `cfg=default_copy.yaml` along with any additional args, +like `imgsz=320` in this example: + +!!! example "" + + === "CLI" + ```bash + yolo copy-cfg + yolo cfg=default_copy.yaml imgsz=320 + ``` diff --git a/src/train_utils/train_models/models/ultralytics/docs/usage/engine.md b/src/train_utils/train_models/models/ultralytics/docs/usage/engine.md new file mode 100644 index 0000000..5597be2 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/usage/engine.md @@ -0,0 +1,83 @@ +Both the Ultralytics YOLO command-line and python interfaces are simply a high-level abstraction on the base engine +executors. Let's take a look at the Trainer engine. + +## BaseTrainer + +BaseTrainer contains the generic boilerplate training routine. It can be customized for any task based over overriding +the required functions or operations as long the as correct formats are followed. 
For example, you can support your own +custom model and dataloader by just overriding these functions: + +* `get_model(cfg, weights)` - The function that builds the model to be trained +* `get_dataloder()` - The function that builds the dataloader + More details and source code can be found in [`BaseTrainer` Reference](../reference/base_trainer.md) + +## DetectionTrainer + +Here's how you can use the YOLOv8 `DetectionTrainer` and customize it. + +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + +trainer = DetectionTrainer(overrides={...}) +trainer.train() +trained_model = trainer.best # get best model +``` + +### Customizing the DetectionTrainer + +Let's customize the trainer **to train a custom detection model** that is not supported directly. You can do this by +simply overloading the existing the `get_model` functionality: + +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + + +class CustomTrainer(DetectionTrainer): + def get_model(self, cfg, weights): + ... + + +trainer = CustomTrainer(overrides={...}) +trainer.train() +``` + +You now realize that you need to customize the trainer further to: + +* Customize the `loss function`. +* Add `callback` that uploads model to your Google Drive after every 10 `epochs` + Here's how you can do it: + +```python +from ultralytics.yolo.v8.detect import DetectionTrainer + + +class CustomTrainer(DetectionTrainer): + def get_model(self, cfg, weights): + ... + + def criterion(self, preds, batch): + # get ground truth + imgs = batch["imgs"] + bboxes = batch["bboxes"] + ... + return loss, loss_items # see Reference-> Trainer for details on the expected format + + +# callback to upload model weights +def log_model(trainer): + last_weight_path = trainer.last + ... + + +trainer = CustomTrainer(overrides={...}) +trainer.add_callback("on_train_epoch_end", log_model) # Adds to existing callback +trainer.train() +``` + +To know more about Callback triggering events and entry point, checkout our Callbacks guide # TODO + +## Other engine components + +There are other components that can be customized similarly like `Validators` and `Predictors` +See Reference section for more information on these. + diff --git a/src/train_utils/train_models/models/ultralytics/docs/usage/python.md b/src/train_utils/train_models/models/ultralytics/docs/usage/python.md new file mode 100644 index 0000000..60ef051 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/docs/usage/python.md @@ -0,0 +1,166 @@ +The simplest way of simply using YOLOv8 directly in a Python environment. + +!!! example "Train" + + === "From pretrained(recommended)" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.pt') # pass any model type + model.train(epochs=5) + ``` + + === "From scratch" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.yaml') + model.train(data='coco128.yaml', epochs=5) + ``` + + === "Resume" + ```python + # TODO: Resume feature is under development and should be released soon. + model = YOLO("last.pt") + model.train(resume=True) + ``` + +!!! example "Val" + + === "Val after training" + ```python + from ultralytics import YOLO + + model = YOLO('yolov8n.yaml') + model.train(data='coco128.yaml', epochs=5) + model.val() # It'll automatically evaluate the data you trained. + ``` + + === "Val independently" + ```python + from ultralytics import YOLO + + model = YOLO("model.pt") + # It'll use the data yaml file in model.pt if you don't set data. 
+ model.val() + # or you can set the data you want to val + model.val(data='coco128.yaml') + ``` + +!!! example "Predict" + + === "From source" + ```python + from ultralytics import YOLO + from PIL import Image + import cv2 + + model = YOLO("model.pt") + # accepts all formats - image/dir/Path/URL/video/PIL/ndarray. 0 for webcam + results = model.predict(source="0") + results = model.predict(source="folder", show=True) # Display preds. Accepts all YOLO predict arguments + + # from PIL + im1 = Image.open("bus.jpg") + results = model.predict(source=im1, save=True) # save plotted images + + # from ndarray + im2 = cv2.imread("bus.jpg") + results = model.predict(source=im2, save=True, save_txt=True) # save predictions as labels + + # from list of PIL/ndarray + results = model.predict(source=[im1, im2]) + ``` + + === "Results usage" + ```python + # results would be a list of Results object including all the predictions by default + # but be careful as it could occupy a lot memory when there're many images, + # especially the task is segmentation. + # 1. return as a list + results = model.predict(source="folder") + + # results would be a generator which is more friendly to memory by setting stream=True + # 2. return as a generator + results = model.predict(source=0, stream=True) + + for result in results: + # detection + result.boxes.xyxy # box with xyxy format, (N, 4) + result.boxes.xywh # box with xywh format, (N, 4) + result.boxes.xyxyn # box with xyxy format but normalized, (N, 4) + result.boxes.xywhn # box with xywh format but normalized, (N, 4) + result.boxes.conf # confidence score, (N, 1) + result.boxes.cls # cls, (N, 1) + + # segmentation + result.masks.masks # masks, (N, H, W) + result.masks.segments # bounding coordinates of masks, List[segment] * N + + # classification + result.probs # cls prob, (num_class, ) + + # Each result is composed of torch.Tensor by default, + # in which you can easily use following functionality: + result = result.cuda() + result = result.cpu() + result = result.to("cpu") + result = result.numpy() + ``` + +!!! note "Export and Deployment" + + === "Export, Fuse & info" + ```python + from ultralytics import YOLO + + model = YOLO("model.pt") + model.fuse() + model.info(verbose=True) # Print model information + model.export(format=) # TODO: + + ``` + === "Deployment" + + + More functionality coming soon + +To know more about using `YOLO` models, refer Model class Reference + +[Model reference](../reference/model.md){ .md-button .md-button--primary} + +--- + +### Using Trainers + +`YOLO` model class is a high-level wrapper on the Trainer classes. Each YOLO task has its own trainer that inherits +from `BaseTrainer`. + +!!! tip "Detection Trainer Example" + + ```python + from ultralytics.yolo import v8 import DetectionTrainer, DetectionValidator, DetectionPredictor + + # trainer + trainer = DetectionTrainer(overrides={}) + trainer.train() + trained_model = trainer.best + + # Validator + val = DetectionValidator(args=...) + val(model=trained_model) + + # predictor + pred = DetectionPredictor(overrides={}) + pred(source=SOURCE, model=trained_model) + + # resume from last weight + overrides["resume"] = trainer.last + trainer = detect.DetectionTrainer(overrides=overrides) + ``` + +You can easily customize Trainers to support custom tasks or explore R&D ideas. +Learn more about Customizing `Trainers`, `Validators` and `Predictors` to suit your project needs in the Customization +Section. 
+ +[Customization tutorials](engine.md){ .md-button .md-button--primary} diff --git a/src/train_utils/train_models/models/ultralytics/examples/README.md b/src/train_utils/train_models/models/ultralytics/examples/README.md new file mode 100644 index 0000000..9fc542a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/README.md @@ -0,0 +1,22 @@ +This is a list of real-world applications and walkthroughs. These can be folders of either python files or notebooks . + +## Ultralytics YOLO example applications + +| Title | Format | Contributor | +| ------------------------------------------------------------------------ | ------------------ | --------------------------------------------------- | +| [YOLO ONNX detection Inference with C++](./YOLOv8-CPP-Inference) | C++/ONNX | [Justas Bartnykas](https://github.com/JustasBart) | +| [YOLO OpenCV ONNX detection Python](./YOLOv8-OpenCV-ONNX-Python) | OpenCV/Python/ONNX | [Farid Inawan](https://github.com/frdteknikelektro) | +| [YOLO .Net ONNX detection C#](https://www.nuget.org/packages/Yolov8.Net) | C# .Net | [Samuel Stainback](https://github.com/sstainba) | + +## How can you contribute ? + +We're looking for examples, applications and guides from the community. Here's how you can contribute: + +- Make a PR with `[Example]` prefix in title after adding your project folder in the examples/ folder of the repository +- The project should satisfy these conditions: + - It should use ultralytics framework + - It have a README.md with instructions to run the project + - It should avoid adding large assets or dependencies unless absolutely needed + - The contributor is expected to help out in issues related to their examples + +If you're unsure about any of these requirements, make a PR and we'll happy to guide you diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/CMakeLists.txt b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/CMakeLists.txt new file mode 100644 index 0000000..bc2f33f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/CMakeLists.txt @@ -0,0 +1,28 @@ +cmake_minimum_required(VERSION 3.5) + +project(Yolov8CPPInference VERSION 0.1) + +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +# CUDA +set(CUDA_TOOLKIT_ROOT_DIR "/usr/local/cuda") +find_package(CUDA 11 REQUIRED) + +set(CMAKE_CUDA_STANDARD 11) +set(CMAKE_CUDA_STANDARD_REQUIRED ON) +# !CUDA + +# OpenCV +find_package(OpenCV REQUIRED) +include_directories(${OpenCV_INCLUDE_DIRS}) +# !OpenCV + +set(PROJECT_SOURCES + main.cpp + + inference.h + inference.cpp +) + +add_executable(Yolov8CPPInference ${PROJECT_SOURCES}) +target_link_libraries(Yolov8CPPInference ${OpenCV_LIBS}) diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/README.md b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/README.md new file mode 100644 index 0000000..4eca0ce --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/README.md @@ -0,0 +1,57 @@ +# yolov8/yolov5 Inference C++ + +Usage: + +``` +# git clone ultralytics +pip install . +cd examples/cpp_ + +Add a **yolov8\_.onnx** and/or **yolov5\_.onnx** model(s) to the ultralytics folder. +Edit the **main.cpp** to change the **projectBasePath** to match your user. + +Note that by default the CMake file will try and import the CUDA library to be used with the OpenCVs dnn (cuDNN) GPU Inference. 
+If your OpenCV build does not use CUDA/cuDNN you can remove that import call and run the example on CPU. + +mkdir build +cd build +cmake .. +make +./Yolov8CPPInference +``` + +To export yolov8 models: + +``` +yolo export \ +model=yolov8s.pt \ +imgsz=[480,640] \ +format=onnx \ +opset=12 +``` + +To export yolov5 models: + +``` +python3 export.py \ +--weights yolov5s.pt \ +--img 480 640 \ +--include onnx \ +--opset 12 +``` + +yolov8s.onnx: + +![image](https://user-images.githubusercontent.com/40023722/217356132-a4cecf2e-2729-4acb-b80a-6559022d7707.png) + +yolov5s.onnx: + +![image](https://user-images.githubusercontent.com/40023722/217357005-07464492-d1da-42e3-98a7-fc753f87d5e6.png) + +This repository is based on OpenCVs dnn API to run an ONNX exported model of either yolov5/yolov8 (In theory should work +for yolov6 and yolov7 but not tested). Note that for this example the networks are exported as rectangular (640x480) +resolutions, but it would work for any resolution that you export as although you might want to use the letterBox +approach for square images depending on your use-case. + +The **main** branch version is based on using Qt as a GUI wrapper the main interest here is the **Inference** class file +which shows how to transpose yolov8 models to work as yolov5 models. diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.cpp b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.cpp new file mode 100644 index 0000000..b45830e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.cpp @@ -0,0 +1,185 @@ +#include "inference.h" + +Inference::Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape, const std::string &classesTxtFile, const bool &runWithCuda) +{ + modelPath = onnxModelPath; + modelShape = modelInputShape; + classesPath = classesTxtFile; + cudaEnabled = runWithCuda; + + loadOnnxNetwork(); + // loadClassesFromFile(); The classes are hard-coded for this example +} + +std::vector Inference::runInference(const cv::Mat &input) +{ + cv::Mat modelInput = input; + if (letterBoxForSquare && modelShape.width == modelShape.height) + modelInput = formatToSquare(modelInput); + + cv::Mat blob; + cv::dnn::blobFromImage(modelInput, blob, 1.0/255.0, modelShape, cv::Scalar(), true, false); + net.setInput(blob); + + std::vector outputs; + net.forward(outputs, net.getUnconnectedOutLayersNames()); + + int rows = outputs[0].size[1]; + int dimensions = outputs[0].size[2]; + + bool yolov8 = false; + // yolov5 has an output of shape (batchSize, 25200, 85) (Num classes + box[x,y,w,h] + confidence[c]) + // yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + box[x,y,w,h]) + if (dimensions > rows) // Check if the shape[2] is more than shape[1] (yolov8) + { + yolov8 = true; + rows = outputs[0].size[2]; + dimensions = outputs[0].size[1]; + + outputs[0] = outputs[0].reshape(1, dimensions); + cv::transpose(outputs[0], outputs[0]); + } + float *data = (float *)outputs[0].data; + + float x_factor = modelInput.cols / modelShape.width; + float y_factor = modelInput.rows / modelShape.height; + + std::vector class_ids; + std::vector confidences; + std::vector boxes; + + for (int i = 0; i < rows; ++i) + { + if (yolov8) + { + float *classes_scores = data+4; + + cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); + cv::Point class_id; + double maxClassScore; + + minMaxLoc(scores, 0, &maxClassScore, 0, &class_id); + + if (maxClassScore > 
modelScoreThreshold) + { + confidences.push_back(maxClassScore); + class_ids.push_back(class_id.x); + + float x = data[0]; + float y = data[1]; + float w = data[2]; + float h = data[3]; + + int left = int((x - 0.5 * w) * x_factor); + int top = int((y - 0.5 * h) * y_factor); + + int width = int(w * x_factor); + int height = int(h * y_factor); + + boxes.push_back(cv::Rect(left, top, width, height)); + } + } + else // yolov5 + { + float confidence = data[4]; + + if (confidence >= modelConfidenseThreshold) + { + float *classes_scores = data+5; + + cv::Mat scores(1, classes.size(), CV_32FC1, classes_scores); + cv::Point class_id; + double max_class_score; + + minMaxLoc(scores, 0, &max_class_score, 0, &class_id); + + if (max_class_score > modelScoreThreshold) + { + confidences.push_back(confidence); + class_ids.push_back(class_id.x); + + float x = data[0]; + float y = data[1]; + float w = data[2]; + float h = data[3]; + + int left = int((x - 0.5 * w) * x_factor); + int top = int((y - 0.5 * h) * y_factor); + + int width = int(w * x_factor); + int height = int(h * y_factor); + + boxes.push_back(cv::Rect(left, top, width, height)); + } + } + } + + data += dimensions; + } + + std::vector nms_result; + cv::dnn::NMSBoxes(boxes, confidences, modelScoreThreshold, modelNMSThreshold, nms_result); + + std::vector detections{}; + for (unsigned long i = 0; i < nms_result.size(); ++i) + { + int idx = nms_result[i]; + + Detection result; + result.class_id = class_ids[idx]; + result.confidence = confidences[idx]; + + std::random_device rd; + std::mt19937 gen(rd()); + std::uniform_int_distribution dis(100, 255); + result.color = cv::Scalar(dis(gen), + dis(gen), + dis(gen)); + + result.className = classes[result.class_id]; + result.box = boxes[idx]; + + detections.push_back(result); + } + + return detections; +} + +void Inference::loadClassesFromFile() +{ + std::ifstream inputFile(classesPath); + if (inputFile.is_open()) + { + std::string classLine; + while (std::getline(inputFile, classLine)) + classes.push_back(classLine); + inputFile.close(); + } +} + +void Inference::loadOnnxNetwork() +{ + net = cv::dnn::readNetFromONNX(modelPath); + if (cudaEnabled) + { + std::cout << "\nRunning on CUDA" << std::endl; + net.setPreferableBackend(cv::dnn::DNN_BACKEND_CUDA); + net.setPreferableTarget(cv::dnn::DNN_TARGET_CUDA); + } + else + { + std::cout << "\nRunning on CPU" << std::endl; + net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV); + net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU); + } +} + +cv::Mat Inference::formatToSquare(const cv::Mat &source) +{ + int col = source.cols; + int row = source.rows; + int _max = MAX(col, row); + cv::Mat result = cv::Mat::zeros(_max, _max, CV_8UC3); + source.copyTo(result(cv::Rect(0, 0, col, row))); + return result; +} diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.h b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.h new file mode 100644 index 0000000..5763e10 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/inference.h @@ -0,0 +1,52 @@ +#ifndef INFERENCE_H +#define INFERENCE_H + +// Cpp native +#include +#include +#include +#include + +// OpenCV / DNN / Inference +#include +#include +#include + +struct Detection +{ + int class_id{0}; + std::string className{}; + float confidence{0.0}; + cv::Scalar color{}; + cv::Rect box{}; +}; + +class Inference +{ +public: + Inference(const std::string &onnxModelPath, const cv::Size &modelInputShape = 
{640, 640}, const std::string &classesTxtFile = "", const bool &runWithCuda = true); + std::vector runInference(const cv::Mat &input); + +private: + void loadClassesFromFile(); + void loadOnnxNetwork(); + cv::Mat formatToSquare(const cv::Mat &source); + + std::string modelPath{}; + std::string classesPath{}; + bool cudaEnabled{}; + + std::vector classes{"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"}; + + cv::Size2f modelShape{}; + + float modelConfidenseThreshold {0.25}; + float modelScoreThreshold {0.45}; + float modelNMSThreshold {0.50}; + + bool letterBoxForSquare = true; + + cv::dnn::Net net; +}; + +#endif // INFERENCE_H diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/main.cpp b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/main.cpp new file mode 100644 index 0000000..6d1ba98 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-CPP-Inference/main.cpp @@ -0,0 +1,70 @@ +#include +#include +#include + +#include + +#include "inference.h" + +using namespace std; +using namespace cv; + +int main(int argc, char **argv) +{ + std::string projectBasePath = "/home/user/ultralytics"; // Set your ultralytics base path + + bool runOnGPU = true; + + // + // Pass in either: + // + // "yolov8s.onnx" or "yolov5s.onnx" + // + // To run Inference with yolov8/yolov5 (ONNX) + // + + // Note that in this example the classes are hard-coded and 'classes.txt' is a place holder. + Inference inf(projectBasePath + "/yolov8s.onnx", cv::Size(640, 480), "classes.txt", runOnGPU); + + std::vector imageNames; + imageNames.push_back(projectBasePath + "/ultralytics/assets/bus.jpg"); + imageNames.push_back(projectBasePath + "/ultralytics/assets/zidane.jpg"); + + for (int i = 0; i < imageNames.size(); ++i) + { + cv::Mat frame = cv::imread(imageNames[i]); + + // Inference starts here... 
+ std::vector output = inf.runInference(frame); + + int detections = output.size(); + std::cout << "Number of detections:" << detections << std::endl; + + for (int i = 0; i < detections; ++i) + { + Detection detection = output[i]; + + cv::Rect box = detection.box; + cv::Scalar color = detection.color; + + // Detection box + cv::rectangle(frame, box, color, 2); + + // Detection box text + std::string classString = detection.className + ' ' + std::to_string(detection.confidence).substr(0, 4); + cv::Size textSize = cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0); + cv::Rect textBox(box.x, box.y - 40, textSize.width + 10, textSize.height + 20); + + cv::rectangle(frame, textBox, color, cv::FILLED); + cv::putText(frame, classString, cv::Point(box.x + 5, box.y - 10), cv::FONT_HERSHEY_DUPLEX, 1, cv::Scalar(0, 0, 0), 2, 0); + } + // Inference ends here... + + // This is only for preview purposes + float scale = 0.8; + cv::resize(frame, frame, cv::Size(frame.cols*scale, frame.rows*scale)); + cv::imshow("Inference", frame); + + cv::waitKey(-1); + } +} diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/README.md b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/README.md new file mode 100644 index 0000000..604dcfe --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/README.md @@ -0,0 +1,19 @@ +# YOLOv8 - OpenCV + +Implementation YOLOv8 on OpenCV using ONNX Format. + +Just simply clone and run + +```bash +pip install -r requirements.txt +python main.py +``` + +If you start from scratch: + +```bash +pip install ultralytics +yolo export model=yolov8n.pt imgsz=640 format=onnx opset=12 +``` + +_\*Make sure to include "opset=12"_ diff --git a/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/main.py b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/main.py new file mode 100644 index 0000000..410c908 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/examples/YOLOv8-OpenCV-ONNX-Python/main.py @@ -0,0 +1,74 @@ +import cv2.dnn +import numpy as np + +from ultralytics.yolo.utils import ROOT, yaml_load +from ultralytics.yolo.utils.checks import check_yaml + +CLASSES = yaml_load(check_yaml('coco128.yaml'))['names'] + +colors = np.random.uniform(0, 255, size=(len(CLASSES), 3)) + + +def draw_bounding_box(img, class_id, confidence, x, y, x_plus_w, y_plus_h): + label = f'{CLASSES[class_id]} ({confidence:.2f})' + color = colors[class_id] + cv2.rectangle(img, (x, y), (x_plus_w, y_plus_h), color, 2) + cv2.putText(img, label, (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2) + + +def main(): + model: cv2.dnn.Net = cv2.dnn.readNetFromONNX('yolov8n.onnx') + original_image: np.ndarray = cv2.imread(str(ROOT / 'assets/bus.jpg')) + [height, width, _] = original_image.shape + length = max((height, width)) + image = np.zeros((length, length, 3), np.uint8) + image[0:height, 0:width] = original_image + scale = length / 640 + + blob = cv2.dnn.blobFromImage(image, scalefactor=1 / 255, size=(640, 640)) + model.setInput(blob) + outputs = model.forward() + + outputs = np.array([cv2.transpose(outputs[0])]) + rows = outputs.shape[1] + + boxes = [] + scores = [] + class_ids = [] + + for i in range(rows): + classes_scores = outputs[0][i][4:] + (minScore, maxScore, minClassLoc, (x, maxClassIndex)) = cv2.minMaxLoc(classes_scores) + if maxScore >= 0.25: + box = [ + outputs[0][i][0] - (0.5 * outputs[0][i][2]), 
outputs[0][i][1] - (0.5 * outputs[0][i][3]), + outputs[0][i][2], outputs[0][i][3]] + boxes.append(box) + scores.append(maxScore) + class_ids.append(maxClassIndex) + + result_boxes = cv2.dnn.NMSBoxes(boxes, scores, 0.25, 0.45, 0.5) + + detections = [] + for i in range(len(result_boxes)): + index = result_boxes[i] + box = boxes[index] + detection = { + 'class_id': class_ids[index], + 'class_name': CLASSES[class_ids[index]], + 'confidence': scores[index], + 'box': box, + 'scale': scale} + detections.append(detection) + draw_bounding_box(original_image, class_ids[index], scores[index], round(box[0] * scale), round(box[1] * scale), + round((box[0] + box[2]) * scale), round((box[1] + box[3]) * scale)) + + cv2.imshow('image', original_image) + cv2.waitKey(0) + cv2.destroyAllWindows() + + return detections + + +if __name__ == '__main__': + main() diff --git a/src/train_utils/train_models/models/ultralytics/mkdocs.yml b/src/train_utils/train_models/models/ultralytics/mkdocs.yml new file mode 100644 index 0000000..905eb3c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/mkdocs.yml @@ -0,0 +1,154 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +site_name: YOLOv8 Docs +repo_url: https://github.com/ultralytics/ultralytics +edit_uri: https://github.com/ultralytics/ultralytics/tree/main/docs +repo_name: ultralytics/ultralytics +remote_name: https://github.com/ultralytics/docs + +theme: + name: "material" + logo: https://github.com/ultralytics/assets/raw/main/logo/Ultralytics_Logotype_Reverse.svg + favicon: https://github.com/ultralytics/assets/raw/main/logo/favicon-yolo.ico + font: + text: Roboto + + palette: + # Palette toggle for light mode + - scheme: default + # primary: grey + toggle: + icon: material/brightness-7 + name: Switch to dark mode + + # Palette toggle for dark mode + - scheme: slate + # primary: black + toggle: + icon: material/brightness-4 + name: Switch to light mode + features: + - content.action.edit + - content.code.annotate + - content.tooltips + - search.highlight + - search.share + - search.suggest + - toc.follow + - navigation.top + - navigation.expand + - navigation.footer + - navigation.tracking + - navigation.instant + - navigation.indexes + - content.tabs.link # all code tabs change simultaneously + +# Customization +copyright: Ultralytics 2023. All rights reserved. +extra: + # version: + # provider: mike # version drop-down menu + analytics: + provider: google + property: G-2M5EHKC0BH + feedback: + title: Was this page helpful? + ratings: + - icon: material/heart + name: This page was helpful + data: 1 + note: Thanks for your feedback! + - icon: material/heart-broken + name: This page could be improved + data: 0 + note: >- + Thanks for your feedback!
+ Tell us what we can improve. + + social: + - icon: fontawesome/brands/github + link: https://github.com/ultralytics + - icon: fontawesome/brands/linkedin + link: https://www.linkedin.com/company/ultralytics/ + - icon: fontawesome/brands/twitter + link: https://twitter.com/ultralytics + - icon: fontawesome/brands/youtube + link: https://www.youtube.com/ultralytics + - icon: fontawesome/brands/docker + link: https://hub.docker.com/r/ultralytics/ultralytics/ + - icon: fontawesome/brands/python + link: https://pypi.org/project/ultralytics/ + +extra_css: + - stylesheets/style.css + +markdown_extensions: + # Div text decorators + - admonition + - pymdownx.details + - pymdownx.superfences + - tables + - attr_list + - def_list + # Syntax highlight + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.snippets + + # Button + - attr_list + + # Content tabs + - pymdownx.superfences + - pymdownx.tabbed: + alternate_style: true + + # Highlight + - pymdownx.critic + - pymdownx.caret + - pymdownx.keys + - pymdownx.mark + - pymdownx.tilde + +plugins: + - mkdocstrings + - search + +# Primary navigation +nav: + - Home: index.md + - Quickstart: quickstart.md + - Modes: + - modes/index.md + - Train: modes/train.md + - Val: modes/val.md + - Predict: modes/predict.md + - Export: modes/export.md + - Track: modes/track.md + - Benchmark: modes/benchmark.md + - Tasks: + - tasks/index.md + - Detect: tasks/detect.md + - Segment: tasks/segment.md + - Classify: tasks/classify.md +# - Keypoints: tasks/keypoints.md + - Usage: + - CLI: usage/cli.md + - Python: usage/python.md + - Callbacks: usage/callbacks.md + - Configuration: usage/cfg.md + - Advanced Customization: usage/engine.md + - Ultralytics HUB: hub.md + - iOS and Android App: app.md + - Reference: + - Engine: + - Model: reference/model.md + - Trainer: reference/base_trainer.md + - Validator: reference/base_val.md + - Predictor: reference/base_pred.md + - Exporter: reference/exporter.md + - Results: reference/results.md + - ultralytics.nn: reference/nn.md + - Operations: reference/ops.md + - Security: SECURITY.md diff --git a/src/train_utils/train_models/models/ultralytics/requirements.txt b/src/train_utils/train_models/models/ultralytics/requirements.txt new file mode 100644 index 0000000..099fdcb --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/requirements.txt @@ -0,0 +1,43 @@ +# Ultralytics requirements +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +matplotlib>=3.2.2 +numpy>=1.21.6 +opencv-python>=4.6.0 +Pillow>=7.1.2 +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +torch>=1.7.0 +torchvision>=0.8.1 +tqdm>=4.64.0 + +# Logging ------------------------------------- +# tensorboard>=2.4.1 +# clearml +# comet + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=6.0 # CoreML export +# onnx>=1.12.0 # ONNX export +# onnxsim>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) +# tflite-support +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev>=2022.3 # OpenVINO export + +# Extras -------------------------------------- +psutil # system utilization +thop>=0.1.1 # FLOPs computation +# ipython # interactive notebook +# albumentations>=1.0.3 +# pycocotools>=2.0.6 # COCO mAP +# roboflow diff --git 
a/src/train_utils/train_models/models/ultralytics/setup.cfg b/src/train_utils/train_models/models/ultralytics/setup.cfg new file mode 100644 index 0000000..2cde6a4 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/setup.cfg @@ -0,0 +1,56 @@ +# Project-wide configuration file, can be used for package metadata and other toll configurations +# Example usage: global configuration for PEP8 (via flake8) setting or default pytest arguments +# Local usage: pip install pre-commit, pre-commit run --all-files + +[metadata] +license_files = LICENSE +description_file = README.md + +[tool:pytest] +norecursedirs = + .git + dist + build +addopts = + --doctest-modules + --durations=25 + --color=yes + +[flake8] +max-line-length = 120 +exclude = .tox,*.egg,build,temp +select = E,W,F +doctests = True +verbose = 2 +# https://pep8.readthedocs.io/en/latest/intro.html#error-codes +format = pylint +# see: https://www.flake8rules.com/ +ignore = E731,F405,E402,W504,E501 + # E731: Do not assign a lambda expression, use a def + # F405: name may be undefined, or defined from star imports: module + # E402: module level import not at top of file + # W504: line break after binary operator + # E501: line too long + # removed: + # F401: module imported but unused + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E127: continuation line over-indented for visual indent + # F403: ‘from module import *’ used; unable to detect undefined names + + +[isort] +# https://pycqa.github.io/isort/docs/configuration/options.html +line_length = 120 +# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html +multi_line_output = 0 + +[yapf] +based_on_style = pep8 +spaces_before_comment = 2 +COLUMN_LIMIT = 120 +COALESCE_BRACKETS = True +SPACES_AROUND_POWER_OPERATOR = True +SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = True +SPLIT_BEFORE_CLOSING_BRACKET = False +SPLIT_BEFORE_FIRST_ARGUMENT = False +# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False diff --git a/src/train_utils/train_models/models/ultralytics/setup.py b/src/train_utils/train_models/models/ultralytics/setup.py new file mode 100644 index 0000000..ba0296e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/setup.py @@ -0,0 +1,65 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import re +from pathlib import Path + +import pkg_resources as pkg +from setuptools import find_packages, setup + +# Settings +FILE = Path(__file__).resolve() +PARENT = FILE.parent # root directory +README = (PARENT / 'README.md').read_text(encoding='utf-8') +REQUIREMENTS = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements((PARENT / 'requirements.txt').read_text())] +PKG_REQUIREMENTS = ['sentry_sdk'] # pip-only requirements + + +def get_version(): + file = PARENT / 'ultralytics/__init__.py' + return re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', file.read_text(encoding='utf-8'), re.M)[1] + + +setup( + name='ultralytics', # name of pypi package + version=get_version(), # version of pypi package + python_requires='>=3.7', + license='GPL-3.0', + description='Ultralytics YOLOv8', + long_description=README, + long_description_content_type='text/markdown', + url='https://github.com/ultralytics/ultralytics', + project_urls={ + 'Bug Reports': 'https://github.com/ultralytics/ultralytics/issues', + 'Funding': 'https://ultralytics.com', + 'Source': 'https://github.com/ultralytics/ultralytics'}, + author='Ultralytics', + author_email='hello@ultralytics.com', + packages=find_packages(), # required + include_package_data=True, + 
install_requires=REQUIREMENTS + PKG_REQUIREMENTS, + extras_require={ + 'dev': ['check-manifest', 'pytest', 'pytest-cov', 'coverage', 'mkdocs-material', 'mkdocstrings[python]'], + 'export': ['coremltools>=6.0', 'onnx', 'onnxsim', 'onnxruntime', 'openvino-dev>=2022.3'], + 'tf': ['onnx2tf', 'sng4onnx', 'tflite_support', 'tensorflow']}, + classifiers=[ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: Education', + 'Intended Audience :: Science/Research', + 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', + 'Topic :: Software Development', + 'Topic :: Scientific/Engineering', + 'Topic :: Scientific/Engineering :: Artificial Intelligence', + 'Topic :: Scientific/Engineering :: Image Recognition', + 'Operating System :: POSIX :: Linux', + 'Operating System :: MacOS', + 'Operating System :: Microsoft :: Windows', ], + keywords='machine-learning, deep-learning, vision, ML, DL, AI, YOLO, YOLOv3, YOLOv5, YOLOv8, HUB, Ultralytics', + entry_points={ + 'console_scripts': ['yolo = ultralytics.yolo.cfg:entrypoint', 'ultralytics = ultralytics.yolo.cfg:entrypoint']}) diff --git a/src/train_utils/train_models/models/ultralytics/tests/test_cli.py b/src/train_utils/train_models/models/ultralytics/tests/test_cli.py new file mode 100644 index 0000000..1ba1094 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/tests/test_cli.py @@ -0,0 +1,81 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import subprocess +from pathlib import Path + +from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS + +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n' +CFG = 'yolov8n' + + +def run(cmd): + # Run a subprocess command with check=True + subprocess.run(cmd.split(), check=True) + + +def test_special_modes(): + run('yolo checks') + run('yolo settings') + run('yolo help') + + +# Train checks --------------------------------------------------------------------------------------------------------- +def test_train_det(): + run(f'yolo train detect model={CFG}.yaml data=coco8.yaml imgsz=32 epochs=1 v5loader') + + +def test_train_seg(): + run(f'yolo train segment model={CFG}-seg.yaml data=coco8-seg.yaml imgsz=32 epochs=1') + + +def test_train_cls(): + run(f'yolo train classify model={CFG}-cls.yaml data=imagenet10 imgsz=32 epochs=1') + + +# Val checks ----------------------------------------------------------------------------------------------------------- +def test_val_detect(): + run(f'yolo val detect model={MODEL}.pt data=coco8.yaml imgsz=32') + + +def test_val_segment(): + run(f'yolo val segment model={MODEL}-seg.pt data=coco8-seg.yaml imgsz=32') + + +def test_val_classify(): + run(f'yolo val classify model={MODEL}-cls.pt data=imagenet10 imgsz=32') + + +# Predict checks ------------------------------------------------------------------------------------------------------- +def test_predict_detect(): + run(f"yolo predict model={MODEL}.pt source={ROOT / 'assets'} imgsz=32 save save_crop save_txt") + if ONLINE: + run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/images/bus.jpg imgsz=32') + run(f'yolo predict model={MODEL}.pt source=https://ultralytics.com/assets/decelera_landscape_min.mov imgsz=32') + run(f'yolo predict model={MODEL}.pt 
source=https://ultralytics.com/assets/decelera_portrait_min.mov imgsz=32') + + +def test_predict_segment(): + run(f"yolo predict model={MODEL}-seg.pt source={ROOT / 'assets'} imgsz=32 save") + + +def test_predict_classify(): + run(f"yolo predict model={MODEL}-cls.pt source={ROOT / 'assets'} imgsz=32 save") + + +# Export checks -------------------------------------------------------------------------------------------------------- +def test_export_detect_torchscript(): + run(f'yolo export model={MODEL}.pt format=torchscript') + + +def test_export_segment_torchscript(): + run(f'yolo export model={MODEL}-seg.pt format=torchscript') + + +def test_export_classify_torchscript(): + run(f'yolo export model={MODEL}-cls.pt format=torchscript') + + +def test_export_detect_edgetpu(enabled=False): + if enabled and LINUX: + run(f'yolo export model={MODEL}.pt format=edgetpu') diff --git a/src/train_utils/train_models/models/ultralytics/tests/test_engine.py b/src/train_utils/train_models/models/ultralytics/tests/test_engine.py new file mode 100644 index 0000000..c20edc1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/tests/test_engine.py @@ -0,0 +1,93 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from pathlib import Path + +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, SETTINGS +from ultralytics.yolo.v8 import classify, detect, segment + +CFG_DET = 'yolov8n.yaml' +CFG_SEG = 'yolov8n-seg.yaml' +CFG_CLS = 'squeezenet1_0' +CFG = get_cfg(DEFAULT_CFG) +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n' +SOURCE = ROOT / 'assets' + + +def test_detect(): + overrides = {'data': 'coco8.yaml', 'model': CFG_DET, 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'coco8.yaml' + + # Trainer + trainer = detect.DetectionTrainer(overrides=overrides) + trainer.train() + + # Validator + val = detect.DetectionValidator(args=CFG) + val(model=trainer.best) # validate best.pt + + # Predictor + pred = detect.DetectionPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=f'{MODEL}.pt') + assert len(result), 'predictor test failed' + + overrides['resume'] = trainer.last + trainer = detect.DetectionTrainer(overrides=overrides) + try: + trainer.train() + except Exception as e: + print(f'Expected exception caught: {e}') + return + + Exception('Resume test failed!') + + +def test_segment(): + overrides = {'data': 'coco8-seg.yaml', 'model': CFG_SEG, 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'coco8-seg.yaml' + CFG.v5loader = False + # YOLO(CFG_SEG).train(**overrides) # works + + # trainer + trainer = segment.SegmentationTrainer(overrides=overrides) + trainer.train() + + # Validator + val = segment.SegmentationValidator(args=CFG) + val(model=trainer.best) # validate best.pt + + # Predictor + pred = segment.SegmentationPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=f'{MODEL}-seg.pt') + assert len(result), 'predictor test failed' + + # Test resume + overrides['resume'] = trainer.last + trainer = segment.SegmentationTrainer(overrides=overrides) + try: + trainer.train() + except Exception as e: + print(f'Expected exception caught: {e}') + return + + Exception('Resume test failed!') + + +def test_classify(): + overrides = {'data': 'imagenet10', 'model': 'yolov8n-cls.yaml', 'imgsz': 32, 'epochs': 1, 'save': False} + CFG.data = 'imagenet10' + CFG.imgsz = 32 + # YOLO(CFG_SEG).train(**overrides) # works + + # Trainer + trainer = classify.ClassificationTrainer(overrides=overrides) + trainer.train() + + # 
Validator + val = classify.ClassificationValidator(args=CFG) + val(model=trainer.best) + + # Predictor + pred = classify.ClassificationPredictor(overrides={'imgsz': [64, 64]}) + result = pred(source=SOURCE, model=trainer.best) + assert len(result), 'predictor test failed' diff --git a/src/train_utils/train_models/models/ultralytics/tests/test_python.py b/src/train_utils/train_models/models/ultralytics/tests/test_python.py new file mode 100644 index 0000000..22446a9 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/tests/test_python.py @@ -0,0 +1,222 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from pathlib import Path + +import cv2 +import numpy as np +import torch +from PIL import Image + +from ultralytics import YOLO +from ultralytics.yolo.data.build import load_inference_source +from ultralytics.yolo.utils import LINUX, ONLINE, ROOT, SETTINGS + +MODEL = Path(SETTINGS['weights_dir']) / 'yolov8n.pt' +CFG = 'yolov8n.yaml' +SOURCE = ROOT / 'assets/bus.jpg' +SOURCE_GREYSCALE = Path(f'{SOURCE.parent / SOURCE.stem}_greyscale.jpg') +SOURCE_RGBA = Path(f'{SOURCE.parent / SOURCE.stem}_4ch.png') + +# Convert SOURCE to greyscale and 4-ch +im = Image.open(SOURCE) +im.convert('L').save(SOURCE_GREYSCALE) # greyscale +im.convert('RGBA').save(SOURCE_RGBA) # 4-ch PNG with alpha + + +def test_model_forward(): + model = YOLO(CFG) + model(SOURCE) + + +def test_model_info(): + model = YOLO(CFG) + model.info() + model = YOLO(MODEL) + model.info(verbose=True) + + +def test_model_fuse(): + model = YOLO(CFG) + model.fuse() + model = YOLO(MODEL) + model.fuse() + + +def test_predict_dir(): + model = YOLO(MODEL) + model(source=ROOT / 'assets') + + +def test_predict_img(): + model = YOLO(MODEL) + seg_model = YOLO('yolov8n-seg.pt') + cls_model = YOLO('yolov8n-cls.pt') + im = cv2.imread(str(SOURCE)) + assert len(model(source=Image.open(SOURCE), save=True, verbose=True)) == 1 # PIL + assert len(model(source=im, save=True, save_txt=True)) == 1 # ndarray + assert len(model(source=[im, im], save=True, save_txt=True)) == 2 # batch + assert len(list(model(source=[im, im], save=True, stream=True))) == 2 # stream + assert len(model(torch.zeros(320, 640, 3).numpy())) == 1 # tensor to numpy + batch = [ + str(SOURCE), # filename + Path(SOURCE), # Path + 'https://ultralytics.com/images/zidane.jpg' if ONLINE else SOURCE, # URI + cv2.imread(str(SOURCE)), # OpenCV + Image.open(SOURCE), # PIL + np.zeros((320, 640, 3))] # numpy + assert len(model(batch)) == len(batch) # multiple sources in a batch + + # Test tensor inference + im = cv2.imread(str(SOURCE)) # OpenCV + t = cv2.resize(im, (32, 32)) + t = torch.from_numpy(t.transpose((2, 0, 1))) + t = torch.stack([t, t, t, t]) + results = model(t) + assert len(results) == t.shape[0] + results = seg_model(t) + assert len(results) == t.shape[0] + results = cls_model(t) + assert len(results) == t.shape[0] + + +def test_predict_grey_and_4ch(): + model = YOLO(MODEL) + for f in SOURCE_RGBA, SOURCE_GREYSCALE: + for source in Image.open(f), cv2.imread(str(f)), f: + model(source, save=True, verbose=True) + + +def test_val(): + model = YOLO(MODEL) + model.val(data='coco8.yaml', imgsz=32) + + +def test_val_scratch(): + model = YOLO(CFG) + model.val(data='coco8.yaml', imgsz=32) + + +def test_amp(): + if torch.cuda.is_available(): + from ultralytics.yolo.engine.trainer import check_amp + model = YOLO(MODEL).model.cuda() + assert check_amp(model) + + +def test_train_scratch(): + model = YOLO(CFG) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model(SOURCE) + + +def 
test_train_pretrained(): + model = YOLO(MODEL) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model(SOURCE) + + +def test_export_torchscript(): + model = YOLO(MODEL) + f = model.export(format='torchscript') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_torchscript_scratch(): + model = YOLO(CFG) + f = model.export(format='torchscript') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_onnx(): + model = YOLO(MODEL) + f = model.export(format='onnx') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_openvino(): + model = YOLO(MODEL) + f = model.export(format='openvino') + YOLO(f)(SOURCE) # exported model inference + + +def test_export_coreml(): # sourcery skip: move-assign + model = YOLO(MODEL) + model.export(format='coreml') + # if MACOS: + # YOLO(f)(SOURCE) # model prediction only supported on macOS + + +def test_export_tflite(enabled=False): + # TF suffers from install conflicts on Windows and macOS + if enabled and LINUX: + model = YOLO(MODEL) + f = model.export(format='tflite') + YOLO(f)(SOURCE) + + +def test_export_pb(enabled=False): + # TF suffers from install conflicts on Windows and macOS + if enabled and LINUX: + model = YOLO(MODEL) + f = model.export(format='pb') + YOLO(f)(SOURCE) + + +def test_export_paddle(enabled=False): + # Paddle protobuf requirements conflicting with onnx protobuf requirements + if enabled: + model = YOLO(MODEL) + model.export(format='paddle') + + +def test_all_model_yamls(): + for m in list((ROOT / 'models').rglob('*.yaml')): + YOLO(m.name) + + +def test_workflow(): + model = YOLO(MODEL) + model.train(data='coco8.yaml', epochs=1, imgsz=32) + model.val() + model.predict(SOURCE) + model.export(format='onnx') # export a model to ONNX format + + +def test_predict_callback_and_setup(): + # test callback addition for prediction + def on_predict_batch_end(predictor): # results -> List[batch_size] + path, _, im0s, _, _ = predictor.batch + # print('on_predict_batch_end', im0s[0].shape) + im0s = im0s if isinstance(im0s, list) else [im0s] + bs = [predictor.dataset.bs for _ in range(len(path))] + predictor.results = zip(predictor.results, im0s, bs) + + model = YOLO(MODEL) + model.add_callback('on_predict_batch_end', on_predict_batch_end) + + dataset = load_inference_source(source=SOURCE, transforms=model.transforms) + bs = dataset.bs # noqa access predictor properties + results = model.predict(dataset, stream=True) # source already setup + for _, (result, im0, bs) in enumerate(results): + print('test_callback', im0.shape) + print('test_callback', bs) + boxes = result.boxes # Boxes object for bbox outputs + print(boxes) + + +def test_result(): + model = YOLO('yolov8n-seg.pt') + res = model([SOURCE, SOURCE]) + res[0].plot(show_conf=False) + res[0] = res[0].cpu().numpy() + print(res[0].path, res[0].masks.masks) + + model = YOLO('yolov8n.pt') + res = model(SOURCE) + res[0].plot() + print(res[0].path) + + model = YOLO('yolov8n-cls.pt') + res = model(SOURCE) + res[0].plot() + print(res[0].path) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/__init__.py new file mode 100644 index 0000000..be989f2 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/__init__.py @@ -0,0 +1,8 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +__version__ = '8.0.58' + +from ultralytics.yolo.engine.model import YOLO +from ultralytics.yolo.utils.checks import check_yolo as checks + +__all__ = '__version__', 'YOLO', 
'checks' # allow simpler import diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/assets/bus.jpg b/src/train_utils/train_models/models/ultralytics/ultralytics/assets/bus.jpg new file mode 100644 index 0000000..40eaaf5 Binary files /dev/null and b/src/train_utils/train_models/models/ultralytics/ultralytics/assets/bus.jpg differ diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/assets/zidane.jpg b/src/train_utils/train_models/models/ultralytics/ultralytics/assets/zidane.jpg new file mode 100644 index 0000000..eeab1cd Binary files /dev/null and b/src/train_utils/train_models/models/ultralytics/ultralytics/assets/zidane.jpg differ diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/hub/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/__init__.py new file mode 100644 index 0000000..bd2ae2c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/__init__.py @@ -0,0 +1,95 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import requests + +from ultralytics.hub.auth import Auth +from ultralytics.hub.session import HUBTrainingSession +from ultralytics.hub.utils import PREFIX, split_key +from ultralytics.yolo.engine.model import YOLO +from ultralytics.yolo.utils import LOGGER, emojis + + +def start(key=''): + """ + Start training models with Ultralytics HUB. Usage: from ultralytics.hub import start; start('API_KEY') + """ + auth = Auth(key) + if not auth.get_state(): + model_id = request_api_key(auth) + else: + _, model_id = split_key(key) + + if not model_id: + raise ConnectionError(emojis('Connecting with global API key is not currently supported. ❌')) + + session = HUBTrainingSession(model_id=model_id, auth=auth) + session.check_disk_space() + + model = YOLO(model=session.model_file, session=session) + model.train(**session.train_args) + + +def request_api_key(auth, max_attempts=3): + """ + Prompt the user to input their API key. Returns the model ID. + """ + import getpass + for attempts in range(max_attempts): + LOGGER.info(f'{PREFIX}Login. 
Attempt {attempts + 1} of {max_attempts}') + input_key = getpass.getpass('Enter your Ultralytics HUB API key:\n') + auth.api_key, model_id = split_key(input_key) + + if auth.authenticate(): + LOGGER.info(f'{PREFIX}Authenticated ✅') + return model_id + + LOGGER.warning(f'{PREFIX}Invalid API key ⚠️\n') + + raise ConnectionError(emojis(f'{PREFIX}Failed to authenticate ❌')) + + +def reset_model(key=''): + # Reset a trained model to an untrained state + api_key, model_id = split_key(key) + r = requests.post('https://api.ultralytics.com/model-reset', json={'apiKey': api_key, 'modelId': model_id}) + + if r.status_code == 200: + LOGGER.info(f'{PREFIX}Model reset successfully') + return + LOGGER.warning(f'{PREFIX}Model reset failure {r.status_code} {r.reason}') + + +def export_fmts_hub(): + # Returns a list of HUB-supported export formats + from ultralytics.yolo.engine.exporter import export_formats + return list(export_formats()['Argument'][1:]) + ['ultralytics_tflite', 'ultralytics_coreml'] + + +def export_model(key='', format='torchscript'): + # Export a model to all formats + assert format in export_fmts_hub(), f"Unsupported export format '{format}', valid formats are {export_fmts_hub()}" + api_key, model_id = split_key(key) + r = requests.post('https://api.ultralytics.com/export', + json={ + 'apiKey': api_key, + 'modelId': model_id, + 'format': format}) + assert r.status_code == 200, f'{PREFIX}{format} export failure {r.status_code} {r.reason}' + LOGGER.info(f'{PREFIX}{format} export started ✅') + + +def get_export(key='', format='torchscript'): + # Get an exported model dictionary with download URL + assert format in export_fmts_hub, f"Unsupported export format '{format}', valid formats are {export_fmts_hub}" + api_key, model_id = split_key(key) + r = requests.post('https://api.ultralytics.com/get-export', + json={ + 'apiKey': api_key, + 'modelId': model_id, + 'format': format}) + assert r.status_code == 200, f'{PREFIX}{format} get_export failure {r.status_code} {r.reason}' + return r.json() + + +if __name__ == '__main__': + start() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/hub/auth.py b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/auth.py new file mode 100644 index 0000000..8655b6f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/auth.py @@ -0,0 +1,70 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import requests + +from ultralytics.hub.utils import HUB_API_ROOT, request_with_credentials +from ultralytics.yolo.utils import is_colab + +API_KEY_PATH = 'https://hub.ultralytics.com/settings?tab=api+keys' + + +class Auth: + id_token = api_key = model_key = False + + def __init__(self, api_key=None): + self.api_key = self._clean_api_key(api_key) + self.authenticate() if self.api_key else self.auth_with_cookies() + + @staticmethod + def _clean_api_key(key: str) -> str: + """Strip model from key if present""" + separator = '_' + return key.split(separator)[0] if separator in key else key + + def authenticate(self) -> bool: + """Attempt to authenticate with server""" + try: + header = self.get_auth_header() + if header: + r = requests.post(f'{HUB_API_ROOT}/v1/auth', headers=header) + if not r.json().get('success', False): + raise ConnectionError('Unable to authenticate.') + return True + raise ConnectionError('User has not authenticated locally.') + except ConnectionError: + self.id_token = self.api_key = False # reset invalid + return False + + def auth_with_cookies(self) -> bool: + """ + Attempt to fetch 
authentication via cookies and set id_token. + User must be logged in to HUB and running in a supported browser. + """ + if not is_colab(): + return False # Currently only works with Colab + try: + authn = request_with_credentials(f'{HUB_API_ROOT}/v1/auth/auto') + if authn.get('success', False): + self.id_token = authn.get('data', {}).get('idToken', None) + self.authenticate() + return True + raise ConnectionError('Unable to fetch browser authentication details.') + except ConnectionError: + self.id_token = False # reset invalid + return False + + def get_auth_header(self): + if self.id_token: + return {'authorization': f'Bearer {self.id_token}'} + elif self.api_key: + return {'x-api-key': self.api_key} + else: + return None + + def get_state(self) -> bool: + """Get the authentication state""" + return self.id_token or self.api_key + + def set_api_key(self, key: str): + """Get the authentication state""" + self.api_key = key diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/hub/session.py b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/session.py new file mode 100644 index 0000000..71cc719 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/session.py @@ -0,0 +1,134 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +import signal +import sys +from pathlib import Path +from time import sleep + +import requests + +from ultralytics.hub.utils import HUB_API_ROOT, check_dataset_disk_space, smart_request +from ultralytics.yolo.utils import LOGGER, PREFIX, __version__, checks, emojis, is_colab, threaded + +AGENT_NAME = f'python-{__version__}-colab' if is_colab() else f'python-{__version__}-local' + + +class HUBTrainingSession: + + def __init__(self, model_id, auth): + self.agent_id = None # identifies which instance is communicating with server + self.model_id = model_id + self.api_url = f'{HUB_API_ROOT}/v1/models/{model_id}' + self.auth_header = auth.get_auth_header() + self.rate_limits = {'metrics': 3.0, 'ckpt': 900.0, 'heartbeat': 300.0} # rate limits (seconds) + self.timers = {} # rate limit timers (seconds) + self.metrics_queue = {} # metrics queue + self.model = self._get_model() + self.alive = True + self._start_heartbeat() # start heartbeats + self._register_signal_handlers() + + def _register_signal_handlers(self): + signal.signal(signal.SIGTERM, self._handle_signal) + signal.signal(signal.SIGINT, self._handle_signal) + + def _handle_signal(self, signum, frame): + """ + Prevent heartbeats from being sent on Colab after kill. + This method does not use frame, it is included as it is + passed by signal. + """ + if self.alive is True: + LOGGER.info(f'{PREFIX}Kill signal received! ❌') + self._stop_heartbeat() + sys.exit(signum) + + def _stop_heartbeat(self): + """End the heartbeat loop""" + self.alive = False + + def upload_metrics(self): + payload = {'metrics': self.metrics_queue.copy(), 'type': 'metrics'} + smart_request('post', self.api_url, json=payload, headers=self.auth_header, code=2) + + def _get_model(self): + # Returns model from database by id + api_url = f'{HUB_API_ROOT}/v1/models/{self.model_id}' + + try: + response = smart_request('get', api_url, headers=self.auth_header, thread=False, code=0) + data = response.json().get('data', None) + + if data.get('status', None) == 'trained': + raise ValueError( + emojis(f'Model is already trained and uploaded to ' + f'https://hub.ultralytics.com/models/{self.model_id} 🚀')) + + if not data.get('data', None): + raise ValueError('Dataset may still be processing. 
Please wait a minute and try again.') # RF fix + self.model_id = data['id'] + + # TODO: restore when server keys when dataset URL and GPU train is working + + self.train_args = { + 'batch': data['batch_size'], + 'epochs': data['epochs'], + 'imgsz': data['imgsz'], + 'patience': data['patience'], + 'device': data['device'], + 'cache': data['cache'], + 'data': data['data']} + + self.model_file = data.get('cfg', data['weights']) + self.model_file = checks.check_yolov5u_filename(self.model_file, verbose=False) # YOLOv5->YOLOv5u + + return data + except requests.exceptions.ConnectionError as e: + raise ConnectionRefusedError('ERROR: The HUB server is not online. Please try again later.') from e + except Exception: + raise + + def check_disk_space(self): + if not check_dataset_disk_space(self.model['data']): + raise MemoryError('Not enough disk space') + + def upload_model(self, epoch, weights, is_best=False, map=0.0, final=False): + # Upload a model to HUB + if Path(weights).is_file(): + with open(weights, 'rb') as f: + file = f.read() + else: + LOGGER.warning(f'{PREFIX}WARNING ⚠️ Model upload issue. Missing model {weights}.') + file = None + url = f'{self.api_url}/upload' + # url = 'http://httpbin.org/post' # for debug + data = {'epoch': epoch} + if final: + data.update({'type': 'final', 'map': map}) + smart_request('post', + url, + data=data, + files={'best.pt': file}, + headers=self.auth_header, + retry=10, + timeout=3600, + thread=False, + progress=True, + code=4) + else: + data.update({'type': 'epoch', 'isBest': bool(is_best)}) + smart_request('post', url, data=data, files={'last.pt': file}, headers=self.auth_header, code=3) + + @threaded + def _start_heartbeat(self): + while self.alive: + r = smart_request('post', + f'{HUB_API_ROOT}/v1/agent/heartbeat/models/{self.model_id}', + json={ + 'agent': AGENT_NAME, + 'agentId': self.agent_id}, + headers=self.auth_header, + retry=0, + code=5, + thread=False) # already in a thread + self.agent_id = r.json().get('data', {}).get('agentId', None) + sleep(self.rate_limits['heartbeat']) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/hub/utils.py b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/utils.py new file mode 100644 index 0000000..267e28b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/hub/utils.py @@ -0,0 +1,215 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import os +import platform +import shutil +import sys +import threading +import time +from pathlib import Path +from random import random + +import requests +from tqdm import tqdm + +from ultralytics.yolo.utils import (DEFAULT_CFG_DICT, ENVIRONMENT, LOGGER, ONLINE, RANK, SETTINGS, TESTS_RUNNING, + TQDM_BAR_FORMAT, TryExcept, __version__, colorstr, emojis, get_git_origin_url, + is_colab, is_git_dir, is_pip_package) + +PREFIX = colorstr('Ultralytics HUB: ') +HELP_MSG = 'If this issue persists please visit https://github.com/ultralytics/hub/issues for assistance.' +HUB_API_ROOT = os.environ.get('ULTRALYTICS_HUB_API', 'https://api.ultralytics.com') + + +def check_dataset_disk_space(url='https://ultralytics.com/assets/coco128.zip', sf=2.0): + # Check that url fits on disk with safety factor sf, i.e. 
require 2GB free if url size is 1GB with sf=2.0 + gib = 1 << 30 # bytes per GiB + data = int(requests.head(url).headers['Content-Length']) / gib # dataset size (GB) + total, used, free = (x / gib for x in shutil.disk_usage('/')) # bytes + LOGGER.info(f'{PREFIX}{data:.3f} GB dataset, {free:.1f}/{total:.1f} GB free disk space') + if data * sf < free: + return True # sufficient space + LOGGER.warning(f'{PREFIX}WARNING: Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, ' + f'training cancelled ❌. Please free {data * sf - free:.1f} GB additional disk space and try again.') + return False # insufficient space + + +def request_with_credentials(url: str) -> any: + """ Make an ajax request with cookies attached """ + if not is_colab(): + raise OSError('request_with_credentials() must run in a Colab environment') + from google.colab import output # noqa + from IPython import display # noqa + display.display( + display.Javascript(""" + window._hub_tmp = new Promise((resolve, reject) => { + const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000) + fetch("%s", { + method: 'POST', + credentials: 'include' + }) + .then((response) => resolve(response.json())) + .then((json) => { + clearTimeout(timeout); + }).catch((err) => { + clearTimeout(timeout); + reject(err); + }); + }); + """ % url)) + return output.eval_js('_hub_tmp') + + +def split_key(key=''): + """ + Verify and split a 'api_key[sep]model_id' string, sep is one of '.' or '_' + + Args: + key (str): The model key to split. If not provided, the user will be prompted to enter it. + + Returns: + Tuple[str, str]: A tuple containing the API key and model ID. + """ + + import getpass + + error_string = emojis(f'{PREFIX}Invalid API key ⚠️\n') # error string + if not key: + key = getpass.getpass('Enter model key: ') + sep = '_' if '_' in key else '.' if '.' in key else None # separator + assert sep, error_string + api_key, model_id = key.split(sep) + assert len(api_key) and len(model_id), error_string + return api_key, model_id + + +def requests_with_progress(method, url, **kwargs): + """ + Make an HTTP request using the specified method and URL, with an optional progress bar. + + Args: + method (str): The HTTP method to use (e.g. 'GET', 'POST'). + url (str): The URL to send the request to. + progress (bool, optional): Whether to display a progress bar. Defaults to False. + **kwargs: Additional keyword arguments to pass to the underlying `requests.request` function. + + Returns: + requests.Response: The response from the HTTP request. + + """ + progress = kwargs.pop('progress', False) + if not progress: + return requests.request(method, url, **kwargs) + response = requests.request(method, url, stream=True, **kwargs) + total = int(response.headers.get('content-length', 0)) # total size + pbar = tqdm(total=total, unit='B', unit_scale=True, unit_divisor=1024, bar_format=TQDM_BAR_FORMAT) + for data in response.iter_content(chunk_size=1024): + pbar.update(len(data)) + pbar.close() + return response + + +def smart_request(method, url, retry=3, timeout=30, thread=True, code=-1, verbose=True, progress=False, **kwargs): + """ + Makes an HTTP request using the 'requests' library, with exponential backoff retries up to a specified timeout. + + Args: + method (str): The HTTP method to use for the request. Choices are 'post' and 'get'. + url (str): The URL to make the request to. + retry (int, optional): Number of retries to attempt before giving up. Default is 3. 
+ timeout (int, optional): Timeout in seconds after which the function will give up retrying. Default is 30. + thread (bool, optional): Whether to execute the request in a separate daemon thread. Default is True. + code (int, optional): An identifier for the request, used for logging purposes. Default is -1. + verbose (bool, optional): A flag to determine whether to print out to console or not. Default is True. + progress (bool, optional): Whether to show a progress bar during the request. Default is False. + **kwargs: Keyword arguments to be passed to the requests function specified in method. + + Returns: + requests.Response: The HTTP response object. If the request is executed in a separate thread, returns None. + + """ + retry_codes = (408, 500) # retry only these codes + + @TryExcept(verbose=verbose) + def func(func_method, func_url, **func_kwargs): + r = None # response + t0 = time.time() # initial time for timer + for i in range(retry + 1): + if (time.time() - t0) > timeout: + break + r = requests_with_progress(func_method, func_url, **func_kwargs) # i.e. get(url, data, json, files) + if r.status_code == 200: + break + try: + m = r.json().get('message', 'No JSON message.') + except AttributeError: + m = 'Unable to read JSON.' + if i == 0: + if r.status_code in retry_codes: + m += f' Retrying {retry}x for {timeout}s.' if retry else '' + elif r.status_code == 429: # rate limit + h = r.headers # response headers + m = f"Rate limit reached ({h['X-RateLimit-Remaining']}/{h['X-RateLimit-Limit']}). " \ + f"Please retry after {h['Retry-After']}s." + if verbose: + LOGGER.warning(f'{PREFIX}{m} {HELP_MSG} ({r.status_code} #{code})') + if r.status_code not in retry_codes: + return r + time.sleep(2 ** i) # exponential standoff + return r + + args = method, url + kwargs['progress'] = progress + if thread: + threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True).start() + else: + return func(*args, **kwargs) + + +class Traces: + + def __init__(self): + """ + Initialize Traces for error tracking and reporting if tests are not currently running. + """ + self.rate_limit = 3.0 # rate limit (seconds) + self.t = 0.0 # rate limit timer (seconds) + self.metadata = { + 'sys_argv_name': Path(sys.argv[0]).name, + 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', + 'python': platform.python_version(), + 'release': __version__, + 'environment': ENVIRONMENT} + self.enabled = \ + SETTINGS['sync'] and \ + RANK in (-1, 0) and \ + not TESTS_RUNNING and \ + ONLINE and \ + (is_pip_package() or get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git') + + def __call__(self, cfg, all_keys=False, traces_sample_rate=1.0): + """ + Sync traces data if enabled in the global settings + + Args: + cfg (IterableSimpleNamespace): Configuration for the task and mode. + all_keys (bool): Sync all items, not just non-default values. 
+ traces_sample_rate (float): Fraction of traces captured from 0.0 to 1.0 + """ + t = time.time() # current time + if self.enabled and random() < traces_sample_rate and (t - self.t) > self.rate_limit: + self.t = t # reset rate limit timer + cfg = vars(cfg) # convert type from IterableSimpleNamespace to dict + if not all_keys: # filter cfg + include_keys = {'task', 'mode'} # always include + cfg = { + k: (v.split(os.sep)[-1] if isinstance(v, str) and os.sep in v else v) + for k, v in cfg.items() if v != DEFAULT_CFG_DICT.get(k, None) or k in include_keys} + trace = {'uuid': SETTINGS['uuid'], 'cfg': cfg, 'metadata': self.metadata} + + # Send a request to the HUB API to sync analytics + smart_request('post', f'{HUB_API_ROOT}/v1/usage/anonymous', json=trace, code=3, retry=0, verbose=False) + + +# Run below code on hub/utils init ------------------------------------------------------------------------------------- +traces = Traces() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/README.md b/src/train_utils/train_models/models/ultralytics/ultralytics/models/README.md new file mode 100644 index 0000000..5c88b88 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/README.md @@ -0,0 +1,109 @@ +## Models + +Welcome to the Ultralytics Models directory! Here you will find a wide variety of pre-configured model configuration +files (`*.yaml`s) that can be used to create custom YOLO models. The models in this directory have been expertly crafted +and fine-tuned by the Ultralytics team to provide the best performance for a wide range of object detection and image +segmentation tasks. + +These model configurations cover a wide range of scenarios, from simple object detection to more complex tasks like +instance segmentation and object tracking. They are also designed to run efficiently on a variety of hardware platforms, +from CPUs to GPUs. Whether you are a seasoned machine learning practitioner or just getting started with YOLO, this +directory provides a great starting point for your custom model development needs. + +To get started, simply browse through the models in this directory and find one that best suits your needs. Once you've +selected a model, you can use the provided `*.yaml` file to train and deploy your custom YOLO model with ease. See full +details at the Ultralytics [Docs](https://docs.ultralytics.com), and if you need help or have any questions, feel free +to reach out to the Ultralytics team for support. So, don't wait, start creating your custom YOLO model now! + +### Usage + +Model `*.yaml` files may be used directly in the Command Line Interface (CLI) with a `yolo` command: + +```bash +yolo task=detect mode=train model=yolov8n.yaml data=coco128.yaml epochs=100 +``` + +They may also be used directly in a Python environment, and accepts the same +[arguments](https://docs.ultralytics.com/usage/cfg/) as in the CLI example above: + +```python +from ultralytics import YOLO + +model = YOLO("model.yaml") # build a YOLOv8n model from scratch +# YOLO("model.pt") use pre-trained model if available +model.info() # display model information +model.train(data="coco128.yaml", epochs=100) # train the model +``` + +## Pre-trained Model Architectures + +Ultralytics supports many model architectures. Visit [models](#) page to view detailed information and usage. +Any of these models can be used by loading their configs or pretrained checkpoints if available. + +What to add your model architecture? 
[Here's](#) how you can contribute
+
+### 1. YOLOv8
+
+**About** - Cutting-edge Detection, Segmentation and Classification models developed by Ultralytics.
+**Citation** -
+Available Models:
+
+- Detection - `yolov8n`, `yolov8s`, `yolov8m`, `yolov8l`, `yolov8x`
+- Instance Segmentation - `yolov8n-seg`, `yolov8s-seg`, `yolov8m-seg`, `yolov8l-seg`, `yolov8x-seg`
+- Classification - `yolov8n-cls`, `yolov8s-cls`, `yolov8m-cls`, `yolov8l-cls`, `yolov8x-cls`
+
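+As a minimal sketch of how these variants are used (the checkpoint and image names below are placeholders, and the calls mirror the Usage section above), any entry from the lists can be passed to the `YOLO` class by name:
+
+```python
+from ultralytics import YOLO
+
+# Load one variant from each task family by checkpoint name
+det_model = YOLO("yolov8n.pt")      # detection
+seg_model = YOLO("yolov8n-seg.pt")  # instance segmentation
+cls_model = YOLO("yolov8n-cls.pt")  # classification
+
+# Run inference on an example image (path is a placeholder);
+# results[0].boxes holds the predicted bounding boxes
+results = det_model("bus.jpg")
+print(results[0].boxes)
+```
+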
+**Performance**
+
+### Detection
+
+| Model | size<br>(pixels) | mAPval<br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+| ----- | ---------------- | --------------- | ------------------------- | ------------------------------ | ------------- | ------------ |
+| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n.pt) | 640 | 37.3 | 80.4 | 0.99 | 3.2 | 8.7 |
+| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s.pt) | 640 | 44.9 | 128.4 | 1.20 | 11.2 | 28.6 |
+| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m.pt) | 640 | 50.2 | 234.7 | 1.83 | 25.9 | 78.9 |
+| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
+| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
+
+### Segmentation
+
+| Model | size<br>(pixels) | mAPbox<br>50-95 | mAPmask<br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+| ----- | ---------------- | --------------- | ---------------- | ------------------------- | ------------------------------ | ------------- | ------------ |
+| [YOLOv8n-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-seg.pt) | 640 | 36.7 | 30.5 | 96.1 | 1.21 | 3.4 | 12.6 |
+| [YOLOv8s-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-seg.pt) | 640 | 44.6 | 36.8 | 155.7 | 1.47 | 11.8 | 42.6 |
+| [YOLOv8m-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-seg.pt) | 640 | 49.9 | 40.8 | 317.0 | 2.18 | 27.3 | 110.2 |
+| [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
+| [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
+
+### Classification
+
+| Model | size<br>(pixels) | acc<br>top1 | acc<br>top5 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) at 640 |
+| ----- | ---------------- | ----------- | ----------- | ------------------------- | ------------------------------ | ------------- | ------------------- |
+| [YOLOv8n-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8n-cls.pt) | 224 | 66.6 | 87.0 | 12.9 | 0.31 | 2.7 | 4.3 |
+| [YOLOv8s-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8s-cls.pt) | 224 | 72.3 | 91.1 | 23.4 | 0.35 | 6.4 | 13.5 |
+| [YOLOv8m-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8m-cls.pt) | 224 | 76.4 | 93.2 | 85.4 | 0.62 | 17.0 | 42.7 |
+| [YOLOv8l-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8l-cls.pt) | 224 | 78.0 | 94.1 | 163.0 | 0.87 | 37.5 | 99.7 |
+| [YOLOv8x-cls](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov8x-cls.pt) | 224 | 78.4 | 94.3 | 232.0 | 1.01 | 57.4 | 154.8 |
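+The accuracy columns above are mAPval 50-95 (detection), mAPbox/mAPmask 50-95 (segmentation) and top-1/top-5 accuracy (classification); the speed columns will vary with hardware and export settings. A minimal sketch of re-checking a detection value with the `val` mode (assuming the bundled `coco.yaml` dataset config and enough disk space for the COCO download):
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov8n.pt")             # pretrained detection checkpoint
+metrics = model.val(data="coco.yaml")  # run validation on the COCO val split
+print(metrics.box.map)                 # mAP 50-95, should land near the table value
+```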
+
+### 2. YOLOv5u
+
+**About** - Anchor-free YOLOv5 models with a new detection head and a better speed-accuracy tradeoff.
+**Citation** - +Available Models: + +- Detection - `yolov5nu`, `yolov5su`, `yolov5mu`, `yolov5lu`, `yolov5xu` + +
+**Performance**
+
+### Detection
+
+| Model | size<br>(pixels) | mAPval<br>50-95 | Speed<br>CPU ONNX<br>(ms) | Speed<br>A100 TensorRT<br>(ms) | params<br>(M) | FLOPs<br>(B) |
+| ----- | ---------------- | --------------- | ------------------------- | ------------------------------ | ------------- | ------------ |
+| [YOLOv5nu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5nu.pt) | 640 | 34.3 | 73.6 | 1.06 | 2.6 | 7.7 |
+| [YOLOv5su](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5su.pt) | 640 | 43.0 | 120.7 | 1.27 | 9.1 | 24.0 |
+| [YOLOv5mu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5mu.pt) | 640 | 49.0 | 233.9 | 1.86 | 25.1 | 64.2 |
+| [YOLOv5lu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5lu.pt) | 640 | 52.2 | 408.4 | 2.50 | 53.2 | 135.0 |
+| [YOLOv5xu](https://github.com/ultralytics/assets/releases/download/v0.0.0/yolov5xu.pt) | 640 | 53.2 | 763.2 | 3.81 | 97.2 | 246.4 |
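+The YOLOv5u checkpoints are drop-in replacements in the same API; a brief sketch (again assuming the checkpoint name above resolves to a downloadable asset):
+
+```python
+from ultralytics import YOLO
+
+model = YOLO("yolov5nu.pt")  # anchor-free YOLOv5n with the updated detection head
+model.train(data="coco128.yaml", epochs=100)  # fine-tune exactly as with the YOLOv8 models
+```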
diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-spp.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-spp.yaml new file mode 100644 index 0000000..5d6794f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-spp.yaml @@ -0,0 +1,47 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-tiny.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-tiny.yaml new file mode 100644 index 0000000..d7921d3 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3-tiny.yaml @@ -0,0 +1,38 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc]], # Detect(P4, P5) + ] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3.yaml new file mode 100644 index 0000000..3ecb642 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v3/yolov3.yaml @@ 
-0,0 +1,47 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5-p6.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5-p6.yaml new file mode 100644 index 0000000..ce9d354 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5-p6.yaml @@ -0,0 +1,60 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov5n-p6.yaml' will call yolov5-p6.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.33, 1.25, 1024] + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5.yaml new file mode 100644 index 0000000..c9d6f3e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v5/yolov5.yaml @@ -0,0 +1,49 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov5n.yaml' will call yolov5.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.33, 1.25, 1024] + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-cls.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-cls.yaml new file mode 100644 index 0000000..0847ed1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-cls.yaml @@ -0,0 +1,29 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-cls image classification model. For Usage examples see https://docs.ultralytics.com/tasks/classify + +# Parameters +nc: 1000 # number of classes +scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 1024] + l: [1.00, 1.00, 1024] + x: [1.00, 1.25, 1024] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + +# YOLOv8.0n head +head: + - [-1, 1, Classify, [nc]] # Classify diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p2.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p2.yaml new file mode 100644 index 0000000..f91a98c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p2.yaml @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8 object detection model with P2-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0-p2 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 2], 1, Concat, [1]] # cat backbone P2 + - [-1, 3, C2f, [128]] # 18 (P2/4-xsmall) + + - [-1, 1, Conv, [128, 3, 2]] + - [[-1, 15], 1, Concat, [1]] # cat head P3 + - [-1, 3, C2f, [256]] # 21 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 24 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 27 (P5/32-large) + + - [[18, 21, 24, 27], 1, Detect, [nc]] # Detect(P2, P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p6.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p6.yaml new file mode 100644 index 0000000..ab8a68e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-p6.yaml @@ -0,0 +1,56 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8 object detection model with P3-P6 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0x6 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [768, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 11 + +# YOLOv8.0x6 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [768, False]] # 14 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 17 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [768, False]] # 26 (P5/32-large) + + - [-1, 1, Conv, [768, 3, 2]] + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge) + + - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-seg.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-seg.yaml new file mode 100644 index 0000000..23a52a9 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8-seg.yaml @@ -0,0 +1,46 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n-seg.yaml' will call yolov8-seg.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] + s: [0.33, 0.50, 1024] + m: [0.67, 0.75, 768] + l: [1.00, 1.00, 512] + x: [1.00, 1.25, 512] + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8.yaml new file mode 100644 index 0000000..addaa4e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8.yaml @@ -0,0 +1,46 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# YOLOv8 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect + +# Parameters +nc: 80 # number of classes +scales: # model compound scaling constants, i.e. 
'model=yolov8n.yaml' will call yolov8.yaml with scale 'n' + # [depth, width, max_channels] + n: [0.33, 0.25, 1024] # YOLOv8n summary: 225 layers, 3157200 parameters, 3157184 gradients, 8.9 GFLOPs + s: [0.33, 0.50, 1024] # YOLOv8s summary: 225 layers, 11166560 parameters, 11166544 gradients, 28.8 GFLOPs + m: [0.67, 0.75, 768] # YOLOv8m summary: 295 layers, 25902640 parameters, 25902624 gradients, 79.3 GFLOPs + l: [1.00, 1.00, 512] # YOLOv8l summary: 365 layers, 43691520 parameters, 43691504 gradients, 165.7 GFLOPs + x: [1.00, 1.25, 512] # YOLOv8x summary: 365 layers, 68229648 parameters, 68229632 gradients, 258.5 GFLOPs + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8l.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8l.yaml new file mode 100644 index 0000000..afbe4c6 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8l.yaml @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.00 # scales module repeats +width_multiple: 1.00 # scales convolution channels + +# YOLOv8.0l backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [512, True]] + - [-1, 1, SPPF, [512, 5]] # 9 + +# YOLOv8.0l head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [512]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8m.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8m.yaml new file mode 100644 index 0000000..a17763c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8m.yaml @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, 
GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # scales module repeats +width_multiple: 0.75 # scales convolution channels + +# YOLOv8.0m backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [768, True]] + - [-1, 1, SPPF, [768, 5]] # 9 + +# YOLOv8.0m head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [768]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8n.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8n.yaml new file mode 100644 index 0000000..2519b40 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8n.yaml @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # scales module repeats +width_multiple: 0.25 # scales convolution channels + +# YOLOv8.0n backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0n head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8s.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8s.yaml new file mode 100644 index 0000000..7905654 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8s.yaml @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # scales module repeats +width_multiple: 0.50 # scales convolution channels + +# YOLOv8.0s backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, 
True]] + - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [1024, True]] + - [-1, 1, SPPF, [1024, 5]] # 9 + +# YOLOv8.0s head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [1024]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x.yaml new file mode 100644 index 0000000..d254523 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x.yaml @@ -0,0 +1,40 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.00 # scales module repeats +width_multiple: 1.25 # scales convolution channels + +# YOLOv8.0x backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [512, True]] + - [-1, 1, SPPF, [512, 5]] # 9 + +# YOLOv8.0x head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2f, [512]] # 12 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2f, [256]] # 15 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 12], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2f, [512]] # 18 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 9], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2f, [512]] # 21 (P5/32-large) + + - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x6.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x6.yaml new file mode 100644 index 0000000..8ffcdea --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/models/v8/yolov8x6.yaml @@ -0,0 +1,50 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.00 # scales module repeats +width_multiple: 1.25 # scales convolution channels + +# YOLOv8.0x6 backbone +backbone: + # [from, repeats, module, args] + - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2 + - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4 + - [-1, 3, C2f, [128, True]] + - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8 + - [-1, 6, C2f, [256, True]] + - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16 + - [-1, 6, C2f, [512, True]] + - [-1, 1, Conv, [512, 3, 2]] # 7-P5/32 + - [-1, 3, C2f, [512, True]] + - [-1, 1, Conv, [512, 3, 2]] # 9-P6/64 + - [-1, 3, C2f, [512, True]] + - [-1, 1, SPPF, [512, 5]] # 11 + +# YOLOv8.0x6 head +head: + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 8], 1, Concat, [1]] # cat backbone P5 + - [-1, 3, C2, [512, False]] # 14 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 6], 1, Concat, [1]] # cat backbone P4 + - [-1, 3, C2, [512, False]] # 
17 + + - [-1, 1, nn.Upsample, [None, 2, 'nearest']] + - [[-1, 4], 1, Concat, [1]] # cat backbone P3 + - [-1, 3, C2, [256, False]] # 20 (P3/8-small) + + - [-1, 1, Conv, [256, 3, 2]] + - [[-1, 17], 1, Concat, [1]] # cat head P4 + - [-1, 3, C2, [512, False]] # 23 (P4/16-medium) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 14], 1, Concat, [1]] # cat head P5 + - [-1, 3, C2, [512, False]] # 26 (P5/32-large) + + - [-1, 1, Conv, [512, 3, 2]] + - [[-1, 11], 1, Concat, [1]] # cat head P6 + - [-1, 3, C2, [512, False]] # 29 (P6/64-xlarge) + + - [[20, 23, 26, 29], 1, Detect, [nc]] # Detect(P3, P4, P5, P6) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/nn/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autobackend.py b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autobackend.py new file mode 100644 index 0000000..7e35508 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autobackend.py @@ -0,0 +1,447 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +import ast +import contextlib +import json +import platform +import zipfile +from collections import OrderedDict, namedtuple +from pathlib import Path +from urllib.parse import urlparse + +import cv2 +import numpy as np +import torch +import torch.nn as nn +from PIL import Image + +from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, yaml_load +from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_version, check_yaml +from ultralytics.yolo.utils.downloads import attempt_download_asset, is_url +from ultralytics.yolo.utils.ops import xywh2xyxy + + +def check_class_names(names): + # Check class names. Map imagenet class codes to human-readable names if required. Convert lists to dicts. + if isinstance(names, list): # names is a list + names = dict(enumerate(names)) # convert to dict + if isinstance(names, dict): + # convert 1) string keys to int, i.e. '0' to 0, and non-string values to strings, i.e. True to 'True' + names = {int(k): str(v) for k, v in names.items()} + n = len(names) + if max(names.keys()) >= n: + raise KeyError(f'{n}-class dataset requires class indices 0-{n - 1}, but you have invalid class indices ' + f'{min(names.keys())}-{max(names.keys())} defined in your dataset YAML.') + if isinstance(names[0], str) and names[0].startswith('n0'): # imagenet class codes, i.e. 'n01440764' + map = yaml_load(ROOT / 'datasets/ImageNet.yaml')['map'] # human-readable names + names = {k: map[v] for k, v in names.items()} + return names + + +class AutoBackend(nn.Module): + + def __init__(self, + weights='yolov8n.pt', + device=torch.device('cpu'), + dnn=False, + data=None, + fp16=False, + fuse=True, + verbose=True): + """ + MultiBackend class for python inference on various platforms using Ultralytics YOLO. + + Args: + weights (str): The path to the weights file. Default: 'yolov8n.pt' + device (torch.device): The device to run the model on. + dnn (bool): Use OpenCV's DNN module for inference if True, defaults to False. + data (str), (Path): Additional data.yaml file for class names, optional + fp16 (bool): If True, use half precision. Default: False + fuse (bool): Whether to fuse the model or not. Default: True + verbose (bool): Whether to run in verbose mode or not. 
Default: True + + Supported formats and their naming conventions: + | Format | Suffix | + |-----------------------|------------------| + | PyTorch | *.pt | + | TorchScript | *.torchscript | + | ONNX Runtime | *.onnx | + | ONNX OpenCV DNN | *.onnx dnn=True | + | OpenVINO | *.xml | + | CoreML | *.mlmodel | + | TensorRT | *.engine | + | TensorFlow SavedModel | *_saved_model | + | TensorFlow GraphDef | *.pb | + | TensorFlow Lite | *.tflite | + | TensorFlow Edge TPU | *_edgetpu.tflite | + | PaddlePaddle | *_paddle_model | + """ + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + nn_module = isinstance(weights, torch.nn.Module) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) + fp16 &= pt or jit or onnx or engine or nn_module # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) + stride = 32 # default stride + model, metadata = None, None + cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton or nn_module): + w = attempt_download_asset(w) # download if not local + + # NOTE: special case: in-memory pytorch model + if nn_module: + model = weights.to(device) + model = model.fuse(verbose=verbose) if fuse else model + names = model.module.names if hasattr(model, 'module') else model.names # get class names + stride = max(int(model.stride.max()), 32) # model stride + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + pt = True + elif pt: # PyTorch + from ultralytics.nn.tasks import attempt_load_weights + model = attempt_load_weights(weights if isinstance(weights, list) else w, + device=device, + inplace=True, + fuse=fuse) + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) + model.half() if fp16 else model.float() + if extra_files['config.txt']: # load metadata dict + metadata = json.loads(extra_files['config.txt'], object_hook=lambda x: dict(x.items())) + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements('opencv-python>=4.5.4') + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + output_names = [x.name for x in session.get_outputs()] + metadata = session.get_modelmeta().custom_metadata_map # metadata + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch # noqa + ie = Core() + w = Path(w) + if not w.is_file(): # if not *.xml + w = next(w.glob('*.xml')) # get *.xml file from *_openvino_model dir + network = ie.read_model(model=str(w), weights=w.with_suffix('.bin')) + if 
network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout('NCHW')) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for NCS2 + metadata = w.parent / 'metadata.yaml' + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + try: + import tensorrt as trt # noqa https://developer.nvidia.com/nvidia-tensorrt-download + except ImportError: + if LINUX: + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt # noqa + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + # Read file + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + meta_len = int.from_bytes(f.read(4), byteorder='little') # read metadata length + metadata = json.loads(f.read(meta_len).decode('utf-8')) # read metadata + model = runtime.deserialize_cuda_engine(f.read()) # read engine + context = model.create_execution_context() + bindings = OrderedDict() + output_names = [] + fp16 = False # default updated below + dynamic = False + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic + dynamic = True + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) + if dtype == np.float16: + fp16 = True + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + metadata = dict(model.user_defined_metadata) + elif saved_model: # TF SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + metadata = Path(w) / 'metadata.yaml' + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + from ultralytics.yolo.engine.exporter import gd_outputs + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) + elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except 
ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate + if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + else: # TFLite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + # load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, 'r') as model: + meta_file = model.namelist()[0] + metadata = ast.literal_eval(model.read(meta_file).decode('utf-8')) + elif tfjs: # TF.js + raise NotImplementedError('YOLOv8 TF.js inference is not supported') + elif paddle: # PaddlePaddle + LOGGER.info(f'Loading {w} for PaddlePaddle inference...') + check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + import paddle.inference as pdi # noqa + w = Path(w) + if not w.is_file(): # if not *.pdmodel + w = next(w.rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir + config = pdi.Config(str(w), str(w.with_suffix('.pdiparams'))) + if cuda: + config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) + predictor = pdi.create_predictor(config) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() + metadata = w.parents[1] / 'metadata.yaml' + elif triton: # NVIDIA Triton Inference Server + LOGGER.info('Triton Inference Server not supported...') + ''' + TODO: + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith("tensorflow") + ''' + else: + from ultralytics.yolo.engine.exporter import export_formats + raise TypeError(f"model='{w}' is not a supported model format. " + 'See https://docs.ultralytics.com/modes/predict for help.' + f'\n\n{export_formats()}') + + # Load external metadata YAML + if isinstance(metadata, (str, Path)) and Path(metadata).exists(): + metadata = yaml_load(metadata) + if metadata: + for k, v in metadata.items(): + if k in ('stride', 'batch'): + metadata[k] = int(v) + elif k in ('imgsz', 'names') and isinstance(v, str): + metadata[k] = eval(v) + stride = metadata['stride'] + task = metadata['task'] + batch = metadata['batch'] + imgsz = metadata['imgsz'] + names = metadata['names'] + elif not (pt or triton or nn_module): + LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'") + + # Check names + if 'names' not in locals(): # names missing + names = self._apply_default_class_names(data) + names = check_class_names(names) + + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False): + """ + Runs inference on the YOLOv8 MultiBackend model. + + Args: + im (torch.Tensor): The image tensor to perform inference on. 
+ augment (bool): whether to perform data augmentation during inference, defaults to False + visualize (bool): whether to visualize the output predictions, defaults to False + + Returns: + (tuple): Tuple containing the raw output tensor, and processed output for visualization (if visualize=True) + """ + b, ch, h, w = im.shape # batch, channel, height, width + if self.fp16 and im.dtype != torch.float16: + im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) + + if self.pt or self.nn_module: # PyTorch + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + elif self.jit: # TorchScript + y = self.model(im) + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) + elif self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + y = list(self.executable_network([im]).values()) + elif self.engine: # TensorRT + if self.dynamic and im.shape != self.bindings['images'].shape: + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) + s = self.bindings['images'].shape + assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = [self.bindings[x].data for x in sorted(self.output_names)] + elif self.coreml: # CoreML + im = im[0].cpu().numpy() + im_pil = Image.fromarray((im * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im_pil}) # coordinates are xywh normalized + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float) + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + elif len(y) == 1: # classification model + y = list(y.values()) + elif len(y) == 2: # segmentation model + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) + elif self.paddle: # PaddlePaddle + im = im.cpu().numpy().astype(np.float32) + self.input_handle.copy_from_cpu(im) + self.predictor.run() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.cpu().numpy() + if self.saved_model: # SavedModel + y = self.model(im, training=False) if self.keras else self.model(im) + if not isinstance(y, list): + y = [y] + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)) + if len(y) == 2 and len(self.names) == 999: # segments and names not defined + ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0) # index of protos, boxes + nc = y[ib].shape[1] - y[ip].shape[3] - 4 # y = (1, 160, 160, 32), (1, 116, 8400) + self.names = {i: f'class{i}' for i in range(nc)} + else: # Lite or Edge TPU + input = self.input_details[0] + int8 = input['dtype'] == np.int8 # is TFLite 
quantized int8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.int8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + # TF segment fixes: export is reversed vs ONNX export and protos are transposed + if len(y) == 2: # segment with (det, proto) output order reversed + if len(y[1].shape) != 4: + y = list(reversed(y)) # should be y = (1, 116, 8400), (1, 160, 160, 32) + y[1] = np.transpose(y[1], (0, 3, 1, 2)) # should be y = (1, 116, 8400), (1, 32, 160, 160) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + # y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels + + # for x in y: + # print(type(x), len(x)) if isinstance(x, (list, tuple)) else print(type(x), x.shape) # debug shapes + if isinstance(y, (list, tuple)): + return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] + else: + return self.from_numpy(y) + + def from_numpy(self, x): + """ + Convert a numpy array to a tensor. + + Args: + x (np.ndarray): The array to be converted. + + Returns: + (torch.Tensor): The converted tensor + """ + return torch.tensor(x).to(self.device) if isinstance(x, np.ndarray) else x + + def warmup(self, imgsz=(1, 3, 640, 640)): + """ + Warm up the model by running one forward pass with a dummy input. + + Args: + imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width) + + Returns: + (None): This method runs the forward pass and don't return any value + """ + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): + im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup + + @staticmethod + def _apply_default_class_names(data): + with contextlib.suppress(Exception): + return yaml_load(check_yaml(data))['names'] + return {i: f'class{i}' for i in range(999)} # return default if above errors + + @staticmethod + def _model_type(p='path/to/model.pt'): + """ + This function takes a path to a model file and returns the model type + + Args: + p: path to the model file. Defaults to path/to/model.pt + """ + # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] + from ultralytics.yolo.engine.exporter import export_formats + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False) and not isinstance(p, str): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) + return types + [triton] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autoshape.py b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autoshape.py new file mode 100644 index 0000000..3c983dc --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/autoshape.py @@ -0,0 +1,234 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Common modules +""" + +from copy import copy +from pathlib import Path + +import cv2 +import numpy as np +import requests +import torch +import torch.nn as nn +from PIL import Image, ImageOps +from torch.cuda import amp + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.ops import Profile, make_divisible, non_max_suppression, scale_boxes, xyxy2xywh +from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box +from ultralytics.yolo.utils.torch_utils import copy_attr, smart_inference_mode + + +class AutoShape(nn.Module): + # YOLOv8 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + + def __init__(self, model, verbose=True): + super().__init__() + if verbose: + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, AutoBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model + self.model = model.eval() + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.inplace = False # Detect.inplace=False for safe multithread inference + m.export = True # do not output loss values + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + @smart_inference_mode() + def forward(self, ims, size=640, augment=False, profile=False): + # Inference from various sources. 
For size(height=640, width=1280), RGB images example inputs are: + # file: ims = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] # list of images + + dt = (Profile(), Profile(), Profile()) + with dt[0]: + if isinstance(size, int): # expand + size = (size, size) + p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(ims, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference + + # Preprocess + n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(ims): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(ImageOps.exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(ImageOps.exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = max(size) / max(s) # gain + shape1.append([y * g for y in s]) + ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] if self.pt else size # inf shape + x = [LetterBox(shape1, auto=False)(image=im)['img'] for im in ims] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + + with amp.autocast(autocast): + # Inference + with dt[1]: + y = self.model(x, augment=augment) # forward + + # Postprocess + with dt[2]: + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_boxes(shape1, y[i][:, :4], shape0[i]) + + return Detections(ims, y, files, dt, self.names, x.shape) + + +class Detections: + # YOLOv8 detections class for inference results + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): + super().__init__() + d = pred[0].device # device + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations + self.ims = ims # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.times = times # profiling times + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = 
[x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) + self.s = tuple(shape) # inference BCHW shape + + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + s, crops = '', [] + for i, (im, pred) in enumerate(zip(self.ims, self.pred)): + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if pred.shape[0]: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') + if show or save or render or crop: + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({ + 'box': box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) + else: # all others + annotator.box_label(box, label if labels else '', color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if show: + im.show(self.files[i]) # show + if save: + f = self.files[i] + im.save(save_dir / f) # save + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") + if render: + self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms preprocess, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops + + def show(self, labels=True): + self._run(show=True, labels=labels) # show results + + def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir + self._run(save=True, labels=labels, save_dir=save_dir) # save results + + def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None + return self._run(crop=True, save=save, save_dir=save_dir) # crop results + + def render(self, labels=True): + self._run(render=True, labels=labels) # render results + return self.ims + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + import pandas + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pandas.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 
'for result in results.tolist():' + r = range(self.n) # iterable + x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def print(self): + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) + return self.n + + def __str__(self): # override print(results) + return self._run(pprint=True) # print results + + def __repr__(self): + return f'YOLOv8 {self.__class__} instance\n' + self.__str__() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/nn/modules.py b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/modules.py new file mode 100644 index 0000000..ddf0085 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/modules.py @@ -0,0 +1,471 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Common modules +""" + +import math + +import torch +import torch.nn as nn + +from ultralytics.yolo.utils.tal import dist2bbox, make_anchors + + +def autopad(k, p=None, d=1): # kernel, padding, dilation + # Pad to 'same' shape outputs + if d > 1: + k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +class Conv(nn.Module): + # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): + super().__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def forward_fuse(self, x): + return self.act(self.conv(x)) + + +class DWConv(Conv): + # Depth-wise convolution + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) + + +class DWConvTranspose2d(nn.ConvTranspose2d): + # Depth-wise transpose convolution + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out + super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) + + +class ConvTranspose(nn.Module): + # Convolution transpose 2d layer + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=2, s=2, p=0, bn=True, act=True): + super().__init__() + self.conv_transpose = nn.ConvTranspose2d(c1, c2, k, s, p, bias=not bn) + self.bn = nn.BatchNorm2d(c2) if bn else nn.Identity() + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + def forward(self, x): + return self.act(self.bn(self.conv_transpose(x))) + + def forward_fuse(self, x): + return self.act(self.conv_transpose(x)) + + +class DFL(nn.Module): + # Integral module of Distribution Focal Loss (DFL) + # Proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391 + def __init__(self, c1=16): + super().__init__() + self.conv = nn.Conv2d(c1, 1, 1, bias=False).requires_grad_(False) + x = torch.arange(c1, dtype=torch.float) + self.conv.weight.data[:] = nn.Parameter(x.view(1, c1, 1, 1)) + self.c1 = c1 + + def forward(self, x): 
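+        # Reshape (b, 4*c1, a) -> (b, c1, 4, a), softmax over the c1 distribution bins, then the frozen
+        # 1x1 conv (weights fixed to 0..c1-1) takes the expected bin value per box side, giving (b, 4, a)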
+ b, c, a = x.shape # batch, channels, anchors + return self.conv(x.view(b, 4, self.c1, a).transpose(2, 1).softmax(1)).view(b, 4, a) + # return self.conv(x.view(b, self.c1, 4, a).softmax(1)).view(b, 4, a) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5): # ch_in, ch_out, shortcut, groups, kernels, expand + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, k[0], 1) + self.cv2 = Conv(c_, c2, k[1], 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, k=((1, 1), (3, 3)), e=1.0) for _ in range(n))) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + + +class C2(nn.Module): + # CSP Bottleneck with 2 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + self.c = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, 2 * self.c, 1, 1) + self.cv2 = Conv(2 * self.c, c2, 1) # optional act=FReLU(c2) + # self.attention = 
ChannelAttention(2 * self.c) # or SpatialAttention() + self.m = nn.Sequential(*(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n))) + + def forward(self, x): + a, b = self.cv1(x).chunk(2, 1) + return self.cv2(torch.cat((self.m(a), b), 1)) + + +class C2f(nn.Module): + # CSP Bottleneck with 2 convolutions + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + self.c = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, 2 * self.c, 1, 1) + self.cv2 = Conv((2 + n) * self.c, c2, 1) # optional act=FReLU(c2) + self.m = nn.ModuleList(Bottleneck(self.c, self.c, shortcut, g, k=((3, 3), (3, 3)), e=1.0) for _ in range(n)) + + def forward(self, x): + y = list(self.cv1(x).chunk(2, 1)) + y.extend(m(y[-1]) for m in self.m) + return self.cv2(torch.cat(y, 1)) + + def forward_split(self, x): + y = list(self.cv1(x).split((self.c, self.c), 1)) + y.extend(m(y[-1]) for m in self.m) + return self.cv2(torch.cat(y, 1)) + + +class ChannelAttention(nn.Module): + # Channel-attention module https://github.com/open-mmlab/mmdetection/tree/v3.0.0rc1/configs/rtmdet + def __init__(self, channels: int) -> None: + super().__init__() + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Conv2d(channels, channels, 1, 1, 0, bias=True) + self.act = nn.Sigmoid() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return x * self.act(self.fc(self.pool(x))) + + +class SpatialAttention(nn.Module): + # Spatial-attention module + def __init__(self, kernel_size=7): + super().__init__() + assert kernel_size in (3, 7), 'kernel size must be 3 or 7' + padding = 3 if kernel_size == 7 else 1 + self.cv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False) + self.act = nn.Sigmoid() + + def forward(self, x): + return x * self.act(self.cv1(torch.cat([torch.mean(x, 1, keepdim=True), torch.max(x, 1, keepdim=True)[0]], 1))) + + +class CBAM(nn.Module): + # Convolutional Block Attention Module + def __init__(self, c1, kernel_size=7): # ch_in, kernels + super().__init__() + self.channel_attention = ChannelAttention(c1) + self.spatial_attention = SpatialAttention(kernel_size) + + def forward(self, x): + return self.spatial_attention(self.channel_attention(x)) + + +class C1(nn.Module): + # CSP Bottleneck with 1 convolution + def __init__(self, c1, c2, n=1): # ch_in, ch_out, number + super().__init__() + self.cv1 = Conv(c1, c2, 1, 1) + self.m = nn.Sequential(*(Conv(c2, c2, 3) for _ in range(n))) + + def forward(self, x): + y = self.cv1(x) + return self.m(y) + y + + +class C3x(C3): + # C3 module with cross-convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + self.c_ = int(c2 * e) + self.m = nn.Sequential(*(Bottleneck(self.c_, self.c_, shortcut, g, k=((1, 3), (3, 1)), e=1) for _ in range(n))) + + +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class C3Ghost(C3): + # C3 module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + +class SPP(nn.Module): + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + def __init__(self, c1, c2, k=(5, 9, 13)): + super().__init__() + c_ = c1 
// 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + # return self.conv(self.contract(x)) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat((y, self.cv2(y)), 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super().__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class Proto(nn.Module): + # YOLOv8 mask Proto module for segmentation models + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.ConvTranspose2d(c_, c_, 2, 2, 0, bias=True) # nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super().__init__() + + def forward(self, x, augment=False, profile=False, visualize=False): + y = [module(x, augment, profile, visualize)[0] for module in self] + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +# heads +class Detect(nn.Module): + # YOLOv8 Detect head for detection models + dynamic = False # force grid reconstruction + export = False # 
export mode + shape = None + anchors = torch.empty(0) # init + strides = torch.empty(0) # init + + def __init__(self, nc=80, ch=()): # detection layer + super().__init__() + self.nc = nc # number of classes + self.nl = len(ch) # number of detection layers + self.reg_max = 16 # DFL channels (ch[0] // 16 to scale 4/8/12/16/20 for n/s/m/l/x) + self.no = nc + self.reg_max * 4 # number of outputs per anchor + self.stride = torch.zeros(self.nl) # strides computed during build + + c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], self.nc) # channels + self.cv2 = nn.ModuleList( + nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch) + self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, self.nc, 1)) for x in ch) + self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity() + + def forward(self, x): + shape = x[0].shape # BCHW + for i in range(self.nl): + x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1) + if self.training: + return x + elif self.dynamic or self.shape != shape: + self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5)) + self.shape = shape + + x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2) + if self.export and self.format in ('saved_model', 'pb', 'tflite', 'edgetpu', 'tfjs'): # avoid TF FlexSplitV ops + box = x_cat[:, :self.reg_max * 4] + cls = x_cat[:, self.reg_max * 4:] + else: + box, cls = x_cat.split((self.reg_max * 4, self.nc), 1) + dbox = dist2bbox(self.dfl(box), self.anchors.unsqueeze(0), xywh=True, dim=1) * self.strides + y = torch.cat((dbox, cls.sigmoid()), 1) + return y if self.export else (y, x) + + def bias_init(self): + # Initialize Detect() biases, WARNING: requires stride availability + m = self # self.model[-1] # Detect() module + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1 + # ncf = math.log(0.6 / (m.nc - 0.999999)) if cf is None else torch.log(cf / cf.sum()) # nominal class frequency + for a, b, s in zip(m.cv2, m.cv3, m.stride): # from + a[-1].bias.data[:] = 1.0 # box + b[-1].bias.data[:m.nc] = math.log(5 / m.nc / (640 / s) ** 2) # cls (.01 objects, 80 classes, 640 img) + + +class Segment(Detect): + # YOLOv8 Segment head for segmentation models + def __init__(self, nc=80, nm=32, npr=256, ch=()): + super().__init__(nc, ch) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + c4 = max(ch[0] // 4, self.nm) + self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch) + + def forward(self, x): + p = self.proto(x[0]) # mask protos + bs = p.shape[0] # batch size + + mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2) # mask coefficients + x = self.detect(self, x) + if self.training: + return x, mc, p + return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p)) + + +class Classify(nn.Module): + # YOLOv8 classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, autopad(k, p), g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=0.0, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) + + def forward(self, x): + if isinstance(x, list): + x = torch.cat(x, 1) + x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) + return x if self.training else x.softmax(1) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/nn/tasks.py b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/tasks.py new file mode 100644 index 0000000..dffd8e6 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/nn/tasks.py @@ -0,0 +1,582 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import contextlib +from copy import deepcopy +from pathlib import Path + +import thop +import torch +import torch.nn as nn + +from ultralytics.nn.modules import (C1, C2, C3, C3TR, SPP, SPPF, Bottleneck, BottleneckCSP, C2f, C3Ghost, C3x, Classify, + Concat, Conv, ConvTranspose, Detect, DWConv, DWConvTranspose2d, Ensemble, Focus, + GhostBottleneck, GhostConv, Segment) +from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, colorstr, emojis, yaml_load +from ultralytics.yolo.utils.checks import check_requirements, check_suffix, check_yaml +from ultralytics.yolo.utils.torch_utils import (fuse_conv_and_bn, fuse_deconv_and_bn, initialize_weights, + intersect_dicts, make_divisible, model_info, scale_img, time_sync) + + +class BaseModel(nn.Module): + """ + The BaseModel class serves as a base class for all the models in the Ultralytics YOLO family. + """ + + def forward(self, x, profile=False, visualize=False): + """ + Forward pass of the model on a single scale. + Wrapper for `_forward_once` method. + + Args: + x (torch.Tensor): The input image tensor + profile (bool): Whether to profile the model, defaults to False + visualize (bool): Whether to return the intermediate feature maps, defaults to False + + Returns: + (torch.Tensor): The output of the network. + """ + return self._forward_once(x, profile, visualize) + + def _forward_once(self, x, profile=False, visualize=False): + """ + Perform a forward pass through the network. + + Args: + x (torch.Tensor): The input tensor to the model + profile (bool): Print the computation time of each layer if True, defaults to False. + visualize (bool): Save the feature maps of the model if True, defaults to False + + Returns: + (torch.Tensor): The last output of the model. + """ + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + LOGGER.info('visualize feature not yet supported') + # TODO: feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _profile_one_layer(self, m, x, dt): + """ + Profile the computation time and FLOPs of a single layer of the model on a given input. + Appends the results to the provided list. + + Args: + m (nn.Module): The layer to be profiled. + x (torch.Tensor): The input data to the layer. + dt (list): A list to store the computation time of the layer. 
+ + Returns: + None + """ + c = m == self.model[-1] # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=[x.clone() if c else x], verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.clone() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} {m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def fuse(self, verbose=True): + """ + Fuse the `Conv2d()` and `BatchNorm2d()` layers of the model into a single layer, in order to improve the + computation efficiency. + + Returns: + (nn.Module): The fused model is returned. + """ + if not self.is_fused(): + for m in self.model.modules(): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + if isinstance(m, ConvTranspose) and hasattr(m, 'bn'): + m.conv_transpose = fuse_deconv_and_bn(m.conv_transpose, m.bn) + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + self.info(verbose=verbose) + + return self + + def is_fused(self, thresh=10): + """ + Check if the model has less than a certain threshold of BatchNorm layers. + + Args: + thresh (int, optional): The threshold number of BatchNorm layers. Default is 10. + + Returns: + (bool): True if the number of BatchNorm layers in the model is less than the threshold, False otherwise. + """ + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + return sum(isinstance(v, bn) for v in self.modules()) < thresh # True if < 'thresh' BatchNorm layers in model + + def info(self, verbose=True, imgsz=640): + """ + Prints model information + + Args: + verbose (bool): if True, prints out the model information. Defaults to False + imgsz (int): the size of the image that the model will be trained on. Defaults to 640 + """ + model_info(self, verbose=verbose, imgsz=imgsz) + + def _apply(self, fn): + """ + `_apply()` is a function that applies a function to all the tensors in the model that are not + parameters or registered buffers + + Args: + fn: the function to apply to the model + + Returns: + A model that is a Detect() object. + """ + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment)): + m.stride = fn(m.stride) + m.anchors = fn(m.anchors) + m.strides = fn(m.strides) + return self + + def load(self, weights, verbose=True): + """Load the weights into the model. + + Args: + weights (dict) or (torch.nn.Module): The pre-trained weights to be loaded. + verbose (bool, optional): Whether to log the transfer progress. Defaults to True. 
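+
+        Example (illustrative; assumes a standard YOLOv8 checkpoint dict containing a 'model' entry):
+            >>> model.load(torch.load('yolov8n.pt', map_location='cpu'))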
+ """ + model = weights['model'] if isinstance(weights, dict) else weights # torchvision models are not dicts + csd = model.float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, self.state_dict()) # intersect + self.load_state_dict(csd, strict=False) # load + if verbose: + LOGGER.info(f'Transferred {len(csd)}/{len(self.model.state_dict())} items from pretrained weights') + + +class DetectionModel(BaseModel): + # YOLOv8 detection model + def __init__(self, cfg='yolov8n.yaml', ch=3, nc=None, verbose=True): # model, input channels, number of classes + super().__init__() + self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist + self.names = {i: f'{i}' for i in range(self.yaml['nc'])} # default names dict + self.inplace = self.yaml.get('inplace', True) + + # Build strides + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment)): + s = 256 # 2x min stride + m.inplace = self.inplace + forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + self.stride = m.stride + m.bias_init() # only run once + + # Init weights, biases + initialize_weights(self) + if verbose: + self.info() + LOGGER.info('') + + def forward(self, x, augment=False, profile=False, visualize=False): + if augment: + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self._forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + y = self._clip_augmented(y) # clip augmented tails + return torch.cat(y, -1), None # augmented inference, train + + @staticmethod + def _descale_pred(p, flips, scale, img_size, dim=1): + # de-scale predictions following augmented inference (inverse operation) + p[:, :4] /= scale # de-scale + x, y, wh, cls = p.split((1, 1, 2, p.shape[dim] - 4), dim) + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + return torch.cat((x, y, wh, cls), dim) + + def _clip_augmented(self, y): + # Clip YOLOv5 augmented inference tails + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[-1] // g) * sum(4 ** x for x in range(e)) # indices + y[0] = y[0][..., :-i] # large + i = (y[-1].shape[-1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][..., i:] # small + return y + + +class SegmentationModel(DetectionModel): + # YOLOv8 segmentation model + def __init__(self, cfg='yolov8n-seg.yaml', ch=3, nc=None, verbose=True): + super().__init__(cfg, ch, nc, verbose) + + def _forward_augment(self, x): + raise 
NotImplementedError(emojis('WARNING ⚠️ SegmentationModel has not supported augment inference yet!')) + + +class ClassificationModel(BaseModel): + # YOLOv8 classification model + def __init__(self, + cfg=None, + model=None, + ch=3, + nc=None, + cutoff=10, + verbose=True): # yaml, model, channels, number of classes, cutoff index, verbose flag + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg, ch, nc, verbose) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + # Create a YOLOv5 classification model from a YOLOv5 detection model + from ultralytics.nn.autobackend import AutoBackend + if isinstance(model, AutoBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg, ch, nc, verbose): + self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg) # cfg dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + elif not nc and not self.yaml.get('nc', None): + raise ValueError('nc not specified. Must specify nc in model.yaml or function arguments.') + self.model, self.save = parse_model(deepcopy(self.yaml), ch=ch, verbose=verbose) # model, savelist + self.stride = torch.Tensor([1]) # no stride constraints + self.names = {i: f'{i}' for i in range(self.yaml['nc'])} # default names dict + self.info() + + @staticmethod + def reshape_outputs(model, nc): + # Update a TorchVision classification model to class count 'n' if required + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLO Classify() head + if m.linear.out_features != nc: + m.linear = nn.Linear(m.linear.in_features, nc) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != nc: + setattr(model, name, nn.Linear(m.in_features, nc)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != nc: + m[i] = nn.Linear(m[i].in_features, nc) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != nc: + m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) + + +# Functions ------------------------------------------------------------------------------------------------------------ + + +def torch_safe_load(weight): + """ + This function attempts to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised, + it catches the error, logs a warning message, and attempts to install the missing module via the + check_requirements() function. After installation, the function again attempts to load the model using torch.load(). + + Args: + weight (str): The file path of the PyTorch model. + + Returns: + The loaded PyTorch model. 
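+
+        Example (illustrative):
+            >>> ckpt, file = torch_safe_load('yolov8n.pt')  # (checkpoint dict, resolved file path)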
+ """ + from ultralytics.yolo.utils.downloads import attempt_download_asset + + check_suffix(file=weight, suffix='.pt') + file = attempt_download_asset(weight) # search online if missing locally + try: + return torch.load(file, map_location='cpu'), file # load + except ModuleNotFoundError as e: # e.name is missing module name + if e.name == 'models': + raise TypeError( + emojis(f'ERROR ❌️ {weight} appears to be an Ultralytics YOLOv5 model originally trained ' + f'with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with ' + f'YOLOv8 at https://github.com/ultralytics/ultralytics.' + f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " + f"run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'")) from e + LOGGER.warning(f"WARNING ⚠️ {weight} appears to require '{e.name}', which is not in ultralytics requirements." + f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future." + f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to " + f"run a command with an official YOLOv8 model, i.e. 'yolo predict model=yolov8n.pt'") + check_requirements(e.name) # install missing module + + return torch.load(file, map_location='cpu'), file # load + + +def attempt_load_weights(weights, device=None, inplace=True, fuse=False): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + + ensemble = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + ckpt, w = torch_safe_load(w) # load ckpt + args = {**DEFAULT_CFG_DICT, **ckpt['train_args']} # combine model and default args, preferring model args + model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates + model.args = args # attach args to model + model.pt_path = w # attach *.pt file path to model + model.task = guess_model_task(model) + if not hasattr(model, 'stride'): + model.stride = torch.tensor([32.]) + + # Append + ensemble.append(model.fuse().eval() if fuse and hasattr(model, 'fuse') else model.eval()) # model in eval mode + + # Module compatibility updates + for m in ensemble.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment): + m.inplace = inplace # torch 1.7.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model + if len(ensemble) == 1: + return ensemble[-1] + + # Return ensemble + LOGGER.info(f'Ensemble created with {weights}\n') + for k in 'names', 'nc', 'yaml': + setattr(ensemble, k, getattr(ensemble[0], k)) + ensemble.stride = ensemble[torch.argmax(torch.tensor([m.stride.max() for m in ensemble])).int()].stride + assert all(ensemble[0].nc == m.nc for m in ensemble), f'Models differ in class counts: {[m.nc for m in ensemble]}' + return ensemble + + +def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False): + # Loads a single model weights + ckpt, weight = torch_safe_load(weight) # load ckpt + args = {**DEFAULT_CFG_DICT, **ckpt['train_args']} # combine model and default args, preferring model args + model = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates + model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model + model.pt_path = weight # attach *.pt file path to model + model.task = 
guess_model_task(model) + if not hasattr(model, 'stride'): + model.stride = torch.tensor([32.]) + + model = model.fuse().eval() if fuse and hasattr(model, 'fuse') else model.eval() # model in eval mode + + # Module compatibility updates + for m in model.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Segment): + m.inplace = inplace # torch 1.7.0 compatibility + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model and ckpt + return model, ckpt + + +def parse_model(d, ch, verbose=True): # model_dict, input_channels(3) + # Parse a YOLO model.yaml dictionary into a PyTorch model + import ast + + # Args + max_channels = float('inf') + nc, act, scales = (d.get(x) for x in ('nc', 'act', 'scales')) + depth, width = (d.get(x, 1.0) for x in ('depth_multiple', 'width_multiple')) + if scales: + scale = d.get('scale') + if not scale: + scale = tuple(scales.keys())[0] + LOGGER.warning(f"WARNING ⚠️ no model scale passed. Assuming scale='{scale}'.") + depth, width, max_channels = scales[scale] + + if act: + Conv.default_act = eval(act) # redefine default activation, i.e. Conv.default_act = nn.SiLU() + if verbose: + LOGGER.info(f"{colorstr('activation:')} {act}") # print + + if verbose: + LOGGER.info(f"\n{'':>3}{'from':>20}{'n':>3}{'params':>10} {'module':<45}{'arguments':<30}") + ch = [ch] + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = getattr(torch.nn, m[3:]) if 'nn.' in m else globals()[m] # get module + for j, a in enumerate(args): + if isinstance(a, str): + with contextlib.suppress(ValueError): + args[j] = locals()[a] if a in locals() else ast.literal_eval(a) + + n = n_ = max(round(n * depth), 1) if n > 1 else n # depth gain + if m in (Classify, Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus, + BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x): + c1, c2 = ch[f], args[0] + if c2 != nc: # if c2 not equal to number of classes (i.e. for Classify() output) + c2 = make_divisible(min(c2, max_channels) * width, 8) + + args = [c1, c2, *args[1:]] + if m in (BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x): + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[x] for x in f) + elif m in (Detect, Segment): + args.append([ch[x] for x in f]) + if m is Segment: + args[2] = make_divisible(min(args[2], max_channels) * width, 8) + else: + c2 = ch[f] + + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + m.np = sum(x.numel() for x in m_.parameters()) # number params + m_.i, m_.f, m_.type = i, f, t # attach index, 'from' index, type + if verbose: + LOGGER.info(f'{i:>3}{str(f):>20}{n_:>3}{m.np:10.0f} {t:<45}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +def yaml_model_load(path): + import re + + path = Path(path) + if path.stem in (f'yolov{d}{x}6' for x in 'nsmlx' for d in (5, 8)): + new_stem = re.sub(r'(\d+)([nslmx])6(.+)?$', r'\1\2-p6\3', path.stem) + LOGGER.warning(f'WARNING ⚠️ Ultralytics YOLO P6 models now use -p6 suffix. 
Renaming {path.stem} to {new_stem}.') + path = path.with_stem(new_stem) + + unified_path = re.sub(r'(\d+)([nslmx])(.+)?$', r'\1\3', str(path)) # i.e. yolov8x.yaml -> yolov8.yaml + yaml_file = check_yaml(unified_path, hard=False) or check_yaml(path) + d = yaml_load(yaml_file) # model dict + d['scale'] = guess_model_scale(path) + d['yaml_file'] = str(path) + return d + + +def guess_model_scale(model_path): + """ + Takes a path to a YOLO model's YAML file as input and extracts the size character of the model's scale. + The function uses regular expression matching to find the pattern of the model scale in the YAML file name, + which is denoted by n, s, m, l, or x. The function returns the size character of the model scale as a string. + + Args: + model_path (str or Path): The path to the YOLO model's YAML file. + + Returns: + (str): The size character of the model's scale, which can be n, s, m, l, or x. + """ + with contextlib.suppress(AttributeError): + import re + return re.search(r'yolov\d+([nslmx])', Path(model_path).stem).group(1) # n, s, m, l, or x + return '' + + +def guess_model_task(model): + """ + Guess the task of a PyTorch model from its architecture or configuration. + + Args: + model (nn.Module) or (dict): PyTorch model or model configuration in YAML format. + + Returns: + str: Task of the model ('detect', 'segment', 'classify'). + + Raises: + SyntaxError: If the task of the model could not be determined. + """ + + def cfg2task(cfg): + # Guess from YAML dictionary + m = cfg['head'][-1][-2].lower() # output module name + if m in ('classify', 'classifier', 'cls', 'fc'): + return 'classify' + if m == 'detect': + return 'detect' + if m == 'segment': + return 'segment' + + # Guess from model cfg + if isinstance(model, dict): + with contextlib.suppress(Exception): + return cfg2task(model) + + # Guess from PyTorch model + if isinstance(model, nn.Module): # PyTorch model + for x in 'model.args', 'model.model.args', 'model.model.model.args': + with contextlib.suppress(Exception): + return eval(x)['task'] + for x in 'model.yaml', 'model.model.yaml', 'model.model.model.yaml': + with contextlib.suppress(Exception): + return cfg2task(eval(x)) + + for m in model.modules(): + if isinstance(m, Detect): + return 'detect' + elif isinstance(m, Segment): + return 'segment' + elif isinstance(m, Classify): + return 'classify' + + # Guess from model filename + if isinstance(model, (str, Path)): + model = Path(model) + if '-seg' in model.stem or 'segment' in model.parts: + return 'segment' + elif '-cls' in model.stem or 'classify' in model.parts: + return 'classify' + elif 'detect' in model.parts: + return 'detect' + + # Unable to determine task from model + LOGGER.warning("WARNING ⚠️ Unable to automatically guess model task, assuming 'task=detect'. " + "Explicitly define task for your model, i.e. 
'task=detect', 'task=segment' or 'task=classify'.") + return 'detect' # assume detect diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/README.md b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/README.md new file mode 100644 index 0000000..387ca98 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/README.md @@ -0,0 +1,33 @@ +## Tracker + +### Trackers + +- [x] ByteTracker +- [x] BoT-SORT + +### Usage + +python interface: + +```python +from ultralytics import YOLO + +model = YOLO("yolov8n.pt") # or a segmentation model .i.e yolov8n-seg.pt +model.track( + source="video/streams", + stream=True, + tracker="botsort.yaml", # or 'bytetrack.yaml' + ..., +) +``` + +cli: + +```bash +yolo detect track source=... tracker=... +yolo segment track source=... tracker=... +``` + +By default, trackers will use the configuration in `ultralytics/tracker/cfg`. +We also support using a modified tracker config file. Please refer to the tracker config files +in `ultralytics/tracker/cfg`. diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/__init__.py new file mode 100644 index 0000000..85bf24e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/__init__.py @@ -0,0 +1,6 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from .track import register_tracker +from .trackers import BOTSORT, BYTETracker + +__all__ = 'register_tracker', 'BOTSORT', 'BYTETracker' # allow simpler import diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/botsort.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/botsort.yaml new file mode 100644 index 0000000..445d1a1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/botsort.yaml @@ -0,0 +1,18 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Default YOLO tracker settings for BoT-SORT tracker https://github.com/NirAharon/BoT-SORT + +tracker_type: botsort # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks +# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) +# mot20: False # for tracker evaluation(not used for now) + +# BoT-SORT settings +cmc_method: sparseOptFlow # method of global motion compensation +# ReID model related thresh (not supported yet) +proximity_thresh: 0.5 +appearance_thresh: 0.25 +with_reid: False diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/bytetrack.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/bytetrack.yaml new file mode 100644 index 0000000..fe9378c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/cfg/bytetrack.yaml @@ -0,0 +1,11 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Default YOLO tracker settings for ByteTrack tracker https://github.com/ifzhang/ByteTrack + +tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack'] +track_high_thresh: 0.5 # threshold for the first association +track_low_thresh: 0.1 # threshold for the second association +new_track_thresh: 
0.6 # threshold for init new track if the detection does not match any tracks +track_buffer: 30 # buffer to calculate the time when to remove tracks +match_thresh: 0.8 # threshold for matching tracks +# min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now) +# mot20: False # for tracker evaluation(not used for now) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/track.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/track.py new file mode 100644 index 0000000..78e32c6 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/track.py @@ -0,0 +1,44 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch + +from ultralytics.yolo.utils import IterableSimpleNamespace, yaml_load +from ultralytics.yolo.utils.checks import check_yaml + +from .trackers import BOTSORT, BYTETracker + +TRACKER_MAP = {'bytetrack': BYTETracker, 'botsort': BOTSORT} + + +def on_predict_start(predictor): + tracker = check_yaml(predictor.args.tracker) + cfg = IterableSimpleNamespace(**yaml_load(tracker)) + assert cfg.tracker_type in ['bytetrack', 'botsort'], \ + f"Only support 'bytetrack' and 'botsort' for now, but got '{cfg.tracker_type}'" + trackers = [] + for _ in range(predictor.dataset.bs): + tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30) + trackers.append(tracker) + predictor.trackers = trackers + + +def on_predict_postprocess_end(predictor): + bs = predictor.dataset.bs + im0s = predictor.batch[2] + im0s = im0s if isinstance(im0s, list) else [im0s] + for i in range(bs): + det = predictor.results[i].boxes.cpu().numpy() + if len(det) == 0: + continue + tracks = predictor.trackers[i].update(det, im0s[i]) + if len(tracks) == 0: + continue + predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1])) + if predictor.results[i].masks is not None: + idx = tracks[:, -1].tolist() + predictor.results[i].masks = predictor.results[i].masks[idx] + + +def register_tracker(model): + model.add_callback('on_predict_start', on_predict_start) + model.add_callback('on_predict_postprocess_end', on_predict_postprocess_end) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/__init__.py new file mode 100644 index 0000000..10f1cf5 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/__init__.py @@ -0,0 +1,6 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from .bot_sort import BOTSORT +from .byte_tracker import BYTETracker + +__all__ = 'BOTSORT', 'BYTETracker' # allow simpler import diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/basetrack.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/basetrack.py new file mode 100644 index 0000000..71c8541 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/basetrack.py @@ -0,0 +1,59 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from collections import OrderedDict + +import numpy as np + + +class TrackState: + New = 0 + Tracked = 1 + Lost = 2 + Removed = 3 + + +class BaseTrack: + _count = 0 + + track_id = 0 + is_activated = False + state = TrackState.New + + history = OrderedDict() + features = [] + curr_feature = None + score = 0 + start_frame = 0 + frame_id = 0 + time_since_update = 0 + + # multi-camera + location = (np.inf, np.inf) + + @property + def end_frame(self): + 
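+        # last frame in which this track was updated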
return self.frame_id + + @staticmethod + def next_id(): + BaseTrack._count += 1 + return BaseTrack._count + + def activate(self, *args): + raise NotImplementedError + + def predict(self): + raise NotImplementedError + + def update(self, *args, **kwargs): + raise NotImplementedError + + def mark_lost(self): + self.state = TrackState.Lost + + def mark_removed(self): + self.state = TrackState.Removed + + @staticmethod + def reset_id(): + BaseTrack._count = 0 diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/bot_sort.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/bot_sort.py new file mode 100644 index 0000000..718875a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/bot_sort.py @@ -0,0 +1,136 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from collections import deque + +import numpy as np + +from ..utils import matching +from ..utils.gmc import GMC +from ..utils.kalman_filter import KalmanFilterXYWH +from .basetrack import TrackState +from .byte_tracker import BYTETracker, STrack + + +class BOTrack(STrack): + shared_kalman = KalmanFilterXYWH() + + def __init__(self, tlwh, score, cls, feat=None, feat_history=50): + super().__init__(tlwh, score, cls) + + self.smooth_feat = None + self.curr_feat = None + if feat is not None: + self.update_features(feat) + self.features = deque([], maxlen=feat_history) + self.alpha = 0.9 + + def update_features(self, feat): + feat /= np.linalg.norm(feat) + self.curr_feat = feat + if self.smooth_feat is None: + self.smooth_feat = feat + else: + self.smooth_feat = self.alpha * self.smooth_feat + (1 - self.alpha) * feat + self.features.append(feat) + self.smooth_feat /= np.linalg.norm(self.smooth_feat) + + def predict(self): + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[6] = 0 + mean_state[7] = 0 + + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + def re_activate(self, new_track, frame_id, new_id=False): + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + super().re_activate(new_track, frame_id, new_id) + + def update(self, new_track, frame_id): + if new_track.curr_feat is not None: + self.update_features(new_track.curr_feat) + super().update(new_track, frame_id) + + @property + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[:2] -= ret[2:] / 2 + return ret + + @staticmethod + def multi_predict(stracks): + if len(stracks) <= 0: + return + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][6] = 0 + multi_mean[i][7] = 0 + multi_mean, multi_covariance = BOTrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + def convert_coords(self, tlwh): + return self.tlwh_to_xywh(tlwh) + + @staticmethod + def tlwh_to_xywh(tlwh): + """Convert bounding box to format `(center x, center y, width, + height)`. 
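+
+        Example (illustrative): tlwh [10, 20, 30, 60] -> xywh [25., 50., 30., 60.]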
+ """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + return ret + + +class BOTSORT(BYTETracker): + + def __init__(self, args, frame_rate=30): + super().__init__(args, frame_rate) + # ReID module + self.proximity_thresh = args.proximity_thresh + self.appearance_thresh = args.appearance_thresh + + if args.with_reid: + # haven't supported BoT-SORT(reid) yet + self.encoder = None + # self.gmc = GMC(method=args.cmc_method, verbose=[args.name, args.ablation]) + self.gmc = GMC(method=args.cmc_method) + + def get_kalmanfilter(self): + return KalmanFilterXYWH() + + def init_track(self, dets, scores, cls, img=None): + if len(dets) == 0: + return [] + if self.args.with_reid and self.encoder is not None: + features_keep = self.encoder.inference(img, dets) + return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections + else: + return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections + + def get_dists(self, tracks, detections): + dists = matching.iou_distance(tracks, detections) + dists_mask = (dists > self.proximity_thresh) + + # TODO: mot20 + # if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + + if self.args.with_reid and self.encoder is not None: + emb_dists = matching.embedding_distance(tracks, detections) / 2.0 + emb_dists[emb_dists > self.appearance_thresh] = 1.0 + emb_dists[dists_mask] = 1.0 + dists = np.minimum(dists, emb_dists) + return dists + + def multi_predict(self, tracks): + BOTrack.multi_predict(tracks) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/byte_tracker.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/byte_tracker.py new file mode 100644 index 0000000..a6103e2 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/trackers/byte_tracker.py @@ -0,0 +1,343 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import numpy as np + +from ..utils import matching +from ..utils.kalman_filter import KalmanFilterXYAH +from .basetrack import BaseTrack, TrackState + + +class STrack(BaseTrack): + shared_kalman = KalmanFilterXYAH() + + def __init__(self, tlwh, score, cls): + + # wait activate + self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32) + self.kalman_filter = None + self.mean, self.covariance = None, None + self.is_activated = False + + self.score = score + self.tracklet_len = 0 + self.cls = cls + self.idx = tlwh[-1] + + def predict(self): + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[7] = 0 + self.mean, self.covariance = self.kalman_filter.predict(mean_state, self.covariance) + + @staticmethod + def multi_predict(stracks): + if len(stracks) <= 0: + return + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + for i, st in enumerate(stracks): + if st.state != TrackState.Tracked: + multi_mean[i][7] = 0 + multi_mean, multi_covariance = STrack.shared_kalman.multi_predict(multi_mean, multi_covariance) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + @staticmethod + def multi_gmc(stracks, H=np.eye(2, 3)): + if len(stracks) > 0: + multi_mean = np.asarray([st.mean.copy() for st in stracks]) + multi_covariance = np.asarray([st.covariance for st in stracks]) + + R = H[:2, :2] + R8x8 = np.kron(np.eye(4, dtype=float), R) + t = H[:2, 2] + + for i, (mean, cov) in 
enumerate(zip(multi_mean, multi_covariance)): + mean = R8x8.dot(mean) + mean[:2] += t + cov = R8x8.dot(cov).dot(R8x8.transpose()) + + stracks[i].mean = mean + stracks[i].covariance = cov + + def activate(self, kalman_filter, frame_id): + """Start a new tracklet""" + self.kalman_filter = kalman_filter + self.track_id = self.next_id() + self.mean, self.covariance = self.kalman_filter.initiate(self.convert_coords(self._tlwh)) + + self.tracklet_len = 0 + self.state = TrackState.Tracked + if frame_id == 1: + self.is_activated = True + self.frame_id = frame_id + self.start_frame = frame_id + + def re_activate(self, new_track, frame_id, new_id=False): + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, + self.convert_coords(new_track.tlwh)) + self.tracklet_len = 0 + self.state = TrackState.Tracked + self.is_activated = True + self.frame_id = frame_id + if new_id: + self.track_id = self.next_id() + self.score = new_track.score + self.cls = new_track.cls + self.idx = new_track.idx + + def update(self, new_track, frame_id): + """ + Update a matched track + :type new_track: STrack + :type frame_id: int + :return: + """ + self.frame_id = frame_id + self.tracklet_len += 1 + + new_tlwh = new_track.tlwh + self.mean, self.covariance = self.kalman_filter.update(self.mean, self.covariance, + self.convert_coords(new_tlwh)) + self.state = TrackState.Tracked + self.is_activated = True + + self.score = new_track.score + self.cls = new_track.cls + self.idx = new_track.idx + + def convert_coords(self, tlwh): + return self.tlwh_to_xyah(tlwh) + + @property + def tlwh(self): + """Get current position in bounding box format `(top left x, top left y, + width, height)`. + """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[2] *= ret[3] + ret[:2] -= ret[2:] / 2 + return ret + + @property + def tlbr(self): + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + @staticmethod + def tlwh_to_xyah(tlwh): + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. 
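+ For example (illustrative numbers), `tlwh = (10, 20, 40, 80)` maps to `(30, 60, 0.5, 80)`: center (30, 60), aspect ratio 40 / 80 = 0.5, height 80.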
+ """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + + @staticmethod + def tlbr_to_tlwh(tlbr): + ret = np.asarray(tlbr).copy() + ret[2:] -= ret[:2] + return ret + + @staticmethod + def tlwh_to_tlbr(tlwh): + ret = np.asarray(tlwh).copy() + ret[2:] += ret[:2] + return ret + + def __repr__(self): + return f'OT_{self.track_id}_({self.start_frame}-{self.end_frame})' + + +class BYTETracker: + + def __init__(self, args, frame_rate=30): + self.tracked_stracks = [] # type: list[STrack] + self.lost_stracks = [] # type: list[STrack] + self.removed_stracks = [] # type: list[STrack] + + self.frame_id = 0 + self.args = args + self.max_time_lost = int(frame_rate / 30.0 * args.track_buffer) + self.kalman_filter = self.get_kalmanfilter() + self.reset_id() + + def update(self, results, img=None): + self.frame_id += 1 + activated_starcks = [] + refind_stracks = [] + lost_stracks = [] + removed_stracks = [] + + scores = results.conf + bboxes = results.xyxy + # add index + bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1) + cls = results.cls + + remain_inds = scores > self.args.track_high_thresh + inds_low = scores > self.args.track_low_thresh + inds_high = scores < self.args.track_high_thresh + + inds_second = np.logical_and(inds_low, inds_high) + dets_second = bboxes[inds_second] + dets = bboxes[remain_inds] + scores_keep = scores[remain_inds] + scores_second = scores[inds_second] + cls_keep = cls[remain_inds] + cls_second = cls[inds_second] + + detections = self.init_track(dets, scores_keep, cls_keep, img) + """ Add newly detected tracklets to tracked_stracks""" + unconfirmed = [] + tracked_stracks = [] # type: list[STrack] + for track in self.tracked_stracks: + if not track.is_activated: + unconfirmed.append(track) + else: + tracked_stracks.append(track) + """ Step 2: First association, with high score detection boxes""" + strack_pool = self.joint_stracks(tracked_stracks, self.lost_stracks) + # Predict the current location with KF + self.multi_predict(strack_pool) + if hasattr(self, 'gmc'): + warp = self.gmc.apply(img, dets) + STrack.multi_gmc(strack_pool, warp) + STrack.multi_gmc(unconfirmed, warp) + + dists = self.get_dists(strack_pool, detections) + matches, u_track, u_detection = matching.linear_assignment(dists, thresh=self.args.match_thresh) + + for itracked, idet in matches: + track = strack_pool[itracked] + det = detections[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + """ Step 3: Second association, with low score detection boxes""" + # association the untrack to the low score detections + detections_second = self.init_track(dets_second, scores_second, cls_second, img) + r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked] + # TODO + dists = matching.iou_distance(r_tracked_stracks, detections_second) + matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5) + for itracked, idet in matches: + track = r_tracked_stracks[itracked] + det = detections_second[idet] + if track.state == TrackState.Tracked: + track.update(det, self.frame_id) + activated_starcks.append(track) + else: + track.re_activate(det, self.frame_id, new_id=False) + refind_stracks.append(track) + + for it in u_track: + track = r_tracked_stracks[it] + if track.state != TrackState.Lost: + track.mark_lost() + 
lost_stracks.append(track) + """Deal with unconfirmed tracks, usually tracks with only one beginning frame""" + detections = [detections[i] for i in u_detection] + dists = self.get_dists(unconfirmed, detections) + matches, u_unconfirmed, u_detection = matching.linear_assignment(dists, thresh=0.7) + for itracked, idet in matches: + unconfirmed[itracked].update(detections[idet], self.frame_id) + activated_starcks.append(unconfirmed[itracked]) + for it in u_unconfirmed: + track = unconfirmed[it] + track.mark_removed() + removed_stracks.append(track) + """ Step 4: Init new stracks""" + for inew in u_detection: + track = detections[inew] + if track.score < self.args.new_track_thresh: + continue + track.activate(self.kalman_filter, self.frame_id) + activated_starcks.append(track) + """ Step 5: Update state""" + for track in self.lost_stracks: + if self.frame_id - track.end_frame > self.max_time_lost: + track.mark_removed() + removed_stracks.append(track) + + self.tracked_stracks = [t for t in self.tracked_stracks if t.state == TrackState.Tracked] + self.tracked_stracks = self.joint_stracks(self.tracked_stracks, activated_starcks) + self.tracked_stracks = self.joint_stracks(self.tracked_stracks, refind_stracks) + self.lost_stracks = self.sub_stracks(self.lost_stracks, self.tracked_stracks) + self.lost_stracks.extend(lost_stracks) + self.lost_stracks = self.sub_stracks(self.lost_stracks, self.removed_stracks) + self.removed_stracks.extend(removed_stracks) + self.tracked_stracks, self.lost_stracks = self.remove_duplicate_stracks(self.tracked_stracks, self.lost_stracks) + output = [ + track.tlbr.tolist() + [track.track_id, track.score, track.cls, track.idx] for track in self.tracked_stracks + if track.is_activated] + return np.asarray(output, dtype=np.float32) + + def get_kalmanfilter(self): + return KalmanFilterXYAH() + + def init_track(self, dets, scores, cls, img=None): + return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections + + def get_dists(self, tracks, detections): + dists = matching.iou_distance(tracks, detections) + # TODO: mot20 + # if not self.args.mot20: + dists = matching.fuse_score(dists, detections) + return dists + + def multi_predict(self, tracks): + STrack.multi_predict(tracks) + + def reset_id(self): + STrack.reset_id() + + @staticmethod + def joint_stracks(tlista, tlistb): + exists = {} + res = [] + for t in tlista: + exists[t.track_id] = 1 + res.append(t) + for t in tlistb: + tid = t.track_id + if not exists.get(tid, 0): + exists[tid] = 1 + res.append(t) + return res + + @staticmethod + def sub_stracks(tlista, tlistb): + stracks = {t.track_id: t for t in tlista} + for t in tlistb: + tid = t.track_id + if stracks.get(tid, 0): + del stracks[tid] + return list(stracks.values()) + + @staticmethod + def remove_duplicate_stracks(stracksa, stracksb): + pdist = matching.iou_distance(stracksa, stracksb) + pairs = np.where(pdist < 0.15) + dupa, dupb = [], [] + for p, q in zip(*pairs): + timep = stracksa[p].frame_id - stracksa[p].start_frame + timeq = stracksb[q].frame_id - stracksb[q].start_frame + if timep > timeq: + dupb.append(q) + else: + dupa.append(p) + resa = [t for i, t in enumerate(stracksa) if i not in dupa] + resb = [t for i, t in enumerate(stracksb) if i not in dupb] + return resa, resb diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/gmc.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/gmc.py new file mode 100644 index 0000000..fec09a3 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/gmc.py @@ -0,0 +1,318 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import copy + +import cv2 +import matplotlib.pyplot as plt +import numpy as np + +from ultralytics.yolo.utils import LOGGER + + +class GMC: + + def __init__(self, method='sparseOptFlow', downscale=2, verbose=None): + super().__init__() + + self.method = method + self.downscale = max(1, int(downscale)) + + if self.method == 'orb': + self.detector = cv2.FastFeatureDetector_create(20) + self.extractor = cv2.ORB_create() + self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING) + + elif self.method == 'sift': + self.detector = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.extractor = cv2.SIFT_create(nOctaveLayers=3, contrastThreshold=0.02, edgeThreshold=20) + self.matcher = cv2.BFMatcher(cv2.NORM_L2) + + elif self.method == 'ecc': + number_of_iterations = 5000 + termination_eps = 1e-6 + self.warp_mode = cv2.MOTION_EUCLIDEAN + self.criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, number_of_iterations, termination_eps) + + elif self.method == 'sparseOptFlow': + self.feature_params = dict(maxCorners=1000, + qualityLevel=0.01, + minDistance=1, + blockSize=3, + useHarrisDetector=False, + k=0.04) + # self.gmc_file = open('GMC_results.txt', 'w') + + elif self.method in ['file', 'files']: + seqName = verbose[0] + ablation = verbose[1] + if ablation: + filePath = r'tracker/GMC_files/MOT17_ablation' + else: + filePath = r'tracker/GMC_files/MOTChallenge' + + if '-FRCNN' in seqName: + seqName = seqName[:-6] + elif '-DPM' in seqName or '-SDP' in seqName: + seqName = seqName[:-4] + self.gmcFile = open(f'{filePath}/GMC-{seqName}.txt') + + if self.gmcFile is None: + raise ValueError(f'Error: Unable to open GMC file in directory:{filePath}') + elif self.method in ['none', 'None']: + self.method = 'none' + else: + raise ValueError(f'Error: Unknown CMC method:{method}') + + self.prevFrame = None + self.prevKeyPoints = None + self.prevDescriptors = None + + self.initializedFirstFrame = False + + def apply(self, raw_frame, detections=None): + if self.method in ['orb', 'sift']: + return self.applyFeatures(raw_frame, detections) + elif self.method == 'ecc': + return self.applyEcc(raw_frame, detections) + elif self.method == 'sparseOptFlow': + return self.applySparseOptFlow(raw_frame, detections) + elif self.method == 'file': + return self.applyFile(raw_frame, detections) + elif self.method == 'none': + return np.eye(2, 3) + else: + return np.eye(2, 3) + + def applyEcc(self, raw_frame, detections=None): + + # Initialize + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3, dtype=np.float32) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Run the ECC algorithm. The results are stored in warp_matrix. 
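+ # findTransformECC iteratively refines H (a 2x3 Euclidean warp, since warp_mode is
+ # cv2.MOTION_EUCLIDEAN) by maximizing the enhanced correlation coefficient between the
+ # downscaled previous and current grayscale frames. If the solver fails to converge,
+ # the exception below is caught, a warning is logged and the identity warp is returned,
+ # so camera-motion compensation is effectively skipped for that frame.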
+ # (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria) + try: + (cc, H) = cv2.findTransformECC(self.prevFrame, frame, H, self.warp_mode, self.criteria, None, 1) + except Exception as e: + LOGGER.warning(f'WARNING: find transform failed. Set warp as identity {e}') + + return H + + def applyFeatures(self, raw_frame, detections=None): + + # Initialize + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image (TODO: consider using pyramids) + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + width = width // self.downscale + height = height // self.downscale + + # find the keypoints + mask = np.zeros_like(frame) + # mask[int(0.05 * height): int(0.95 * height), int(0.05 * width): int(0.95 * width)] = 255 + mask[int(0.02 * height):int(0.98 * height), int(0.02 * width):int(0.98 * width)] = 255 + if detections is not None: + for det in detections: + tlbr = (det[:4] / self.downscale).astype(np.int_) + mask[tlbr[1]:tlbr[3], tlbr[0]:tlbr[2]] = 0 + + keypoints = self.detector.detect(frame, mask) + + # compute the descriptors + keypoints, descriptors = self.extractor.compute(frame, keypoints) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + # Initialization done + self.initializedFirstFrame = True + + return H + + # Match descriptors. + knnMatches = self.matcher.knnMatch(self.prevDescriptors, descriptors, 2) + + # Filtered matches based on smallest spatial distance + matches = [] + spatialDistances = [] + + maxSpatialDistance = 0.25 * np.array([width, height]) + + # Handle empty matches case + if len(knnMatches) == 0: + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + for m, n in knnMatches: + if m.distance < 0.9 * n.distance: + prevKeyPointLocation = self.prevKeyPoints[m.queryIdx].pt + currKeyPointLocation = keypoints[m.trainIdx].pt + + spatialDistance = (prevKeyPointLocation[0] - currKeyPointLocation[0], + prevKeyPointLocation[1] - currKeyPointLocation[1]) + + if (np.abs(spatialDistance[0]) < maxSpatialDistance[0]) and \ + (np.abs(spatialDistance[1]) < maxSpatialDistance[1]): + spatialDistances.append(spatialDistance) + matches.append(m) + + meanSpatialDistances = np.mean(spatialDistances, 0) + stdSpatialDistances = np.std(spatialDistances, 0) + + inliers = (spatialDistances - meanSpatialDistances) < 2.5 * stdSpatialDistances + + goodMatches = [] + prevPoints = [] + currPoints = [] + for i in range(len(matches)): + if inliers[i, 0] and inliers[i, 1]: + goodMatches.append(matches[i]) + prevPoints.append(self.prevKeyPoints[matches[i].queryIdx].pt) + currPoints.append(keypoints[matches[i].trainIdx].pt) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Draw the keypoint matches on the output image + if 0: + matches_img = np.hstack((self.prevFrame, frame)) + matches_img = cv2.cvtColor(matches_img, cv2.COLOR_GRAY2BGR) + W = np.size(self.prevFrame, 1) + for m in goodMatches: + prev_pt = np.array(self.prevKeyPoints[m.queryIdx].pt, dtype=np.int_) + curr_pt = np.array(keypoints[m.trainIdx].pt, dtype=np.int_) + curr_pt[0] += W + color = np.random.randint(0, 255, 3) + color = (int(color[0]), 
int(color[1]), int(color[2])) + + matches_img = cv2.line(matches_img, prev_pt, curr_pt, tuple(color), 1, cv2.LINE_AA) + matches_img = cv2.circle(matches_img, prev_pt, 2, tuple(color), -1) + matches_img = cv2.circle(matches_img, curr_pt, 2, tuple(color), -1) + + plt.figure() + plt.imshow(matches_img) + plt.show() + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + LOGGER.warning('WARNING: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + self.prevDescriptors = copy.copy(descriptors) + + return H + + def applySparseOptFlow(self, raw_frame, detections=None): + # Initialize + # t0 = time.time() + height, width, _ = raw_frame.shape + frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) + H = np.eye(2, 3) + + # Downscale image + if self.downscale > 1.0: + # frame = cv2.GaussianBlur(frame, (3, 3), 1.5) + frame = cv2.resize(frame, (width // self.downscale, height // self.downscale)) + + # find the keypoints + keypoints = cv2.goodFeaturesToTrack(frame, mask=None, **self.feature_params) + + # Handle first frame + if not self.initializedFirstFrame: + # Initialize data + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + # Initialization done + self.initializedFirstFrame = True + + return H + + # find correspondences + matchedKeypoints, status, err = cv2.calcOpticalFlowPyrLK(self.prevFrame, frame, self.prevKeyPoints, None) + + # leave good correspondences only + prevPoints = [] + currPoints = [] + + for i in range(len(status)): + if status[i]: + prevPoints.append(self.prevKeyPoints[i]) + currPoints.append(matchedKeypoints[i]) + + prevPoints = np.array(prevPoints) + currPoints = np.array(currPoints) + + # Find rigid matrix + if (np.size(prevPoints, 0) > 4) and (np.size(prevPoints, 0) == np.size(prevPoints, 0)): + H, inliers = cv2.estimateAffinePartial2D(prevPoints, currPoints, cv2.RANSAC) + + # Handle downscale + if self.downscale > 1.0: + H[0, 2] *= self.downscale + H[1, 2] *= self.downscale + else: + LOGGER.warning('WARNING: not enough matching points') + + # Store to next iteration + self.prevFrame = frame.copy() + self.prevKeyPoints = copy.copy(keypoints) + + # gmc_line = str(1000 * (time.time() - t0)) + "\t" + str(H[0, 0]) + "\t" + str(H[0, 1]) + "\t" + str( + # H[0, 2]) + "\t" + str(H[1, 0]) + "\t" + str(H[1, 1]) + "\t" + str(H[1, 2]) + "\n" + # self.gmc_file.write(gmc_line) + + return H + + def applyFile(self, raw_frame, detections=None): + line = self.gmcFile.readline() + tokens = line.split('\t') + H = np.eye(2, 3, dtype=np.float_) + H[0, 0] = float(tokens[1]) + H[0, 1] = float(tokens[2]) + H[0, 2] = float(tokens[3]) + H[1, 0] = float(tokens[4]) + H[1, 1] = float(tokens[5]) + H[1, 2] = float(tokens[6]) + + return H diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/kalman_filter.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/kalman_filter.py new file mode 100644 index 0000000..af680f3 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/kalman_filter.py @@ -0,0 +1,460 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import numpy as np +import scipy.linalg + +# Table for the 0.95 quantile of the chi-square distribution with N 
degrees of freedom (contains values for N=1, ..., 9) +# Taken from MATLAB/Octave's chi2inv function and used as Mahalanobis gating threshold. +chi2inv95 = {1: 3.8415, 2: 5.9915, 3: 7.8147, 4: 9.4877, 5: 11.070, 6: 12.592, 7: 14.067, 8: 15.507, 9: 16.919} + + +class KalmanFilterXYAH: + """ + For bytetrack + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, a, h, vx, vy, va, vh + + contains the bounding box center position (x, y), aspect ratio a, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, a, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, a, h) with center position (x, y), + aspect ratio a, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[3], 2 * self._std_weight_position * measurement[3], 1e-2, + 2 * self._std_weight_position * measurement[3], 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[3], 1e-5, 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + + """ + std_pos = [ + self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-2, + self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[3], self._std_weight_velocity * mean[3], 1e-5, + self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + # mean = np.dot(self._motion_mat, mean) + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). 
+ + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[3], self._std_weight_position * mean[3], 1e-1, + self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrix of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + """ + std_pos = [ + self._std_weight_position * mean[:, 3], self._std_weight_position * mean[:, 3], + 1e-2 * np.ones_like(mean[:, 3]), self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 3], self._std_weight_velocity * mean[:, 3], + 1e-5 * np.ones_like(mean[:, 3]), self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, a, h), where (x, y) + is the center position, a the aspect ratio, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. + + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve((chol_factor, lower), + np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. 
+ only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. + """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) + return np.sum(z * z, axis=0) # square maha + else: + raise ValueError('invalid distance metric') + + +class KalmanFilterXYWH: + """ + For BoT-SORT + A simple Kalman filter for tracking bounding boxes in image space. + + The 8-dimensional state space + + x, y, w, h, vx, vy, vw, vh + + contains the bounding box center position (x, y), width w, height h, + and their respective velocities. + + Object motion follows a constant velocity model. The bounding box location + (x, y, w, h) is taken as direct observation of the state space (linear + observation model). + + """ + + def __init__(self): + ndim, dt = 4, 1. + + # Create Kalman filter model matrices. + self._motion_mat = np.eye(2 * ndim, 2 * ndim) + for i in range(ndim): + self._motion_mat[i, ndim + i] = dt + self._update_mat = np.eye(ndim, 2 * ndim) + + # Motion and observation uncertainty are chosen relative to the current + # state estimate. These weights control the amount of uncertainty in + # the model. This is a bit hacky. + self._std_weight_position = 1. / 20 + self._std_weight_velocity = 1. / 160 + + def initiate(self, measurement): + """Create track from unassociated measurement. + + Parameters + ---------- + measurement : ndarray + Bounding box coordinates (x, y, w, h) with center position (x, y), + width w, and height h. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector (8 dimensional) and covariance matrix (8x8 + dimensional) of the new track. Unobserved velocities are initialized + to 0 mean. + + """ + mean_pos = measurement + mean_vel = np.zeros_like(mean_pos) + mean = np.r_[mean_pos, mean_vel] + + std = [ + 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], + 2 * self._std_weight_position * measurement[2], 2 * self._std_weight_position * measurement[3], + 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3], + 10 * self._std_weight_velocity * measurement[2], 10 * self._std_weight_velocity * measurement[3]] + covariance = np.diag(np.square(std)) + return mean, covariance + + def predict(self, mean, covariance): + """Run Kalman filter prediction step. + + Parameters + ---------- + mean : ndarray + The 8 dimensional mean vector of the object state at the previous + time step. + covariance : ndarray + The 8x8 dimensional covariance matrix of the object state at the + previous time step. + + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. 
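+ The prediction applies the constant-velocity motion model: with dt = 1 each position component is advanced by its velocity (e.g. x' = x + vx) while the velocities stay unchanged, and the process noise is scaled by the current box width and height.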
+ + """ + std_pos = [ + self._std_weight_position * mean[2], self._std_weight_position * mean[3], + self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + std_vel = [ + self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3], + self._std_weight_velocity * mean[2], self._std_weight_velocity * mean[3]] + motion_cov = np.diag(np.square(np.r_[std_pos, std_vel])) + + mean = np.dot(mean, self._motion_mat.T) + covariance = np.linalg.multi_dot((self._motion_mat, covariance, self._motion_mat.T)) + motion_cov + + return mean, covariance + + def project(self, mean, covariance): + """Project state distribution to measurement space. + + Parameters + ---------- + mean : ndarray + The state's mean vector (8 dimensional array). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + + Returns + ------- + (ndarray, ndarray) + Returns the projected mean and covariance matrix of the given state + estimate. + + """ + std = [ + self._std_weight_position * mean[2], self._std_weight_position * mean[3], + self._std_weight_position * mean[2], self._std_weight_position * mean[3]] + innovation_cov = np.diag(np.square(std)) + + mean = np.dot(self._update_mat, mean) + covariance = np.linalg.multi_dot((self._update_mat, covariance, self._update_mat.T)) + return mean, covariance + innovation_cov + + def multi_predict(self, mean, covariance): + """Run Kalman filter prediction step (Vectorized version). + Parameters + ---------- + mean : ndarray + The Nx8 dimensional mean matrix of the object states at the previous + time step. + covariance : ndarray + The Nx8x8 dimensional covariance matrix of the object states at the + previous time step. + Returns + ------- + (ndarray, ndarray) + Returns the mean vector and covariance matrix of the predicted + state. Unobserved velocities are initialized to 0 mean. + """ + std_pos = [ + self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3], + self._std_weight_position * mean[:, 2], self._std_weight_position * mean[:, 3]] + std_vel = [ + self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3], + self._std_weight_velocity * mean[:, 2], self._std_weight_velocity * mean[:, 3]] + sqr = np.square(np.r_[std_pos, std_vel]).T + + motion_cov = [np.diag(sqr[i]) for i in range(len(mean))] + motion_cov = np.asarray(motion_cov) + + mean = np.dot(mean, self._motion_mat.T) + left = np.dot(self._motion_mat, covariance).transpose((1, 0, 2)) + covariance = np.dot(left, self._motion_mat.T) + motion_cov + + return mean, covariance + + def update(self, mean, covariance, measurement): + """Run Kalman filter correction step. + + Parameters + ---------- + mean : ndarray + The predicted state's mean vector (8 dimensional). + covariance : ndarray + The state's covariance matrix (8x8 dimensional). + measurement : ndarray + The 4 dimensional measurement vector (x, y, w, h), where (x, y) + is the center position, w the width, and h the height of the + bounding box. + + Returns + ------- + (ndarray, ndarray) + Returns the measurement-corrected state distribution. 
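+ Internally the Kalman gain is computed by Cholesky-factorizing the projected covariance (scipy.linalg.cho_factor / cho_solve) rather than inverting it explicitly, which is faster and numerically more stable.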
+ + """ + projected_mean, projected_cov = self.project(mean, covariance) + + chol_factor, lower = scipy.linalg.cho_factor(projected_cov, lower=True, check_finite=False) + kalman_gain = scipy.linalg.cho_solve((chol_factor, lower), + np.dot(covariance, self._update_mat.T).T, + check_finite=False).T + innovation = measurement - projected_mean + + new_mean = mean + np.dot(innovation, kalman_gain.T) + new_covariance = covariance - np.linalg.multi_dot((kalman_gain, projected_cov, kalman_gain.T)) + return new_mean, new_covariance + + def gating_distance(self, mean, covariance, measurements, only_position=False, metric='maha'): + """Compute gating distance between state distribution and measurements. + A suitable distance threshold can be obtained from `chi2inv95`. If + `only_position` is False, the chi-square distribution has 4 degrees of + freedom, otherwise 2. + Parameters + ---------- + mean : ndarray + Mean vector over the state distribution (8 dimensional). + covariance : ndarray + Covariance of the state distribution (8x8 dimensional). + measurements : ndarray + An Nx4 dimensional matrix of N measurements, each in + format (x, y, a, h) where (x, y) is the bounding box center + position, a the aspect ratio, and h the height. + only_position : Optional[bool] + If True, distance computation is done with respect to the bounding + box center position only. + Returns + ------- + ndarray + Returns an array of length N, where the i-th element contains the + squared Mahalanobis distance between (mean, covariance) and + `measurements[i]`. + """ + mean, covariance = self.project(mean, covariance) + if only_position: + mean, covariance = mean[:2], covariance[:2, :2] + measurements = measurements[:, :2] + + d = measurements - mean + if metric == 'gaussian': + return np.sum(d * d, axis=1) + elif metric == 'maha': + cholesky_factor = np.linalg.cholesky(covariance) + z = scipy.linalg.solve_triangular(cholesky_factor, d.T, lower=True, check_finite=False, overwrite_b=True) + return np.sum(z * z, axis=0) # square maha + else: + raise ValueError('invalid distance metric') diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/matching.py b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/matching.py new file mode 100644 index 0000000..a2e2488 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/tracker/utils/matching.py @@ -0,0 +1,209 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import numpy as np +import scipy +from scipy.spatial.distance import cdist + +from .kalman_filter import chi2inv95 + +try: + import lap # for linear_assignment + assert lap.__version__ # verify package is not directory +except (ImportError, AssertionError, AttributeError): + from ultralytics.yolo.utils.checks import check_requirements + + check_requirements('lap>=0.4') # install + import lap + + +def merge_matches(m1, m2, shape): + O, P, Q = shape + m1 = np.asarray(m1) + m2 = np.asarray(m2) + + M1 = scipy.sparse.coo_matrix((np.ones(len(m1)), (m1[:, 0], m1[:, 1])), shape=(O, P)) + M2 = scipy.sparse.coo_matrix((np.ones(len(m2)), (m2[:, 0], m2[:, 1])), shape=(P, Q)) + + mask = M1 * M2 + match = mask.nonzero() + match = list(zip(match[0], match[1])) + unmatched_O = tuple(set(range(O)) - {i for i, j in match}) + unmatched_Q = tuple(set(range(Q)) - {j for i, j in match}) + + return match, unmatched_O, unmatched_Q + + +def _indices_to_matches(cost_matrix, indices, thresh): + matched_cost = cost_matrix[tuple(zip(*indices))] + matched_mask = (matched_cost <= 
thresh) + + matches = indices[matched_mask] + unmatched_a = tuple(set(range(cost_matrix.shape[0])) - set(matches[:, 0])) + unmatched_b = tuple(set(range(cost_matrix.shape[1])) - set(matches[:, 1])) + + return matches, unmatched_a, unmatched_b + + +def linear_assignment(cost_matrix, thresh, use_lap=True): + # Linear assignment implementations with scipy and lap.lapjv + if cost_matrix.size == 0: + return np.empty((0, 2), dtype=int), tuple(range(cost_matrix.shape[0])), tuple(range(cost_matrix.shape[1])) + + if use_lap: + _, x, y = lap.lapjv(cost_matrix, extend_cost=True, cost_limit=thresh) + matches = [[ix, mx] for ix, mx in enumerate(x) if mx >= 0] + unmatched_a = np.where(x < 0)[0] + unmatched_b = np.where(y < 0)[0] + else: + # Scipy linear sum assignment is NOT working correctly, DO NOT USE + y, x = scipy.optimize.linear_sum_assignment(cost_matrix) # row y, col x + matches = np.asarray([[i, x] for i, x in enumerate(x) if cost_matrix[i, x] <= thresh]) + unmatched = np.ones(cost_matrix.shape) + for i, xi in matches: + unmatched[i, xi] = 0.0 + unmatched_a = np.where(unmatched.all(1))[0] + unmatched_b = np.where(unmatched.all(0))[0] + + return matches, unmatched_a, unmatched_b + + +def ious(atlbrs, btlbrs): + """ + Compute cost based on IoU + :type atlbrs: list[tlbr] | np.ndarray + :type atlbrs: list[tlbr] | np.ndarray + + :rtype ious np.ndarray + """ + ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32) + if ious.size == 0: + return ious + + ious = bbox_ious(np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32)) + return ious + + +def iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \ + or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlbr for track in atracks] + btlbrs = [track.tlbr for track in btracks] + _ious = ious(atlbrs, btlbrs) + return 1 - _ious # cost matrix + + +def v_iou_distance(atracks, btracks): + """ + Compute cost based on IoU + :type atracks: list[STrack] + :type btracks: list[STrack] + + :rtype cost_matrix np.ndarray + """ + + if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) \ + or (len(btracks) > 0 and isinstance(btracks[0], np.ndarray)): + atlbrs = atracks + btlbrs = btracks + else: + atlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in atracks] + btlbrs = [track.tlwh_to_tlbr(track.pred_bbox) for track in btracks] + _ious = ious(atlbrs, btlbrs) + return 1 - _ious # cost matrix + + +def embedding_distance(tracks, detections, metric='cosine'): + """ + :param tracks: list[STrack] + :param detections: list[BaseTrack] + :param metric: + :return: cost_matrix np.ndarray + """ + + cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32) + if cost_matrix.size == 0: + return cost_matrix + det_features = np.asarray([track.curr_feat for track in detections], dtype=np.float32) + # for i, track in enumerate(tracks): + # cost_matrix[i, :] = np.maximum(0.0, cdist(track.smooth_feat.reshape(1,-1), det_features, metric)) + track_features = np.asarray([track.smooth_feat for track in tracks], dtype=np.float32) + cost_matrix = np.maximum(0.0, cdist(track_features, det_features, metric)) # Normalized features + return cost_matrix + + +def gate_cost_matrix(kf, cost_matrix, tracks, detections, only_position=False): + if cost_matrix.size == 0: + return 
cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position) + cost_matrix[row, gating_distance > gating_threshold] = np.inf + return cost_matrix + + +def fuse_motion(kf, cost_matrix, tracks, detections, only_position=False, lambda_=0.98): + if cost_matrix.size == 0: + return cost_matrix + gating_dim = 2 if only_position else 4 + gating_threshold = chi2inv95[gating_dim] + measurements = np.asarray([det.to_xyah() for det in detections]) + for row, track in enumerate(tracks): + gating_distance = kf.gating_distance(track.mean, track.covariance, measurements, only_position, metric='maha') + cost_matrix[row, gating_distance > gating_threshold] = np.inf + cost_matrix[row] = lambda_ * cost_matrix[row] + (1 - lambda_) * gating_distance + return cost_matrix + + +def fuse_iou(cost_matrix, tracks, detections): + if cost_matrix.size == 0: + return cost_matrix + reid_sim = 1 - cost_matrix + iou_dist = iou_distance(tracks, detections) + iou_sim = 1 - iou_dist + fuse_sim = reid_sim * (1 + iou_sim) / 2 + # det_scores = np.array([det.score for det in detections]) + # det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + return 1 - fuse_sim # fuse cost + + +def fuse_score(cost_matrix, detections): + if cost_matrix.size == 0: + return cost_matrix + iou_sim = 1 - cost_matrix + det_scores = np.array([det.score for det in detections]) + det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) + fuse_sim = iou_sim * det_scores + return 1 - fuse_sim # fuse_cost + + +def bbox_ious(box1, box2, eps=1e-7): + """Boxes are x1y1x2y2 + box1: np.array of shape(nx4) + box2: np.array of shape(mx4) + returns: np.array of shape(nxm) + """ + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1) + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + return inter_area / (box2_area + box1_area[:, None] - inter_area + eps) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/__init__.py new file mode 100644 index 0000000..13e457d --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from . 
import v8 + +__all__ = 'v8', # tuple or list diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/__init__.py new file mode 100644 index 0000000..e61a30b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/__init__.py @@ -0,0 +1,331 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +import contextlib +import re +import shutil +import sys +from difflib import get_close_matches +from pathlib import Path +from types import SimpleNamespace +from typing import Dict, List, Union + +from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR, + IterableSimpleNamespace, __version__, checks, colorstr, yaml_load, yaml_print) + +CLI_HELP_MSG = \ + f""" + Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax: + + yolo TASK MODE ARGS + + Where TASK (optional) is one of [detect, segment, classify] + MODE (required) is one of [train, val, predict, export, track] + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. + See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' + + 1. Train a detection model for 10 epochs with an initial learning_rate of 0.01 + yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + + 2. Predict a YouTube video using a pretrained segmentation model at image size 320: + yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + + 3. Val a pretrained detection model at batch-size 1 and image size 640: + yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + + 4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + + 5. 
Run special commands: + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + + Docs: https://docs.ultralytics.com + Community: https://community.ultralytics.com + GitHub: https://github.com/ultralytics/ultralytics + """ + +# Define keys for arg type checks +CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear', 'fl_gamma' +CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr', + 'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud', + 'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou') # fractional floats limited to 0.0 - 1.0 +CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride', + 'line_thickness', 'workspace', 'nbs', 'save_period') +CFG_BOOL_KEYS = ('save', 'exist_ok', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect', 'cos_lr', + 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show', 'save_txt', + 'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment', 'agnostic_nms', + 'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms', 'v5loader') + +# Define valid tasks and modes +MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark' +TASKS = 'detect', 'segment', 'classify' +TASK2DATA = {'detect': 'coco128.yaml', 'segment': 'coco128-seg.yaml', 'classify': 'imagenet100'} +TASK2MODEL = {'detect': 'yolov8n.pt', 'segment': 'yolov8n-seg.pt', 'classify': 'yolov8n-cls.pt'} + + +def cfg2dict(cfg): + """ + Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object. + + Inputs: + cfg (str) or (Path) or (SimpleNamespace): Configuration object to be converted to a dictionary. + + Returns: + cfg (dict): Configuration object in dictionary format. + """ + if isinstance(cfg, (str, Path)): + cfg = yaml_load(cfg) # load dict + elif isinstance(cfg, SimpleNamespace): + cfg = vars(cfg) # convert to dict + return cfg + + +def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None): + """ + Load and merge configuration data from a file or dictionary. + + Args: + cfg (str) or (Path) or (Dict) or (SimpleNamespace): Configuration data. + overrides (str) or (Dict), optional: Overrides in the form of a file name or a dictionary. Default is None. + + Returns: + (SimpleNamespace): Training arguments namespace. + """ + cfg = cfg2dict(cfg) + + # Merge overrides + if overrides: + overrides = cfg2dict(overrides) + check_cfg_mismatch(cfg, overrides) + cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides) + + # Special handling for numeric project/names + for k in 'project', 'name': + if k in cfg and isinstance(cfg[k], (int, float)): + cfg[k] = str(cfg[k]) + + # Type and Value checks + for k, v in cfg.items(): + if v is not None: # None values may be from optional args + if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')") + elif k in CFG_FRACTION_KEYS: + if not isinstance(v, (int, float)): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')") + if not (0.0 <= v <= 1.0): + raise ValueError(f"'{k}={v}' is an invalid value. 
" + f"Valid '{k}' values are between 0.0 and 1.0.") + elif k in CFG_INT_KEYS and not isinstance(v, int): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"'{k}' must be an int (i.e. '{k}=8')") + elif k in CFG_BOOL_KEYS and not isinstance(v, bool): + raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. " + f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')") + + # Return instance + return IterableSimpleNamespace(**cfg) + + +def check_cfg_mismatch(base: Dict, custom: Dict, e=None): + """ + This function checks for any mismatched keys between a custom configuration list and a base configuration list. + If any mismatched keys are found, the function prints out similar keys from the base list and exits the program. + + Inputs: + - custom (Dict): a dictionary of custom configuration options + - base (Dict): a dictionary of base configuration options + """ + base, custom = (set(x.keys()) for x in (base, custom)) + mismatched = [x for x in custom if x not in base] + if mismatched: + string = '' + for x in mismatched: + matches = get_close_matches(x, base) # key list + matches = [f'{k}={DEFAULT_CFG_DICT[k]}' if DEFAULT_CFG_DICT.get(k) is not None else k for k in matches] + match_str = f'Similar arguments are i.e. {matches}.' if matches else '' + string += f"'{colorstr('red', 'bold', x)}' is not a valid YOLO argument. {match_str}\n" + raise SyntaxError(string + CLI_HELP_MSG) from e + + +def merge_equals_args(args: List[str]) -> List[str]: + """ + Merges arguments around isolated '=' args in a list of strings. + The function considers cases where the first argument ends with '=' or the second starts with '=', + as well as when the middle one is an equals sign. + + Args: + args (List[str]): A list of strings where each element is an argument. + + Returns: + List[str]: A list of strings where the arguments around isolated '=' are merged. + """ + new_args = [] + for i, arg in enumerate(args): + if arg == '=' and 0 < i < len(args) - 1: # merge ['arg', '=', 'val'] + new_args[-1] += f'={args[i + 1]}' + del args[i + 1] + elif arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]: # merge ['arg=', 'val'] + new_args.append(f'{arg}{args[i + 1]}') + del args[i + 1] + elif arg.startswith('=') and i > 0: # merge ['arg', '=val'] + new_args[-1] += arg + else: + new_args.append(arg) + return new_args + + +def entrypoint(debug=''): + """ + This function is the ultralytics package entrypoint, it's responsible for parsing the command line arguments passed + to the package. + + This function allows for: + - passing mandatory YOLO args as a list of strings + - specifying the task to be performed, either 'detect', 'segment' or 'classify' + - specifying the mode, either 'train', 'val', 'test', or 'predict' + - running special modes like 'checks' + - passing overrides to the package's configuration + + It uses the package's default cfg and initializes it using the passed overrides. 
+ Then it calls the CLI function with the composed cfg + """ + args = (debug.split(' ') if debug else sys.argv)[1:] + if not args: # no arguments passed + LOGGER.info(CLI_HELP_MSG) + return + + special = { + 'help': lambda: LOGGER.info(CLI_HELP_MSG), + 'checks': checks.check_yolo, + 'version': lambda: LOGGER.info(__version__), + 'settings': lambda: yaml_print(USER_CONFIG_DIR / 'settings.yaml'), + 'cfg': lambda: yaml_print(DEFAULT_CFG_PATH), + 'copy-cfg': copy_default_cfg} + full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special} + + # Define common mis-uses of special commands, i.e. -h, -help, --help + special.update({k[0]: v for k, v in special.items()}) # singular + special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')}) # singular + special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}} + + overrides = {} # basic overrides, i.e. imgsz=320 + for a in merge_equals_args(args): # merge spaces around '=' sign + if a.startswith('--'): + LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.") + a = a[2:] + if a.endswith(','): + LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.") + a = a[:-1] + if '=' in a: + try: + re.sub(r' *= *', '=', a) # remove spaces around equals sign + k, v = a.split('=', 1) # split on first '=' sign + assert v, f"missing '{k}' value" + if k == 'cfg': # custom.yaml passed + LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}') + overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'} + else: + if v.lower() == 'none': + v = None + elif v.lower() == 'true': + v = True + elif v.lower() == 'false': + v = False + else: + with contextlib.suppress(Exception): + v = eval(v) + overrides[k] = v + except (NameError, SyntaxError, ValueError, AssertionError) as e: + check_cfg_mismatch(full_args_dict, {a: ''}, e) + + elif a in TASKS: + overrides['task'] = a + elif a in MODES: + overrides['mode'] = a + elif a in special: + special[a]() + return + elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool): + overrides[a] = True # auto-True for default bool args, i.e. 'yolo show' sets show=True + elif a in DEFAULT_CFG_DICT: + raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign " + f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}") + else: + check_cfg_mismatch(full_args_dict, {a: ''}) + + # Check keys + check_cfg_mismatch(full_args_dict, overrides) + + # Mode + mode = overrides.get('mode', None) + if mode is None: + mode = DEFAULT_CFG.mode or 'predict' + LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.") + elif mode not in MODES: + if mode not in ('checks', checks): + raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}") + LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.") + checks.check_yolo() + return + + # Task + task = overrides.pop('task', None) + if task: + if task not in TASKS: + raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}") + if 'model' not in overrides: + overrides['model'] = TASK2MODEL[task] + + # Model + model = overrides.pop('model', DEFAULT_CFG.model) + if model is None: + model = 'yolov8n.pt' + LOGGER.warning(f"WARNING ⚠️ 'model' is missing. 
Using default 'model={model}'.") + from ultralytics.yolo.engine.model import YOLO + overrides['model'] = model + model = YOLO(model, task=task) + if isinstance(overrides.get('pretrained'), str): + model.load(overrides['pretrained']) + + # Task Update + if task != model.task: + if task: + LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. " + f"Ignoring 'task={task}' and updating to 'task={model.task}' to match model.") + task = model.task + + # Mode + if mode in ('predict', 'track') and 'source' not in overrides: + overrides['source'] = DEFAULT_CFG.source or ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using default 'source={overrides['source']}'.") + elif mode in ('train', 'val'): + if 'data' not in overrides: + overrides['data'] = TASK2DATA.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data) + LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.") + elif mode == 'export': + if 'format' not in overrides: + overrides['format'] = DEFAULT_CFG.format or 'torchscript' + LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.") + + # Run command in python + # getattr(model, mode)(**vars(get_cfg(overrides=overrides))) # default args using default.yaml + getattr(model, mode)(**overrides) # default args from model + + +# Special modes -------------------------------------------------------------------------------------------------------- +def copy_default_cfg(): + new_file = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml') + shutil.copy2(DEFAULT_CFG_PATH, new_file) + LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {new_file}\n' + f"Example YOLO command with this new custom cfg:\n yolo cfg='{new_file}' imgsz=320 batch=8") + + +if __name__ == '__main__': + # entrypoint(debug='yolo predict model=yolov8n.pt') + entrypoint(debug='') diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/default.yaml b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/default.yaml new file mode 100644 index 0000000..f6df38b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/cfg/default.yaml @@ -0,0 +1,115 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Default training settings and hyperparameters for medium-augmentation COCO training + +task: detect # YOLO task, i.e. detect, segment, classify, pose +mode: train # YOLO mode, i.e. train, val, predict, export, track, benchmark + +# Train settings ------------------------------------------------------------------------------------------------------- +model: # path to model file, i.e. yolov8n.pt, yolov8n.yaml +data: # path to data file, i.e. coco128.yaml +epochs: 100 # number of epochs to train for +patience: 50 # epochs to wait for no observable improvement for early stopping of training +batch: 16 # number of images per batch (-1 for AutoBatch) +imgsz: 640 # size of input images as integer or w,h +save: True # save train checkpoints and predict results +save_period: -1 # Save checkpoint every x epochs (disabled if < 1) +cache: False # True/ram, disk or False. Use cache for data loading +device: # device to run on, i.e. 
cuda device=0 or device=0,1,2,3 or device=cpu +workers: 8 # number of worker threads for data loading (per RANK if DDP) +project: # project name +name: # experiment name, results saved to 'project/name' directory +exist_ok: False # whether to overwrite existing experiment +pretrained: False # whether to use a pretrained model +optimizer: SGD # optimizer to use, choices=['SGD', 'Adam', 'AdamW', 'RMSProp'] +verbose: True # whether to print verbose output +seed: 0 # random seed for reproducibility +deterministic: True # whether to enable deterministic mode +single_cls: False # train multi-class data as single-class +image_weights: False # use weighted image selection for training +rect: False # support rectangular training if mode='train', support rectangular evaluation if mode='val' +cos_lr: False # use cosine learning rate scheduler +close_mosaic: 10 # disable mosaic augmentation for final 10 epochs +resume: False # resume training from last checkpoint +amp: True # Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check +# Segmentation +overlap_mask: True # masks should overlap during training (segment train only) +mask_ratio: 4 # mask downsample ratio (segment train only) +# Classification +dropout: 0.0 # use dropout regularization (classify train only) + +# Val/Test settings ---------------------------------------------------------------------------------------------------- +val: True # validate/test during training +split: val # dataset split to use for validation, i.e. 'val', 'test' or 'train' +save_json: False # save results to JSON file +save_hybrid: False # save hybrid version of labels (labels + additional predictions) +conf: # object confidence threshold for detection (default 0.25 predict, 0.001 val) +iou: 0.7 # intersection over union (IoU) threshold for NMS +max_det: 300 # maximum number of detections per image +half: False # use half precision (FP16) +dnn: False # use OpenCV DNN for ONNX inference +plots: True # save plots during train/val + +# Prediction settings -------------------------------------------------------------------------------------------------- +source: # source directory for images or videos +show: False # show results if possible +save_txt: False # save results as .txt file +save_conf: False # save results with confidence scores +save_crop: False # save cropped images with results +hide_labels: False # hide labels +hide_conf: False # hide confidence scores +vid_stride: 1 # video frame-rate stride +line_thickness: 3 # bounding box thickness (pixels) +visualize: False # visualize model features +augment: False # apply image augmentation to prediction sources +agnostic_nms: False # class-agnostic NMS +classes: # filter results by class, i.e. class=0, or class=[0,2,3] +retina_masks: False # use high-resolution segmentation masks +boxes: True # Show boxes in segmentation predictions + +# Export settings ------------------------------------------------------------------------------------------------------ +format: torchscript # format to export to +keras: False # use Keras +optimize: False # TorchScript: optimize for mobile +int8: False # CoreML/TF INT8 quantization +dynamic: False # ONNX/TF/TensorRT: dynamic axes +simplify: False # ONNX: simplify model +opset: # ONNX: opset version (optional) +workspace: 4 # TensorRT: workspace size (GB) +nms: False # CoreML: add NMS + +# Hyperparameters ------------------------------------------------------------------------------------------------------ +lr0: 0.01 # initial learning rate (i.e. 
SGD=1E-2, Adam=1E-3) +lrf: 0.01 # final learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 7.5 # box loss gain +cls: 0.5 # cls loss gain (scale with pixels) +dfl: 1.5 # dfl loss gain +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +label_smoothing: 0.0 # label smoothing (fraction) +nbs: 64 # nominal batch size +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) + +# Custom config.yaml --------------------------------------------------------------------------------------------------- +cfg: # for overriding defaults.yaml + +# Debug, do not modify ------------------------------------------------------------------------------------------------- +v5loader: False # use legacy YOLOv5 dataloader + +# Tracker settings ------------------------------------------------------------------------------------------------------ +tracker: botsort.yaml # tracker type, ['botsort.yaml', 'bytetrack.yaml'] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/__init__.py new file mode 100644 index 0000000..a7bf7fa --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/__init__.py @@ -0,0 +1,9 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from .base import BaseDataset +from .build import build_classification_dataloader, build_dataloader, load_inference_source +from .dataset import ClassificationDataset, SemanticDataset, YOLODataset +from .dataset_wrappers import MixAndRectDataset + +__all__ = ('BaseDataset', 'ClassificationDataset', 'MixAndRectDataset', 'SemanticDataset', 'YOLODataset', + 'build_classification_dataloader', 'build_dataloader', 'load_inference_source') diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/augment.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/augment.py new file mode 100644 index 0000000..1658e12 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/augment.py @@ -0,0 +1,781 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import math +import random +from copy import deepcopy + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T + +from ..utils import LOGGER, colorstr +from ..utils.checks import check_version +from ..utils.instance import Instances +from ..utils.metrics import bbox_ioa +from ..utils.ops import segment2box +from .utils import polygons2masks, polygons2masks_overlap + + +# TODO: we might need a BaseTransform to make all these augments be compatible with both classification and semantic +class BaseTransform: + + def __init__(self) -> None: + pass + + def apply_image(self, labels): + pass + + def 
apply_instances(self, labels): + pass + + def apply_semantic(self, labels): + pass + + def __call__(self, labels): + self.apply_image(labels) + self.apply_instances(labels) + self.apply_semantic(labels) + + +class Compose: + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, data): + for t in self.transforms: + data = t(data) + return data + + def append(self, transform): + self.transforms.append(transform) + + def tolist(self): + return self.transforms + + def __repr__(self): + format_string = f'{self.__class__.__name__}(' + for t in self.transforms: + format_string += '\n' + format_string += f' {t}' + format_string += '\n)' + return format_string + + +class BaseMixTransform: + """This implementation is from mmyolo""" + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + self.dataset = dataset + self.pre_transform = pre_transform + self.p = p + + def __call__(self, labels): + if random.uniform(0, 1) > self.p: + return labels + + # get index of one or three other images + indexes = self.get_indexes() + if isinstance(indexes, int): + indexes = [indexes] + + # get images information will be used for Mosaic or MixUp + mix_labels = [self.dataset.get_label_info(i) for i in indexes] + + if self.pre_transform is not None: + for i, data in enumerate(mix_labels): + mix_labels[i] = self.pre_transform(data) + labels['mix_labels'] = mix_labels + + # Mosaic or MixUp + labels = self._mix_transform(labels) + labels.pop('mix_labels', None) + return labels + + def _mix_transform(self, labels): + raise NotImplementedError + + def get_indexes(self): + raise NotImplementedError + + +class Mosaic(BaseMixTransform): + """Mosaic augmentation. + Args: + imgsz (Sequence[int]): Image size after mosaic pipeline of single + image. The shape order should be (height, width). + Default to (640, 640). + """ + + def __init__(self, dataset, imgsz=640, p=1.0, border=(0, 0)): + assert 0 <= p <= 1.0, 'The probability should be in range [0, 1]. ' f'got {p}.' + super().__init__(dataset=dataset, p=p) + self.dataset = dataset + self.imgsz = imgsz + self.border = border + + def get_indexes(self): + return [random.randint(0, len(self.dataset) - 1) for _ in range(3)] + + def _mix_transform(self, labels): + mosaic_labels = [] + assert labels.get('rect_shape', None) is None, 'rect and mosaic is exclusive.' + assert len(labels.get('mix_labels', [])) > 0, 'There are no other images for mosaic augment.' 
+ s = self.imgsz + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.border) # mosaic center x, y + for i in range(4): + labels_patch = (labels if i == 0 else labels['mix_labels'][i - 1]).copy() + # Load image + img = labels_patch['img'] + h, w = labels_patch.pop('resized_shape') + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels_patch = self._update_labels(labels_patch, padw, padh) + mosaic_labels.append(labels_patch) + final_labels = self._cat_labels(mosaic_labels) + final_labels['img'] = img4 + return final_labels + + def _update_labels(self, labels, padw, padh): + """Update labels""" + nh, nw = labels['img'].shape[:2] + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(nw, nh) + labels['instances'].add_padding(padw, padh) + return labels + + def _cat_labels(self, mosaic_labels): + if len(mosaic_labels) == 0: + return {} + cls = [] + instances = [] + for labels in mosaic_labels: + cls.append(labels['cls']) + instances.append(labels['instances']) + final_labels = { + 'im_file': mosaic_labels[0]['im_file'], + 'ori_shape': mosaic_labels[0]['ori_shape'], + 'resized_shape': (self.imgsz * 2, self.imgsz * 2), + 'cls': np.concatenate(cls, 0), + 'instances': Instances.concatenate(instances, axis=0), + 'mosaic_border': self.border} + final_labels['instances'].clip(self.imgsz * 2, self.imgsz * 2) + return final_labels + + +class MixUp(BaseMixTransform): + + def __init__(self, dataset, pre_transform=None, p=0.0) -> None: + super().__init__(dataset=dataset, pre_transform=pre_transform, p=p) + + def get_indexes(self): + return random.randint(0, len(self.dataset) - 1) + + def _mix_transform(self, labels): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + labels2 = labels['mix_labels'][0] + labels['img'] = (labels['img'] * r + labels2['img'] * (1 - r)).astype(np.uint8) + labels['instances'] = Instances.concatenate([labels['instances'], labels2['instances']], axis=0) + labels['cls'] = np.concatenate([labels['cls'], labels2['cls']], 0) + return labels + + +class RandomPerspective: + + def __init__(self, + degrees=0.0, + translate=0.1, + scale=0.5, + shear=0.0, + perspective=0.0, + border=(0, 0), + pre_transform=None): + self.degrees = degrees + self.translate = translate + self.scale = scale + self.shear = shear + self.perspective = perspective + # mosaic border + self.border = border + self.pre_transform = pre_transform + + def affine_transform(self, img, border): + # Center + C = np.eye(3) + + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation 
(pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-self.perspective, self.perspective) # x perspective (about y) + P[2, 1] = random.uniform(-self.perspective, self.perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-self.degrees, self.degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - self.scale, 1 + self.scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-self.shear, self.shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[0] # x translation (pixels) + T[1, 2] = random.uniform(0.5 - self.translate, 0.5 + self.translate) * self.size[1] # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + # affine image + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if self.perspective: + img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114)) + return img, M, s + + def apply_bboxes(self, bboxes, M): + """apply affine to bboxes only. + + Args: + bboxes(ndarray): list of bboxes, xyxy format, with shape (num_bboxes, 4). + M(ndarray): affine matrix. + Returns: + new_bboxes(ndarray): bboxes after affine, [num_bboxes, 4]. + """ + n = len(bboxes) + if n == 0: + return bboxes + + xy = np.ones((n * 4, 3)) + xy[:, :2] = bboxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if self.perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + return np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + def apply_segments(self, segments, M): + """apply affine to segments and generate new bboxes from segments. + + Args: + segments(ndarray): list of segments, [num_samples, 500, 2]. + M(ndarray): affine matrix. + Returns: + new_segments(ndarray): list of segments after affine, [num_samples, 500, 2]. + new_bboxes(ndarray): bboxes after affine, [N, 4]. + """ + n, num = segments.shape[:2] + if n == 0: + return [], segments + + xy = np.ones((n * num, 3)) + segments = segments.reshape(-1, 2) + xy[:, :2] = segments + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] + segments = xy.reshape(n, -1, 2) + bboxes = np.stack([segment2box(xy, self.size[0], self.size[1]) for xy in segments], 0) + return bboxes, segments + + def apply_keypoints(self, keypoints, M): + """apply affine to keypoints. + + Args: + keypoints(ndarray): keypoints, [N, 17, 2]. + M(ndarray): affine matrix. + Return: + new_keypoints(ndarray): keypoints after affine, [N, 17, 2]. 
+ """ + n = len(keypoints) + if n == 0: + return keypoints + new_keypoints = np.ones((n * 17, 3)) + new_keypoints[:, :2] = keypoints.reshape(n * 17, 2) # num_kpt is hardcoded to 17 + new_keypoints = new_keypoints @ M.T # transform + new_keypoints = (new_keypoints[:, :2] / new_keypoints[:, 2:3]).reshape(n, 34) # perspective rescale or affine + new_keypoints[keypoints.reshape(-1, 34) == 0] = 0 + x_kpts = new_keypoints[:, list(range(0, 34, 2))] + y_kpts = new_keypoints[:, list(range(1, 34, 2))] + + x_kpts[np.logical_or.reduce((x_kpts < 0, x_kpts > self.size[0], y_kpts < 0, y_kpts > self.size[1]))] = 0 + y_kpts[np.logical_or.reduce((x_kpts < 0, x_kpts > self.size[0], y_kpts < 0, y_kpts > self.size[1]))] = 0 + new_keypoints[:, list(range(0, 34, 2))] = x_kpts + new_keypoints[:, list(range(1, 34, 2))] = y_kpts + return new_keypoints.reshape(n, 17, 2) + + def __call__(self, labels): + """ + Affine images and targets. + + Args: + labels(Dict): a dict of `bboxes`, `segments`, `keypoints`. + """ + if self.pre_transform and 'mosaic_border' not in labels: + labels = self.pre_transform(labels) + labels.pop('ratio_pad') # do not need ratio pad + + img = labels['img'] + cls = labels['cls'] + instances = labels.pop('instances') + # make sure the coord formats are right + instances.convert_bbox(format='xyxy') + instances.denormalize(*img.shape[:2][::-1]) + + border = labels.pop('mosaic_border', self.border) + self.size = img.shape[1] + border[1] * 2, img.shape[0] + border[0] * 2 # w, h + # M is affine matrix + # scale for func:`box_candidates` + img, M, scale = self.affine_transform(img, border) + + bboxes = self.apply_bboxes(instances.bboxes, M) + + segments = instances.segments + keypoints = instances.keypoints + # update bboxes if there are segments. + if len(segments): + bboxes, segments = self.apply_segments(segments, M) + + if keypoints is not None: + keypoints = self.apply_keypoints(keypoints, M) + new_instances = Instances(bboxes, segments, keypoints, bbox_format='xyxy', normalized=False) + # clip + new_instances.clip(*self.size) + + # filter instances + instances.scale(scale_w=scale, scale_h=scale, bbox_only=True) + # make the bboxes have the same scale with new_bboxes + i = self.box_candidates(box1=instances.bboxes.T, + box2=new_instances.bboxes.T, + area_thr=0.01 if len(segments) else 0.10) + labels['instances'] = new_instances[i] + labels['cls'] = cls[i] + labels['img'] = img + labels['resized_shape'] = img.shape[:2] + return labels + + def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute box candidates: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +class RandomHSV: + + def __init__(self, hgain=0.5, sgain=0.5, vgain=0.5) -> None: + self.hgain = hgain + self.sgain = sgain + self.vgain = vgain + + def __call__(self, labels): + img = labels['img'] + if self.hgain or self.sgain or self.vgain: + r = np.random.uniform(-1, 1, 3) * [self.hgain, self.sgain, self.vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = 
np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + return labels + + +class RandomFlip: + + def __init__(self, p=0.5, direction='horizontal') -> None: + assert direction in ['horizontal', 'vertical'], f'Support direction `horizontal` or `vertical`, got {direction}' + assert 0 <= p <= 1.0 + + self.p = p + self.direction = direction + + def __call__(self, labels): + img = labels['img'] + instances = labels.pop('instances') + instances.convert_bbox(format='xywh') + h, w = img.shape[:2] + h = 1 if instances.normalized else h + w = 1 if instances.normalized else w + + # Flip up-down + if self.direction == 'vertical' and random.random() < self.p: + img = np.flipud(img) + instances.flipud(h) + if self.direction == 'horizontal' and random.random() < self.p: + img = np.fliplr(img) + instances.fliplr(w) + labels['img'] = np.ascontiguousarray(img) + labels['instances'] = instances + return labels + + +class LetterBox: + """Resize image and padding for detection, instance segmentation, pose""" + + def __init__(self, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, stride=32): + self.new_shape = new_shape + self.auto = auto + self.scaleFill = scaleFill + self.scaleup = scaleup + self.stride = stride + + def __call__(self, labels=None, image=None): + if labels is None: + labels = {} + img = labels.get('img') if image is None else image + shape = img.shape[:2] # current shape [height, width] + new_shape = labels.pop('rect_shape', self.new_shape) + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not self.scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if self.auto: # minimum rectangle + dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride) # wh padding + elif self.scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + if labels.get('ratio_pad'): + labels['ratio_pad'] = (labels['ratio_pad'], (dw, dh)) # for evaluation + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, + value=(114, 114, 114)) # add border + + if len(labels): + labels = self._update_labels(labels, ratio, dw, dh) + labels['img'] = img + labels['resized_shape'] = new_shape + return labels + else: + return img + + def _update_labels(self, labels, ratio, padw, padh): + """Update labels""" + labels['instances'].convert_bbox(format='xyxy') + labels['instances'].denormalize(*labels['img'].shape[:2][::-1]) + labels['instances'].scale(*ratio) + labels['instances'].add_padding(padw, padh) + return labels + + +class CopyPaste: + + def __init__(self, p=0.5) -> None: + self.p = p + + def __call__(self, labels): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + 
im = labels['img'] + cls = labels['cls'] + h, w = im.shape[:2] + instances = labels.pop('instances') + instances.convert_bbox(format='xyxy') + instances.denormalize(w, h) + if self.p and len(instances.segments): + n = len(instances) + _, w, _ = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # calculate ioa first then select indexes randomly + ins_flip = deepcopy(instances) + ins_flip.fliplr(w) + + ioa = bbox_ioa(ins_flip.bboxes, instances.bboxes) # intersection over area, (N, M) + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(self.p * n)): + cls = np.concatenate((cls, cls[[j]]), axis=0) + instances = Instances.concatenate((instances, ins_flip[[j]]), axis=0) + cv2.drawContours(im_new, instances.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + labels['img'] = im + labels['cls'] = cls + labels['instances'] = instances + return labels + + +class Albumentations: + # YOLOv8 Albumentations class (optional, only used if package is installed) + def __init__(self, p=1.0): + self.p = p + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, labels): + im = labels['img'] + cls = labels['cls'] + if len(cls): + labels['instances'].convert_bbox('xywh') + labels['instances'].normalize(*im.shape[:2][::-1]) + bboxes = labels['instances'].bboxes + # TODO: add supports of segments and keypoints + if self.transform and random.random() < self.p: + new = self.transform(image=im, bboxes=bboxes, class_labels=cls) # transformed + if len(new['class_labels']) > 0: # skip update if no bbox in new im + labels['img'] = new['image'] + labels['cls'] = np.array(new['class_labels']) + bboxes = np.array(new['bboxes']) + labels['instances'].update(bboxes=bboxes) + return labels + + +# TODO: technically this is not an augmentation, maybe we should put this to another files +class Format: + + def __init__(self, + bbox_format='xywh', + normalize=True, + return_mask=False, + return_keypoint=False, + mask_ratio=4, + mask_overlap=True, + batch_idx=True): + self.bbox_format = bbox_format + self.normalize = normalize + self.return_mask = return_mask # set False when training detection only + self.return_keypoint = return_keypoint + self.mask_ratio = mask_ratio + self.mask_overlap = mask_overlap + self.batch_idx = batch_idx # keep the batch indexes + + def __call__(self, labels): + img = labels.pop('img') + h, w = img.shape[:2] + cls = labels.pop('cls') + instances = labels.pop('instances') + instances.convert_bbox(format=self.bbox_format) + instances.denormalize(w, h) + nl = len(instances) + + if self.return_mask: + if nl: + masks, instances, cls = self._format_segments(instances, cls, w, h) + 
masks = torch.from_numpy(masks) + else: + masks = torch.zeros(1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, + img.shape[1] // self.mask_ratio) + labels['masks'] = masks + if self.normalize: + instances.normalize(w, h) + labels['img'] = self._format_img(img) + labels['cls'] = torch.from_numpy(cls) if nl else torch.zeros(nl) + labels['bboxes'] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4)) + if self.return_keypoint: + labels['keypoints'] = torch.from_numpy(instances.keypoints) if nl else torch.zeros((nl, 17, 2)) + # then we can use collate_fn + if self.batch_idx: + labels['batch_idx'] = torch.zeros(nl) + return labels + + def _format_img(self, img): + if len(img.shape) < 3: + img = np.expand_dims(img, -1) + img = np.ascontiguousarray(img.transpose(2, 0, 1)[::-1]) + img = torch.from_numpy(img) + return img + + def _format_segments(self, instances, cls, w, h): + """convert polygon points to bitmap""" + segments = instances.segments + if self.mask_overlap: + masks, sorted_idx = polygons2masks_overlap((h, w), segments, downsample_ratio=self.mask_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + instances = instances[sorted_idx] + cls = cls[sorted_idx] + else: + masks = polygons2masks((h, w), segments, color=1, downsample_ratio=self.mask_ratio) + + return masks, instances, cls + + +def v8_transforms(dataset, imgsz, hyp): + pre_transform = Compose([ + Mosaic(dataset, imgsz=imgsz, p=hyp.mosaic, border=[-imgsz // 2, -imgsz // 2]), + CopyPaste(p=hyp.copy_paste), + RandomPerspective( + degrees=hyp.degrees, + translate=hyp.translate, + scale=hyp.scale, + shear=hyp.shear, + perspective=hyp.perspective, + pre_transform=LetterBox(new_shape=(imgsz, imgsz)), + )]) + return Compose([ + pre_transform, + MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup), + Albumentations(p=1.0), + RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v), + RandomFlip(direction='vertical', p=hyp.flipud), + RandomFlip(direction='horizontal', p=hyp.fliplr)]) # transforms + + +# Classification augmentations ----------------------------------------------------------------------------------------- +def classify_transforms(size=224, mean=(0.0, 0.0, 0.0), std=(1.0, 1.0, 1.0)): # IMAGENET_MEAN, IMAGENET_STD + # Transforms to apply if albumentations not installed + if not isinstance(size, int): + raise TypeError(f'classify_transforms() size {size} must be integer, not (list, tuple)') + if any(mean) or any(std): + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(mean, std, inplace=True)]) + else: + return T.Compose([CenterCrop(size), ToTensor()]) + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=(0.0, 0.0, 0.0), # IMAGENET_MEAN + std=(1.0, 1.0, 1.0), # IMAGENET_STD + auto_aug=False, +): + # YOLOv8 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + jitter = float(jitter) + T += 
[A.ColorJitter(jitter, jitter, jitter, 0)] # brightness, contrast, saturation, 0 hue + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +class ClassifyLetterBox: + # YOLOv8 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv8 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv8 ToTensor class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/base.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/base.py new file mode 100644 index 0000000..f9fd90f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/base.py @@ -0,0 +1,225 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import glob +import math +import os +from multiprocessing.pool import ThreadPool +from pathlib import Path +from typing import Optional + +import cv2 +import numpy as np +from torch.utils.data import Dataset +from tqdm import tqdm + +from ..utils import LOCAL_RANK, NUM_THREADS, TQDM_BAR_FORMAT +from .utils import HELP_URL, IMG_FORMATS + + +class BaseDataset(Dataset): + """Base Dataset. + Args: + img_path (str): image path. + pipeline (dict): a dict of image transforms. + label_path (str): label path, this can also be an ann_file or other custom label path. 
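+ imgsz (int): target image size used when loading and resizing images (default 640).
+ cache (bool | str): cache images to RAM (True/'ram') or to disk ('disk') for faster training; False disables caching.
+ augment (bool): whether the dataset is used in augmented (training) mode.
+ classes (list, optional): if given, labels are filtered to these class indices only.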
+ """ + + def __init__(self, + img_path, + imgsz=640, + cache=False, + augment=True, + hyp=None, + prefix='', + rect=False, + batch_size=None, + stride=32, + pad=0.5, + single_cls=False, + classes=None): + super().__init__() + self.img_path = img_path + self.imgsz = imgsz + self.augment = augment + self.single_cls = single_cls + self.prefix = prefix + + self.im_files = self.get_img_files(self.img_path) + self.labels = self.get_labels() + self.update_labels(include_class=classes) # single_cls and include_class + + self.ni = len(self.labels) + + # rect stuff + self.rect = rect + self.batch_size = batch_size + self.stride = stride + self.pad = pad + if self.rect: + assert self.batch_size is not None + self.set_rectangle() + + # cache stuff + self.ims = [None] * self.ni + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache: + self.cache_images(cache) + + # transforms + self.transforms = self.build_transforms(hyp=hyp) + + def get_img_files(self, img_path): + """Read image files.""" + try: + f = [] # image files + for p in img_path if isinstance(img_path, list) else [img_path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise FileNotFoundError(f'{self.prefix}{p} does not exist') + im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert im_files, f'{self.prefix}No images found' + except Exception as e: + raise FileNotFoundError(f'{self.prefix}Error loading data from {img_path}\n{HELP_URL}') from e + return im_files + + def update_labels(self, include_class: Optional[list]): + """include_class, filter labels to include only these classes (optional)""" + include_class_array = np.array(include_class).reshape(1, -1) + for i in range(len(self.labels)): + if include_class is not None: + cls = self.labels[i]['cls'] + bboxes = self.labels[i]['bboxes'] + segments = self.labels[i]['segments'] + j = (cls == include_class_array).any(1) + self.labels[i]['cls'] = cls[j] + self.labels[i]['bboxes'] = bboxes[j] + if segments: + self.labels[i]['segments'] = [segments[si] for si, idx in enumerate(j) if idx] + if self.single_cls: + self.labels[i]['cls'][:, 0] = 0 + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i] + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if im is None: + raise FileNotFoundError(f'Image Not Found {f}') + h0, w0 = im.shape[:2] # orig hw + r = self.imgsz / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images(self, cache): + # cache images to memory or disk + gb = 0 # Gigabytes of cached images + self.im_hw0, 
self.im_hw = [None] * self.ni, [None] * self.ni + fcn = self.cache_images_to_disk if cache == 'disk' else self.load_image + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(fcn, range(self.ni)) + pbar = tqdm(enumerate(results), total=self.ni, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache == 'disk': + gb += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + gb += self.ims[i].nbytes + pbar.desc = f'{self.prefix}Caching images ({gb / 1E9:.1f}GB {cache})' + pbar.close() + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def set_rectangle(self): + bi = np.floor(np.arange(self.ni) / self.batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + + s = np.array([x.pop('shape') for x in self.labels]) # hw + ar = s[:, 0] / s[:, 1] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * self.imgsz / self.stride + self.pad).astype(int) * self.stride + self.batch = bi # batch index of image + + def __getitem__(self, index): + return self.transforms(self.get_label_info(index)) + + def get_label_info(self, index): + label = self.labels[index].copy() + label.pop('shape', None) # shape is for rect, remove it + label['img'], label['ori_shape'], label['resized_shape'] = self.load_image(index) + label['ratio_pad'] = ( + label['resized_shape'][0] / label['ori_shape'][0], + label['resized_shape'][1] / label['ori_shape'][1], + ) # for evaluation + if self.rect: + label['rect_shape'] = self.batch_shapes[self.batch[index]] + label = self.update_labels_info(label) + return label + + def __len__(self): + return len(self.labels) + + def update_labels_info(self, label): + """custom your label format here""" + return label + + def build_transforms(self, hyp=None): + """Users can custom augmentations here + like: + if self.augment: + # training transforms + return Compose([]) + else: + # val transforms + return Compose([]) + """ + raise NotImplementedError + + def get_labels(self): + """Users can custom their own format here. 
+ Make sure your output is a list with each element like below: + dict( + im_file=im_file, + shape=shape, # format: (height, width) + cls=cls, + bboxes=bboxes, # xywh + segments=segments, # xy + keypoints=keypoints, # xy + normalized=True, # or False + bbox_format="xyxy", # or xywh, ltwh + ) + """ + raise NotImplementedError diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/build.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/build.py new file mode 100644 index 0000000..d4e0b07 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/build.py @@ -0,0 +1,195 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import os +import random +from pathlib import Path + +import numpy as np +import torch +from PIL import Image +from torch.utils.data import DataLoader, dataloader, distributed + +from ultralytics.yolo.data.dataloaders.stream_loaders import (LOADERS, LoadImages, LoadPilAndNumpy, LoadScreenshots, + LoadStreams, LoadTensor, SourceTypes, autocast_list) +from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS +from ultralytics.yolo.utils.checks import check_file + +from ..utils import LOGGER, RANK, colorstr +from ..utils.torch_utils import torch_distributed_zero_first +from .dataset import ClassificationDataset, YOLODataset +from .utils import PIN_MEMORY + + +class InfiniteDataLoader(dataloader.DataLoader): + """Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +def seed_worker(worker_id): # noqa + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def build_dataloader(cfg, batch, img_path, stride=32, rect=False, names=None, rank=-1, mode='train'): + assert mode in ['train', 'val'] + shuffle = mode == 'train' + if cfg.rect and shuffle: + LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False") + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = YOLODataset( + img_path=img_path, + imgsz=cfg.imgsz, + batch_size=batch, + augment=mode == 'train', # augmentation + hyp=cfg, # TODO: probably add a get_hyps_from_cfg function + rect=cfg.rect or rect, # rectangular batches + cache=cfg.cache or None, + single_cls=cfg.single_cls or False, + stride=int(stride), + pad=0.0 if mode == 'train' else 0.5, + prefix=colorstr(f'{mode}: '), + use_segments=cfg.task == 'segment', + use_keypoints=cfg.task == 'keypoint', + names=names, + classes=cfg.classes) + + batch = min(batch, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + workers = cfg.workers if mode == 'train' else cfg.workers * 2 + nw = min([os.cpu_count() // max(nd, 1), batch if batch > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else 
distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if cfg.image_weights or cfg.close_mosaic else InfiniteDataLoader # allow attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return loader(dataset=dataset, + batch_size=batch, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=getattr(dataset, 'collate_fn', None), + worker_init_fn=seed_worker, + generator=generator), dataset + + +# build classification +# TODO: using cfg like `build_dataloader` +def build_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) + + +def check_source(source): + webcam, screenshot, from_img, in_memory, tensor = False, False, False, False, False + if isinstance(source, (str, int, Path)): # int for local usb camera + source = str(source) + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + elif isinstance(source, tuple(LOADERS)): + in_memory = True + elif isinstance(source, (list, tuple)): + source = autocast_list(source) # convert all list elements to PIL or np arrays + from_img = True + elif isinstance(source, (Image.Image, np.ndarray)): + from_img = True + elif isinstance(source, torch.Tensor): + tensor = True + else: + raise TypeError('Unsupported image type. 
For supported types see https://docs.ultralytics.com/modes/predict') + + return source, webcam, screenshot, from_img, in_memory, tensor + + +def load_inference_source(source=None, transforms=None, imgsz=640, vid_stride=1, stride=32, auto=True): + """ + TODO: docs + """ + source, webcam, screenshot, from_img, in_memory, tensor = check_source(source) + source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor) + + # Dataloader + if tensor: + dataset = LoadTensor(source) + elif in_memory: + dataset = source + elif webcam: + dataset = LoadStreams(source, + imgsz=imgsz, + stride=stride, + auto=auto, + transforms=transforms, + vid_stride=vid_stride) + + elif screenshot: + dataset = LoadScreenshots(source, imgsz=imgsz, stride=stride, auto=auto, transforms=transforms) + elif from_img: + dataset = LoadPilAndNumpy(source, imgsz=imgsz, stride=stride, auto=auto, transforms=transforms) + else: + dataset = LoadImages(source, + imgsz=imgsz, + stride=stride, + auto=auto, + transforms=transforms, + vid_stride=vid_stride) + + setattr(dataset, 'source_type', source_type) # attach source types + + return dataset diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/stream_loaders.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/stream_loaders.py new file mode 100644 index 0000000..876d44e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/stream_loaders.py @@ -0,0 +1,377 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import glob +import math +import os +import time +from dataclasses import dataclass +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import cv2 +import numpy as np +import requests +import torch +from PIL import Image + +from ultralytics.yolo.data.augment import LetterBox +from ultralytics.yolo.data.utils import IMG_FORMATS, VID_FORMATS +from ultralytics.yolo.utils import LOGGER, ROOT, is_colab, is_kaggle, ops +from ultralytics.yolo.utils.checks import check_requirements + + +@dataclass +class SourceTypes: + webcam: bool = False + screenshot: bool = False + from_img: bool = False + tensor: bool = False + + +class LoadStreams: + # YOLOv8 streamloader, i.e. `yolo predict source='rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='file.streams', imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.imgsz = imgsz + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [ops.clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 
'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy # noqa + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0 and (is_colab() or is_kaggle()): + raise NotImplementedError("'source=0' webcam not supported in Colab and Kaggle notebooks. " + "Try running 'source=0' in a local environment.") + cap = cv2.VideoCapture(s) + if not cap.isOpened(): + raise ConnectionError(f'{st}Failed to open {s}') + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + success, self.imgs[i] = cap.read() # guarantee first frame + if not success or self.imgs[i] is None: + raise ConnectionError(f'{st}Failed to read images from {s}') + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f'{st}Success ✅ ({self.frames[i]} frames of shape {w}x{h} at {self.fps[i]:.2f} FPS)') + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([LetterBox(imgsz, auto, stride=stride)(image=x).shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + self.bs = self.__len__() + + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([LetterBox(self.imgsz, self.auto, stride=self.stride)(image=x) for x in im0]) + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +class LoadScreenshots: + # YOLOv8 screenshot dataloader, i.e. 
`yolo predict source=screen` + def __init__(self, source, imgsz=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss # noqa + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.imgsz = imgsz + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + self.bs = 1 + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = LetterBox(self.imgsz, self.auto, stride=self.stride)(image=im0) + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv8 image/video dataloader, i.e. `yolo predict source=image.jpg/vid.mp4` + def __init__(self, path, imgsz=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.imgsz = imgsz + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + self.bs = 1 + if any(videos): + self.orientation = None # rotation degrees + self._new_video(videos[0]) # new video + else: + self.cap = None + if self.nf == 0: + raise FileNotFoundError(f'No images or videos found in {p}. 
' + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}') + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + success, im0 = self.cap.retrieve() + while not success: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + success, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + if im0 is None: + raise FileNotFoundError(f'Image Not Found {path}') + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = LetterBox(self.imgsz, self.auto, stride=self.stride)(image=im0) + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + if hasattr(cv2, 'CAP_PROP_ORIENTATION_META'): # cv2<4.6.0 compatibility + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # Disable auto-orientation due to known issues in https://github.com/ultralytics/yolov5/issues/8493 + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + + +class LoadPilAndNumpy: + + def __init__(self, im0, imgsz=640, stride=32, auto=True, transforms=None): + if not isinstance(im0, list): + im0 = [im0] + self.paths = [getattr(im, 'filename', f'image{i}.jpg') for i, im in enumerate(im0)] + self.im0 = [self._single_check(im) for im in im0] + self.imgsz = imgsz + self.stride = stride + self.auto = auto + self.transforms = transforms + self.mode = 'image' + # generate fake paths + self.bs = len(self.im0) + + @staticmethod + def _single_check(im): + assert isinstance(im, (Image.Image, np.ndarray)), f'Expected PIL/np.ndarray image type, but got {type(im)}' + if isinstance(im, Image.Image): + if im.mode != 'RGB': + im = im.convert('RGB') + im = np.asarray(im)[:, :, ::-1] + im = np.ascontiguousarray(im) # contiguous + return im + + def _single_preprocess(self, im, auto): + if self.transforms: + im = self.transforms(im) # transforms + else: + im = LetterBox(self.imgsz, auto=auto, stride=self.stride)(image=im) + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + return im + + def __len__(self): + return len(self.im0) + + def __next__(self): + if self.count == 1: # loop only once as it's batch inference + raise StopIteration + auto = all(x.shape == self.im0[0].shape for x in self.im0) and self.auto + im = [self._single_preprocess(im, auto) for im in self.im0] + im = np.stack(im, 0) if 
len(im) > 1 else im[0][None] + self.count += 1 + return self.paths, im, self.im0, None, '' + + def __iter__(self): + self.count = 0 + return self + + +class LoadTensor: + + def __init__(self, imgs) -> None: + self.im0 = imgs + self.bs = imgs.shape[0] + self.mode = 'image' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == 1: + raise StopIteration + self.count += 1 + return None, self.im0, self.im0, None, '' # self.paths, im, self.im0, None, '' + + def __len__(self): + return self.bs + + +def autocast_list(source): + """ + Merges a list of source of different types into a list of numpy arrays or PIL images + """ + files = [] + for im in source: + if isinstance(im, (str, Path)): # filename or uri + files.append(Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im)) + elif isinstance(im, (Image.Image, np.ndarray)): # PIL or np Image + files.append(im) + else: + raise TypeError(f'type {type(im).__name__} is not a supported Ultralytics prediction source type. \n' + f'See https://docs.ultralytics.com/modes/predict for supported source types.') + + return files + + +LOADERS = [LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots] + +if __name__ == '__main__': + img = cv2.imread(str(ROOT / 'assets/bus.jpg')) + dataset = LoadPilAndNumpy(im0=img) + for d in dataset: + print(d[0]) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5augmentations.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5augmentations.py new file mode 100644 index 0000000..acd6d83 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5augmentations.py @@ -0,0 +1,402 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.checks import check_version +from ultralytics.yolo.utils.metrics import bbox_ioa +from ultralytics.yolo.utils.ops import resample_segments, segment2box, xywhn2xyxy + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self, size=640): + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in 
zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = (x - mean) / std + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, 
top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = 
len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + + # calculate ioa first then select indexes randomly + boxes = np.stack([w - labels[:, 3], labels[:, 2], w - labels[:, 1], labels[:, 4]], axis=-1) # (n, 4) + ioa = bbox_ioa(boxes, labels[:, 1:5]) # intersection over area + indexes = np.nonzero((ioa < 0.30).all(1))[0] # (N, ) + n = len(indexes) + for j in random.sample(list(indexes), k=round(p * n)): + l, box, s = labels[j], boxes[j], segments[j] + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([[xmin, ymin, xmax, ymax]], dtype=np.float32) + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h))[0] # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + jitter 
= float(jitter) + T += [A.ColorJitter(jitter, jitter, jitter, 0)] # brightness, contrast, satuaration, 0 hue + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else self.h, self.w + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e. 
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5loader.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5loader.py new file mode 100644 index 0000000..f6b6734 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataloaders/v5loader.py @@ -0,0 +1,1096 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import cv2 +import numpy as np +import psutil +import torch +import torchvision +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from ultralytics.yolo.utils import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, is_colab, is_dir_writeable, + is_kaggle) +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.ops import clean_str, segments2boxes, xyn2xy, xywh2xyxy, xywhn2xyxy, xyxy2xywhn +from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first + +from .v5augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) + +# Parameters +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.sha256(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. + :return: An image. 
+ """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info['exif'] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + close_mosaic=False, + min_items=0, + prefix='', + shuffle=False, + seed=0): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + min_items=min_items, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights or close_mosaic else InfiniteDataLoader # DataLoader allows attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + seed + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
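+ # Open the capture for this source and probe its properties (frame size, FPS, frame count) before starting the reader thread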
+ cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = 
stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise FileNotFoundError(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except (FileNotFoundError, AssertionError, AttributeError): + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in (-1, 0): + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + + # Filter images + if min_items: + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = [segment[si] for si, idx in enumerate(j) if idx] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.close() + + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b * self.n / n # GB 
required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + if path.exists(): + path.unlink() # remove *.cache file if exists + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f'{prefix}Scanning {path.parent / path.stem}...' + total = len(self.im_files) + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))) + pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + pbar.close() + + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + if is_dir_writeable(path.parent): + np.save(str(path), x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + else: + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = 
self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. 
Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. 
Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + # YOLOv8 collate function, outputs dict + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + batch_idx, cls, bboxes = torch.cat(label, 0).split((1, 1, 4), dim=1) + return { + 'ori_shape': tuple((x[0] if x else None) for x in shapes), + 'ratio_pad': tuple((x[1] if x else None) for x in shapes), + 'im_file': path, + 'img': torch.stack(im, 0), + 'cls': cls, + 'bboxes': bboxes, + 'batch_idx': batch_idx.view(-1)} + + @staticmethod + def collate_fn_old(batch): + # YOLOv5 original collate function + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def 
flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + for x in txt: + if (path.parent / x).exists(): + (path.parent / x).unlink() # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in 
('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
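+ Wraps torchvision.datasets.ImageFolder; decoded images may optionally be cached in RAM or as *.npy files on disk to speed up repeated epochs.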
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset.py new file mode 100644 index 0000000..2bc7536 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset.py @@ -0,0 +1,263 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import cv2 +import numpy as np +import torch +import torchvision +from tqdm import tqdm + +from ..utils import LOCAL_RANK, NUM_THREADS, TQDM_BAR_FORMAT, is_dir_writeable +from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms +from .base import BaseDataset +from .utils import HELP_URL, LOGGER, get_hash, img2label_paths, verify_image_label + + +class YOLODataset(BaseDataset): + cache_version = '1.0.2' # dataset labels *.cache version, >= 1.0.0 for YOLOv8 + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + """ + Dataset class for loading images object detection and/or segmentation labels in YOLO format. + + Args: + img_path (str): path to the folder containing images. + imgsz (int): image size (default: 640). 
+ cache (bool): if True, a cache file of the labels is created to speed up future creation of dataset instances + (default: False). + augment (bool): if True, data augmentation is applied (default: True). + hyp (dict): hyperparameters to apply data augmentation (default: None). + prefix (str): prefix to print in log messages (default: ''). + rect (bool): if True, rectangular training is used (default: False). + batch_size (int): size of batches (default: None). + stride (int): stride (default: 32). + pad (float): padding (default: 0.0). + single_cls (bool): if True, single class training is used (default: False). + use_segments (bool): if True, segmentation masks are used as labels (default: False). + use_keypoints (bool): if True, keypoints are used as labels (default: False). + names (list): class names (default: None). + + Returns: + A PyTorch dataset object that can be used for training an object detection or segmentation model. + """ + + def __init__(self, + img_path, + imgsz=640, + cache=False, + augment=True, + hyp=None, + prefix='', + rect=False, + batch_size=None, + stride=32, + pad=0.0, + single_cls=False, + use_segments=False, + use_keypoints=False, + names=None, + classes=None): + self.use_segments = use_segments + self.use_keypoints = use_keypoints + self.names = names + assert not (self.use_segments and self.use_keypoints), 'Can not use both segments and keypoints.' + super().__init__(img_path, imgsz, cache, augment, hyp, prefix, rect, batch_size, stride, pad, single_cls, + classes) + + def cache_labels(self, path=Path('./labels.cache')): + """Cache dataset labels, check images and read shapes. + Args: + path (Path): path where to save the cache file (default: Path('./labels.cache')). + Returns: + (dict): labels. + """ + x = {'labels': []} + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f'{self.prefix}Scanning {path.parent / path.stem}...' + total = len(self.im_files) + with ThreadPool(NUM_THREADS) as pool: + results = pool.imap(func=verify_image_label, + iterable=zip(self.im_files, self.label_files, repeat(self.prefix), + repeat(self.use_keypoints), repeat(len(self.names)))) + pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x['labels'].append( + dict( + im_file=im_file, + shape=shape, + cls=lb[:, 0:1], # n, 1 + bboxes=lb[:, 1:], # n, 4 + segments=segments, + keypoints=keypoint, + normalized=True, + bbox_format='xywh')) + if msg: + msgs.append(msg) + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + pbar.close() + + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{self.prefix}WARNING ⚠️ No labels found in {path}. 
{HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + if is_dir_writeable(path.parent): + if path.exists(): + path.unlink() # remove *.cache file if exists + np.save(str(path), x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{self.prefix}New cache created: {path}') + else: + LOGGER.warning(f'{self.prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.') + return x + + def get_labels(self): + self.label_files = img2label_paths(self.im_files) + cache_path = Path(self.label_files[0]).parent.with_suffix('.cache') + try: + import gc + gc.disable() # reduce pickle load time https://github.com/ultralytics/ultralytics/pull/1585 + cache, exists = np.load(str(cache_path), allow_pickle=True).item(), True # load dict + gc.enable() + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except (FileNotFoundError, AssertionError, AttributeError): + cache, exists = self.cache_labels(cache_path), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in (-1, 0): + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + tqdm(None, desc=self.prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + if nf == 0: # number of labels found + raise FileNotFoundError(f'{self.prefix}No labels found in {cache_path}, can not start training. {HELP_URL}') + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels = cache['labels'] + self.im_files = [lb['im_file'] for lb in labels] # update im_files + + # Check if the dataset is all boxes or all segments + lengths = ((len(lb['cls']), len(lb['bboxes']), len(lb['segments'])) for lb in labels) + len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths)) + if len_segments and len_boxes != len_segments: + LOGGER.warning( + f'WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, ' + f'len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. ' + 'To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset.') + for lb in labels: + lb['segments'] = [] + if len_cls == 0: + raise ValueError(f'All labels empty in {cache_path}, can not start training without labels. 
{HELP_URL}') + return labels + + # TODO: use hyp config to set all these augmentations + def build_transforms(self, hyp=None): + if self.augment: + hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0 + hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0 + transforms = v8_transforms(self, self.imgsz, hyp) + else: + transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)]) + transforms.append( + Format(bbox_format='xywh', + normalize=True, + return_mask=self.use_segments, + return_keypoint=self.use_keypoints, + batch_idx=True, + mask_ratio=hyp.mask_ratio, + mask_overlap=hyp.overlap_mask)) + return transforms + + def close_mosaic(self, hyp): + hyp.mosaic = 0.0 # set mosaic ratio=0.0 + hyp.copy_paste = 0.0 # keep the same behavior as previous v8 close-mosaic + hyp.mixup = 0.0 # keep the same behavior as previous v8 close-mosaic + self.transforms = self.build_transforms(hyp) + + def update_labels_info(self, label): + """custom your label format here""" + # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label + # we can make it also support classification and semantic segmentation by add or remove some dict keys there. + bboxes = label.pop('bboxes') + segments = label.pop('segments') + keypoints = label.pop('keypoints', None) + bbox_format = label.pop('bbox_format') + normalized = label.pop('normalized') + label['instances'] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized) + return label + + @staticmethod + def collate_fn(batch): + new_batch = {} + keys = batch[0].keys() + values = list(zip(*[list(b.values()) for b in batch])) + for i, k in enumerate(keys): + value = values[i] + if k == 'img': + value = torch.stack(value, 0) + if k in ['masks', 'keypoints', 'bboxes', 'cls']: + value = torch.cat(value, 0) + new_batch[k] = value + new_batch['batch_idx'] = list(new_batch['batch_idx']) + for i in range(len(new_batch['batch_idx'])): + new_batch['batch_idx'][i] += i # add target image index for build_targets() + new_batch['batch_idx'] = torch.cat(new_batch['batch_idx'], 0) + return new_batch + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. 
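Each entry that `cache_labels` stores in `x['labels']`, and that `update_labels_info` later turns into an `Instances` object, is a plain dictionary. An illustrative entry (path and object count invented for the example) looks like this:

```python
import numpy as np

label = {
    'im_file': 'datasets/coco128/images/train2017/000000000009.jpg',  # hypothetical path
    'shape': (640, 480),                            # image size as recorded by verify_image_label
    'cls': np.zeros((2, 1), dtype=np.float32),      # one class id per object, shape (n, 1)
    'bboxes': np.zeros((2, 4), dtype=np.float32),   # normalized xywh boxes, shape (n, 4)
    'segments': [],                                 # list of (m, 2) polygons when use_segments=True
    'keypoints': None,                              # keypoint array when use_keypoints=True
    'normalized': True,
    'bbox_format': 'xywh',
}
```

`collate_fn` then stacks the `img` tensors, concatenates `cls`, `bboxes`, `masks` and `keypoints` across the batch, and offsets `batch_idx` so every target row remembers which image in the batch it belongs to.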
+ Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + else: + sample = self.torch_transforms(im) + return {'img': sample, 'cls': j} + + def __len__(self) -> int: + return len(self.samples) + + +# TODO: support semantic segmentation +class SemanticDataset(BaseDataset): + + def __init__(self): + pass diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset_wrappers.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset_wrappers.py new file mode 100644 index 0000000..67c7326 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/dataset_wrappers.py @@ -0,0 +1,39 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import collections +from copy import deepcopy + +from .augment import LetterBox + + +class MixAndRectDataset: + """A wrapper of multiple images mixed dataset. + + Args: + dataset (:obj:`BaseDataset`): The dataset to be mixed. + transforms (Sequence[dict]): config dict to be composed. + """ + + def __init__(self, dataset): + self.dataset = dataset + self.imgsz = dataset.imgsz + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, index): + labels = deepcopy(self.dataset[index]) + for transform in self.dataset.transforms.tolist(): + # mosaic and mixup + if hasattr(transform, 'get_indexes'): + indexes = transform.get_indexes(self.dataset) + if not isinstance(indexes, collections.abc.Sequence): + indexes = [indexes] + mix_labels = [deepcopy(self.dataset[index]) for index in indexes] + labels['mix_labels'] = mix_labels + if self.dataset.rect and isinstance(transform, LetterBox): + transform.new_shape = self.dataset.batch_shapes[self.dataset.batch[index]] + labels = transform(labels) + if 'mix_labels' in labels: + labels.pop('mix_labels') + return labels diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/scripts/download_weights.sh b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/scripts/download_weights.sh new file mode 100644 index 0000000..c5f4706 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/data/scripts/download_weights.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Ultralytics YOLO 🚀, GPL-3.0 license +# Download latest models from https://github.com/ultralytics/assets/releases +# Example usage: bash ultralytics/yolo/data/scripts/download_weights.sh +# parent +# └── weights +# ├── yolov8n.pt ← downloads here +# ├── yolov8s.pt +# └── ... 
+ +python - < 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb) and (not keypoint): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + if keypoint: + assert lb.shape[1] == 56, 'labels require 56 columns each' + assert (lb[:, 5::3] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert (lb[:, 6::3] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + kpts = np.zeros((lb.shape[0], 39)) + for i in range(len(lb)): + kpt = np.delete(lb[i, 5:], np.arange(2, lb.shape[1] - 5, 3)) # remove occlusion param from GT + kpts[i] = np.hstack((lb[i, :5], kpt)) + lb = kpts + assert lb.shape[1] == 39, 'labels require 39 columns each after removing occlusion parameter' + else: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb[:, 1:] <= 1).all(), \ + f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + # All labels + max_cls = int(lb[:, 0].max()) # max label count + assert max_cls <= num_cls, \ + f'Label class {max_cls} exceeds dataset class count {num_cls}. ' \ + f'Possible class labels are 0-{num_cls - 1}' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 39), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 39), dtype=np.float32) if keypoint else np.zeros((0, 5), dtype=np.float32) + if keypoint: + keypoints = lb[:, 5:].reshape(-1, 17, 2) + lb = lb[:, :5] + return im_file, lb, shape, segments, keypoints, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, None, nm, nf, ne, nc, msg] + + +def polygon2mask(imgsz, polygons, color=1, downsample_ratio=1): + """ + Args: + imgsz (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, M is the number of points(Be divided by 2). 
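The column counts asserted in the keypoint branch above follow directly from the pose label layout: 5 box fields plus 17 keypoints, each stored as an (x, y, visibility) triple in the file, with the visibility flag stripped before training. A quick sanity check of that arithmetic:

```python
box_fields = 5                                     # class, x, y, w, h
keypoints = 17
with_visibility = box_fields + keypoints * 3       # 56 columns expected in the label file
without_visibility = box_fields + keypoints * 2    # 39 columns after removing the occlusion flag
assert (with_visibility, without_visibility) == (56, 39)
```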
+ color (int): color + downsample_ratio (int): downsample ratio + """ + mask = np.zeros(imgsz, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(imgsz, polygons, color, downsample_ratio=1): + """ + Args: + imgsz (tuple): The image size. + polygons (list[np.ndarray]): each polygon is [N, M], N is number of polygons, M is number of points (M % 2 = 0) + color (int): color + downsample_ratio (int): downsample ratio + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(imgsz, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(imgsz, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((imgsz[0] // downsample_ratio, imgsz[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask(imgsz, [segments[si].reshape(-1)], downsample_ratio=downsample_ratio, color=1) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index + + +def check_det_dataset(dataset, autodownload=True): + # Download, check and/or unzip dataset if not found locally + data = check_file(dataset) + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): + new_dir = safe_download(data, dir=DATASETS_DIR, unzip=True, delete=False, curl=False) + data = next((DATASETS_DIR / new_dir).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data, append_filename=True) # dictionary + + # Checks + for k in 'train', 'val': + if k not in data: + raise SyntaxError( + emojis(f"{dataset} '{k}:' key missing ❌.\n'train' and 'val' are required in all data YAMLs.")) + if 'names' not in data and 'nc' not in data: + raise SyntaxError(emojis(f"{dataset} key missing ❌.\n either 'names' or 'nc' are required in all data YAMLs.")) + if 'names' in data and 'nc' in data and len(data['names']) != data['nc']: + raise SyntaxError(emojis(f"{dataset} 'names' length {len(data['names'])} and 'nc: {data['nc']}' must match.")) + if 'names' not in data: + data['names'] = [f'class_{i}' for i in range(data['nc'])] + else: + data['nc'] = len(data['names']) + + data['names'] = check_class_names(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or Path(data.get('yaml_file', '')).parent) # dataset root + + if not path.is_absolute(): + path = (DATASETS_DIR / path).resolve() + data['path'] = path # download scripts + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + 
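`check_det_dataset` above requires `train` and `val` keys and either `names` or `nc`, resolves relative split paths against the dataset `path`, and, further below, can auto-download via an optional `download` entry. The dictionary equivalent of a minimal data YAML that passes those checks might look like this (paths and class names are illustrative):

```python
data = {
    'path': 'datasets/my_dataset',     # dataset root, resolved against DATASETS_DIR when relative
    'train': 'images/train',           # prepended with `path` by the loop above
    'val': 'images/val',
    'names': {0: 'person', 1: 'car'},  # with 'names' given, 'nc' is derived as len(names)
}
```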
train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + m = f"\nDataset '{dataset}' images not found ⚠️, missing paths %s" % [str(x) for x in val if not x.exists()] + if s and autodownload: + LOGGER.warning(m) + else: + raise FileNotFoundError(m) + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + safe_download(url=s, dir=DATASETS_DIR, delete=True) + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = os.system(s) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}\n') + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf') # download fonts + + return data # dictionary + + +def check_cls_dataset(dataset: str): + """ + Check a classification dataset such as Imagenet. + + Copy code + This function takes a `dataset` name as input and returns a dictionary containing information about the dataset. + If the dataset is not found, it attempts to download the dataset from the internet and save it to the local file system. + + Args: + dataset (str): Name of the dataset. + + Returns: + data (dict): A dictionary containing the following keys and values: + 'train': Path object for the directory containing the training set of the dataset + 'val': Path object for the directory containing the validation set of the dataset + 'nc': Number of classes in the dataset + 'names': List of class names in the dataset + """ + data_dir = (DATASETS_DIR / dataset).resolve() + if not data_dir.is_dir(): + LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + t = time.time() + if dataset == 'imagenet': + subprocess.run(f"bash {ROOT / 'yolo/data/scripts/get_imagenet.sh'}", shell=True, check=True) + else: + url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{dataset}.zip' + download(url, dir=data_dir.parent) + s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" + LOGGER.info(s) + train_set = data_dir / 'train' + test_set = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val + nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes + names = [x.name for x in (data_dir / 'train').iterdir() if x.is_dir()] # class names list + names = dict(enumerate(sorted(names))) + return {'train': train_set, 'val': test_set, 'nc': nc, 'names': names} + + +class HUBDatasetStats(): + """ Class for generating HUB dataset JSON and `-hub` dataset directory + + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + + Usage + from ultralytics.yolo.data.utils import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('/Users/glennjocher/Downloads/coco6.zip') # usage 2 + stats.get_json(save=False) + stats.process_images() + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + # data = yaml_load(check_yaml(yaml_path)) # data dict + data = check_det_dataset(yaml_path, 
autodownload) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception('error/HUB/dataset_stats/yaml_load') from e + + self.hub_dir = Path(str(data['path']) + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': len(data['names']), 'names': list(data['names'].values())} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + unzip_file(path, path=path.parent) + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + # from ultralytics.yolo.data import YOLODataset + from ultralytics.yolo.data.dataloaders.v5loader import LoadImagesAndLabels + + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
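For classification data, `check_cls_dataset` (defined a little earlier) reads no YAML at all: it expects an ImageFolder-style tree under `DATASETS_DIR/<dataset>` and returns a small dictionary. A sketch of the layout and return value, with folder and class names invented for illustration:

```python
from pathlib import Path

# datasets/imagenette160/
# ├── train/
# │   ├── n01440764/ ...   # one sub-folder per class
# │   └── n02102040/ ...
# └── val/                 # used when no test/ folder exists
#
# check_cls_dataset('imagenette160') then returns roughly:
data = {
    'train': Path('datasets/imagenette160/train'),
    'val': Path('datasets/imagenette160/val'),
    'nc': 2,
    'names': {0: 'n01440764', 1: 'n02102040'},  # dict(enumerate(sorted(sub-folder names)))
}
```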
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=len(dataset), desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': len(dataset), + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + LOGGER.info(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + # from ultralytics.yolo.data import YOLODataset + from ultralytics.yolo.data.dataloaders.v5loader import LoadImagesAndLabels + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + with ThreadPool(NUM_THREADS) as pool: + for _ in tqdm(pool.imap(self._hub_ops, dataset.im_files), total=len(dataset), desc=f'{split} images'): + pass + LOGGER.info(f'Done. All images saved to {self.im_dir}') + return self.im_dir diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/exporter.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/exporter.py new file mode 100644 index 0000000..a77c08e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/exporter.py @@ -0,0 +1,874 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Export a YOLOv8 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit + +Format | `format=argument` | Model +--- | --- | --- +PyTorch | - | yolov8n.pt +TorchScript | `torchscript` | yolov8n.torchscript +ONNX | `onnx` | yolov8n.onnx +OpenVINO | `openvino` | yolov8n_openvino_model/ +TensorRT | `engine` | yolov8n.engine +CoreML | `coreml` | yolov8n.mlmodel +TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/ +TensorFlow GraphDef | `pb` | yolov8n.pb +TensorFlow Lite | `tflite` | yolov8n.tflite +TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov8n_web_model/ +PaddlePaddle | `paddle` | yolov8n_paddle_model/ + +Requirements: + $ pip install ultralytics[export] + +Python: + from ultralytics import YOLO + model = YOLO('yolov8n.pt') + results = model.export(format='onnx') + +CLI: + $ yolo mode=export model=yolov8n.pt format=onnx + +Inference: + $ yolo predict model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle + +TensorFlow.js: + $ cd .. 
&& git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov8n_web_model public/yolov8n_web_model + $ npm start +""" +import json +import os +import platform +import subprocess +import time +import warnings +from collections import defaultdict +from copy import deepcopy +from pathlib import Path + +import torch + +from ultralytics.nn.autobackend import check_class_names +from ultralytics.nn.modules import C2f, Detect, Segment +from ultralytics.nn.tasks import DetectionModel, SegmentationModel +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.utils import (DEFAULT_CFG, LINUX, LOGGER, MACOS, __version__, callbacks, colorstr, + get_default_args, yaml_save) +from ultralytics.yolo.utils.checks import check_imgsz, check_requirements, check_version +from ultralytics.yolo.utils.files import file_size +from ultralytics.yolo.utils.ops import Profile +from ultralytics.yolo.utils.torch_utils import get_latest_opset, select_device, smart_inference_mode + +ARM64 = platform.machine() in ('arm64', 'aarch64') + + +def export_formats(): + # YOLOv8 export formats + import pandas + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['CoreML', 'coreml', '.mlmodel', True, False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], + ['TensorFlow GraphDef', 'pb', '.pb', True, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', True, False], + ['TensorFlow.js', 'tfjs', '_web_model', True, False], + ['PaddlePaddle', 'paddle', '_paddle_model', True, True], ] + return pandas.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def gd_outputs(gd): + # TensorFlow GraphDef model output node names + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + + +def try_export(inner_func): + # YOLOv8 export decorator, i..e @try_export + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + +class Exporter: + """ + Exporter + + A class for exporting a model. + + Attributes: + args (SimpleNamespace): Configuration for the exporter. + save_dir (Path): Directory to save results. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None): + """ + Initializes the Exporter class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. 
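In practice the `Exporter` above is rarely constructed directly; the module docstring shows the intended entry points. A short sketch of the Python route, using the stock `yolov8n.pt` example from the docstring:

```python
from ultralytics import YOLO

model = YOLO('yolov8n.pt')                      # load a pretrained detection model
path = model.export(format='onnx', imgsz=640)   # runs Exporter under the hood
print(path)                                      # e.g. 'yolov8n.onnx'

# CLI equivalent: yolo mode=export model=yolov8n.pt format=onnx
```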
+ """ + self.args = get_cfg(cfg, overrides) + self.callbacks = defaultdict(list, callbacks.default_callbacks) # add callbacks + callbacks.add_integration_callbacks(self) + + @smart_inference_mode() + def __call__(self, model=None): + self.run_callbacks('on_export_start') + t = time.time() + format = self.args.format.lower() # to lowercase + if format in ('tensorrt', 'trt'): # engine aliases + format = 'engine' + fmts = tuple(export_formats()['Argument'][1:]) # available export formats + flags = [x == format for x in fmts] + if sum(flags) != 1: + raise ValueError(f"Invalid export format='{format}'. Valid formats are {fmts}") + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans + + # Load PyTorch model + self.device = select_device('cpu' if self.args.device is None else self.args.device) + if self.args.half and onnx and self.device.type == 'cpu': + LOGGER.warning('WARNING ⚠️ half=True only compatible with GPU export, i.e. use device=0') + self.args.half = False + assert not self.args.dynamic, 'half=True not compatible with dynamic=True, i.e. use only one.' + + # Checks + model.names = check_class_names(model.names) + self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2) # check image size + if self.args.optimize: + assert self.device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. use --device cpu' + if edgetpu and not LINUX: + raise SystemError('Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/') + + # Input + im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device) + file = Path(getattr(model, 'pt_path', None) or getattr(model, 'yaml_file', None) or model.yaml['yaml_file']) + if file.suffix == '.yaml': + file = Path(file.name) + + # Update model + model = deepcopy(model).to(self.device) + for p in model.parameters(): + p.requires_grad = False + model.eval() + model.float() + model = model.fuse() + for k, m in model.named_modules(): + if isinstance(m, (Detect, Segment)): + m.dynamic = self.args.dynamic + m.export = True + m.format = self.args.format + elif isinstance(m, C2f) and not any((saved_model, pb, tflite, edgetpu, tfjs)): + # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph + m.forward = m.forward_split + + y = None + for _ in range(2): + y = model(im) # dry runs + if self.args.half and (engine or onnx) and self.device.type != 'cpu': + im, model = im.half(), model.half() # to FP16 + + # Warnings + warnings.filterwarnings('ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + warnings.filterwarnings('ignore', category=UserWarning) # suppress shape prim::Constant missing ONNX warning + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress CoreML np.bool deprecation warning + + # Assign + self.im = im + self.model = model + self.file = file + self.output_shape = tuple(y.shape) if isinstance(y, torch.Tensor) else tuple(tuple(x.shape) for x in y) + self.pretty_name = Path(self.model.yaml.get('yaml_file', self.file)).stem.replace('yolo', 'YOLO') + description = f'Ultralytics {self.pretty_name} model ' + f'trained on {Path(self.args.data).name}' \ + if self.args.data else '(untrained)' + self.metadata = { + 'description': description, + 'author': 'Ultralytics', + 'license': 'GPL-3.0 https://ultralytics.com/license', + 'version': __version__, + 'stride': int(max(model.stride)), + 'task': model.task, + 'batch': self.args.batch, + 'imgsz': self.imgsz, + 'names': model.names} # model metadata + + 
LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with input shape {tuple(im.shape)} BCHW and " + f'output shape(s) {self.output_shape} ({file_size(file):.1f} MB)') + + # Exports + f = [''] * len(fmts) # exported filenames + if jit: # TorchScript + f[0], _ = self._export_torchscript() + if engine: # TensorRT required before ONNX + f[1], _ = self._export_engine() + if onnx or xml: # OpenVINO requires ONNX + f[2], _ = self._export_onnx() + if xml: # OpenVINO + f[3], _ = self._export_openvino() + if coreml: # CoreML + f[4], _ = self._export_coreml() + if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats + self.args.int8 |= edgetpu + f[5], s_model = self._export_saved_model() + if pb or tfjs: # pb prerequisite to tfjs + f[6], _ = self._export_pb(s_model) + if tflite: + f[7], _ = self._export_tflite(s_model, nms=False, agnostic_nms=self.args.agnostic_nms) + if edgetpu: + f[8], _ = self._export_edgetpu(tflite_model=Path(f[5]) / f'{self.file.stem}_full_integer_quant.tflite') + if tfjs: + f[9], _ = self._export_tfjs() + if paddle: # PaddlePaddle + f[10], _ = self._export_paddle() + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + f = str(Path(f[-1])) + square = self.imgsz[0] == self.imgsz[1] + s = '' if square else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not " \ + f"work. Use export 'imgsz={max(self.imgsz)}' if val is required." + imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(' ', '') + data = f'data={self.args.data}' if model.task == 'segment' and format == 'pb' else '' + LOGGER.info( + f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f'\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {data}' + f'\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={self.args.data} {s}' + f'\nVisualize: https://netron.app') + + self.run_callbacks('on_export_end') + return f # return list of exported files/dirs + + @try_export + def _export_torchscript(self, prefix=colorstr('TorchScript:')): + # YOLOv8 TorchScript model export + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = self.file.with_suffix('.torchscript') + + ts = torch.jit.trace(self.model, self.im, strict=False) + extra_files = {'config.txt': json.dumps(self.metadata)} # torch._C.ExtraFilesMap() + if self.args.optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + LOGGER.info(f'{prefix} optimizing for mobile...') + from torch.utils.mobile_optimizer import optimize_for_mobile + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + return f, None + + @try_export + def _export_onnx(self, prefix=colorstr('ONNX:')): + # YOLOv8 ONNX export + requirements = ['onnx>=1.12.0'] + if self.args.simplify: + requirements += ['onnxsim>=0.4.17', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'] + check_requirements(requirements) + import onnx # noqa + + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = str(self.file.with_suffix('.onnx')) + + output_names = ['output0', 'output1'] if isinstance(self.model, SegmentationModel) else ['output0'] + dynamic = self.args.dynamic + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(self.model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # 
shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(self.model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + + torch.onnx.export( + self.model.cpu() if dynamic else self.model, # --dynamic only compatible with cpu + self.im.cpu() if dynamic else self.im, + f, + verbose=False, + opset_version=self.args.opset or get_latest_opset(), + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False + input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic or None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + # onnx.checker.check_model(model_onnx) # check onnx model + + # Simplify + if self.args.simplify: + try: + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnxsim {onnxsim.__version__}...') + # subprocess.run(f'onnxsim {f} {f}', shell=True) + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'Simplified ONNX model could not be validated' + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + + # Metadata + for k, v in self.metadata.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + + onnx.save(model_onnx, f) + return f, model_onnx + + @try_export + def _export_openvino(self, prefix=colorstr('OpenVINO:')): + # YOLOv8 OpenVINO export + check_requirements('openvino-dev>=2022.3') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.runtime as ov # noqa + from openvino.tools import mo # noqa + + LOGGER.info(f'\n{prefix} starting export with openvino {ov.__version__}...') + f = str(self.file).replace(self.file.suffix, f'_openvino_model{os.sep}') + f_onnx = self.file.with_suffix('.onnx') + f_ov = str(Path(f) / self.file.with_suffix('.xml').name) + + ov_model = mo.convert_model(f_onnx, + model_name=self.pretty_name, + framework='onnx', + compress_to_fp16=self.args.half) # export + ov.serialize(ov_model, f_ov) # save + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + @try_export + def _export_paddle(self, prefix=colorstr('PaddlePaddle:')): + # YOLOv8 Paddle export + check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle # noqa + from x2paddle.convert import pytorch2paddle # noqa + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(self.file).replace(self.file.suffix, f'_paddle_model{os.sep}') + + pytorch2paddle(module=self.model, save_dir=f, jit_type='trace', input_examples=[self.im]) # export + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + @try_export + def _export_coreml(self, prefix=colorstr('CoreML:')): + # YOLOv8 CoreML export + check_requirements('coremltools>=6.0') + import coremltools as ct # noqa + + class iOSDetectModel(torch.nn.Module): + # Wrap an Ultralytics YOLO model for iOS export + def __init__(self, model, im): + super().__init__() + b, c, h, w = im.shape # batch, channel, height, width + self.model = model + self.nc = len(model.names) # number of classes + if w == h: + self.normalize = 1.0 / w # scalar + else: + self.normalize = torch.tensor([1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h]) # broadcast (slower, smaller) + + def forward(self, x): + xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1) + return cls, xywh * self.normalize # confidence (3780, 80), coordinates (3780, 4) + + LOGGER.info(f'\n{prefix} starting export 
with coremltools {ct.__version__}...') + f = self.file.with_suffix('.mlmodel') + + bias = [0.0, 0.0, 0.0] + scale = 1 / 255 + classifier_config = None + if self.model.task == 'classify': + classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None + model = self.model + elif self.model.task == 'detect': + model = iOSDetectModel(self.model, self.im) if self.args.nms else self.model + elif self.model.task == 'segment': + # TODO CoreML Segmentation model pipelining + model = self.model + + ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model + ct_model = ct.convert(ts, + inputs=[ct.ImageType('image', shape=self.im.shape, scale=scale, bias=bias)], + classifier_config=classifier_config) + bits, mode = (8, 'kmeans_lut') if self.args.int8 else (16, 'linear') if self.args.half else (32, None) + if bits < 32: + if 'kmeans' in mode: + check_requirements('scikit-learn') # scikit-learn package required for k-means quantization + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + if self.args.nms and self.model.task == 'detect': + ct_model = self._pipeline_coreml(ct_model) + + m = self.metadata # metadata dict + ct_model.short_description = m.pop('description') + ct_model.author = m.pop('author') + ct_model.license = m.pop('license') + ct_model.version = m.pop('version') + ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()}) + ct_model.save(str(f)) + return f, ct_model + + @try_export + def _export_engine(self, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt + assert self.im.device.type != 'cpu', "export running on CPU but must be on GPU, i.e. use 'device=0'" + try: + import tensorrt as trt # noqa + except ImportError: + if LINUX: + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt # noqa + + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=8.0.0 + self.args.simplify = True + f_onnx, _ = self._export_onnx() + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert Path(f_onnx).exists(), f'failed to export ONNX file: {f_onnx}' + f = self.file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(f_onnx): + raise RuntimeError(f'failed to load ONNX file: {f_onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + for inp in inputs: + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') + + if self.args.dynamic: + shape = self.im.shape + if shape[0] <= 1: + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, 
*shape[1:]), (max(1, shape[0] // 2), *shape[1:]), shape) + config.add_optimization_profile(profile) + + LOGGER.info( + f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and self.args.half else 32} engine as {f}') + if builder.platform_has_fast_fp16 and self.args.half: + config.set_flag(trt.BuilderFlag.FP16) + + # Write file + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + # Metadata + meta = json.dumps(self.metadata) + t.write(len(meta).to_bytes(4, byteorder='little', signed=True)) + t.write(meta.encode()) + # Model + t.write(engine.serialize()) + + return f, None + + @try_export + def _export_saved_model(self, prefix=colorstr('TensorFlow SavedModel:')): + + # YOLOv8 TensorFlow SavedModel export + try: + import tensorflow as tf # noqa + except ImportError: + cuda = torch.cuda.is_available() + check_requirements(f"tensorflow{'-macos' if MACOS else '-aarch64' if ARM64 else '' if cuda else '-cpu'}") + import tensorflow as tf # noqa + check_requirements(('onnx', 'onnx2tf>=1.7.7', 'sng4onnx>=1.0.1', 'onnxsim>=0.4.17', 'onnx_graphsurgeon>=0.3.26', + 'tflite_support', 'onnxruntime-gpu' if torch.cuda.is_available() else 'onnxruntime'), + cmds='--extra-index-url https://pypi.ngc.nvidia.com') + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if f.is_dir(): + import shutil + shutil.rmtree(f) # delete output folder + + # Export to ONNX + self.args.simplify = True + f_onnx, _ = self._export_onnx() + + # Export to TF + int8 = '-oiqt -qt per-tensor' if self.args.int8 else '' + cmd = f'onnx2tf -i {f_onnx} -o {f} -nuo --non_verbose {int8}' + LOGGER.info(f"\n{prefix} running '{cmd.strip()}'") + subprocess.run(cmd, shell=True) + yaml_save(f / 'metadata.yaml', self.metadata) # add metadata.yaml + + # Remove/rename TFLite models + if self.args.int8: + for file in f.rglob('*_dynamic_range_quant.tflite'): + file.rename(file.with_stem(file.stem.replace('_dynamic_range_quant', '_int8'))) + for file in f.rglob('*_integer_quant_with_int16_act.tflite'): + file.unlink() # delete extra fp16 activation TFLite files + + # Add TFLite metadata + for file in f.rglob('*.tflite'): + f.unlink() if 'quant_with_int16_act.tflite' in str(f) else self._add_tflite_metadata(file) + + # Load saved_model + keras_model = tf.saved_model.load(f, tags=None, options=None) + + return str(f), keras_model + + @try_export + def _export_pb(self, keras_model, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv8 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + import tensorflow as tf # noqa + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = self.file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + return f, None + + @try_export + def _export_tflite(self, keras_model, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + # YOLOv8 TensorFlow Lite export + import tensorflow as tf # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + saved_model = 
Path(str(self.file).replace(self.file.suffix, '_saved_model')) + if self.args.int8: + f = saved_model / f'{self.file.stem}_int8.tflite' # fp32 in/out + elif self.args.half: + f = saved_model / f'{self.file.stem}_float16.tflite' # fp32 in/out + else: + f = saved_model / f'{self.file.stem}_float32.tflite' + return str(f), None + + # # OLD TFLITE EXPORT CODE BELOW ------------------------------------------------------------------------------- + # batch_size, ch, *imgsz = list(self.im.shape) # BCHW + # f = str(self.file).replace(self.file.suffix, '-fp16.tflite') + # + # converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + # converter.target_spec.supported_types = [tf.float16] + # converter.optimizations = [tf.lite.Optimize.DEFAULT] + # if self.args.int8: + # + # def representative_dataset_gen(dataset, n_images=100): + # # Dataset generator for use with converter.representative_dataset, returns a generator of np arrays + # for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): + # im = np.transpose(img, [1, 2, 0]) + # im = np.expand_dims(im, axis=0).astype(np.float32) + # im /= 255 + # yield [im] + # if n >= n_images: + # break + # + # dataset = LoadImages(check_det_dataset(self.args.data)['train'], imgsz=imgsz, auto=False) + # converter.representative_dataset = lambda: representative_dataset_gen(dataset, n_images=100) + # converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + # converter.target_spec.supported_types = [] + # converter.inference_input_type = tf.uint8 # or tf.int8 + # converter.inference_output_type = tf.uint8 # or tf.int8 + # converter.experimental_new_quantizer = True + # f = str(self.file).replace(self.file.suffix, '-int8.tflite') + # if nms or agnostic_nms: + # converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + # + # tflite_model = converter.convert() + # open(f, 'wb').write(tflite_model) + # return f, None + + @try_export + def _export_edgetpu(self, tflite_model='', prefix=colorstr('Edge TPU:')): + # YOLOv8 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + LOGGER.warning(f'{prefix} WARNING ⚠️ Edge TPU known bug https://github.com/ultralytics/ultralytics/issues/1185') + + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert LINUX, f'export only supported on Linux. See {help_url}' + if subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. 
Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(tflite_model).replace('.tflite', '_edgetpu.tflite') # Edge TPU model + + cmd = f'edgetpu_compiler -s -d -k 10 --out_dir {Path(f).parent} {tflite_model}' + LOGGER.info(f"{prefix} running '{cmd}'") + subprocess.run(cmd.split(), check=True) + self._add_tflite_metadata(f) + return f, None + + @try_export + def _export_tfjs(self, prefix=colorstr('TensorFlow.js:')): + # YOLOv8 TensorFlow.js export + check_requirements('tensorflowjs') + import tensorflow as tf + import tensorflowjs as tfjs # noqa + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(self.file).replace(self.file.suffix, '_web_model') # js dir + f_pb = self.file.with_suffix('.pb') # *.pb path + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(f_pb, 'rb') as file: + gd.ParseFromString(file.read()) + outputs = ','.join(gd_outputs(gd)) + LOGGER.info(f'\n{prefix} output node names: {outputs}') + + cmd = f'tensorflowjs_converter --input_format=tf_frozen_model --output_node_names={outputs} {f_pb} {f}' + subprocess.run(cmd.split(), check=True) + + # f_json = Path(f) / 'model.json' # *.json path + # with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + # subst = re.sub( + # r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}, ' + # r'"Identity.?.?": {"name": "Identity.?.?"}}}', + # r'{"outputs": {"Identity": {"name": "Identity"}, ' + # r'"Identity_1": {"name": "Identity_1"}, ' + # r'"Identity_2": {"name": "Identity_2"}, ' + # r'"Identity_3": {"name": "Identity_3"}}}', + # f_json.read_text(), + # ) + # j.write(subst) + yaml_save(Path(f) / 'metadata.yaml', self.metadata) # add metadata.yaml + return f, None + + def _add_tflite_metadata(self, file): + # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata + from tflite_support import flatbuffers # noqa + from tflite_support import metadata as _metadata # noqa + from tflite_support import metadata_schema_py_generated as _metadata_fb # noqa + + # Create model info + model_meta = _metadata_fb.ModelMetadataT() + model_meta.name = self.metadata['description'] + model_meta.version = self.metadata['version'] + model_meta.author = self.metadata['author'] + model_meta.license = self.metadata['license'] + + # Label file + tmp_file = Path(file).parent / 'temp_meta.txt' + with open(tmp_file, 'w') as f: + f.write(str(self.metadata)) + + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + label_file.type = _metadata_fb.AssociatedFileType.TENSOR_AXIS_LABELS + + # Create input info + input_meta = _metadata_fb.TensorMetadataT() + input_meta.name = 'image' + input_meta.description = 'Input image to be detected.' 
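Once the populator further below has packed this metadata into the `.tflite` file, it can be read back with the same `tflite_support` package to confirm what was written; the displayer API names here are recalled from memory, so treat the sketch as an assumption:

```python
from tflite_support import metadata

# hypothetical path to a TFLite file produced by the SavedModel/TFLite export above
displayer = metadata.MetadataDisplayer.with_model_file('yolov8n_saved_model/yolov8n_float32.tflite')
print(displayer.get_metadata_json())                # model name, author, license, tensor descriptions
print(displayer.get_packed_associated_file_list())  # should include the temp_meta.txt label file
```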
+ input_meta.content = _metadata_fb.ContentT() + input_meta.content.contentProperties = _metadata_fb.ImagePropertiesT() + input_meta.content.contentProperties.colorSpace = _metadata_fb.ColorSpaceType.RGB + input_meta.content.contentPropertiesType = _metadata_fb.ContentProperties.ImageProperties + + # Create output info + output1 = _metadata_fb.TensorMetadataT() + output1.name = 'output' + output1.description = 'Coordinates of detected objects, class labels, and confidence score' + output1.associatedFiles = [label_file] + if self.model.task == 'segment': + output2 = _metadata_fb.TensorMetadataT() + output2.name = 'output' + output2.description = 'Mask protos' + output2.associatedFiles = [label_file] + + # Create subgraph info + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [input_meta] + subgraph.outputTensorMetadata = [output1, output2] if self.model.task == 'segment' else [output1] + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(str(file)) + populator.load_metadata_buffer(metadata_buf) + populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + def _pipeline_coreml(self, model, prefix=colorstr('CoreML Pipeline:')): + # YOLOv8 CoreML pipeline + import coremltools as ct # noqa + + LOGGER.info(f'{prefix} starting pipeline with coremltools {ct.__version__}...') + batch_size, ch, h, w = list(self.im.shape) # BCHW + + # Output shapes + spec = model.get_spec() + out0, out1 = iter(spec.description.output) + if MACOS: + from PIL import Image + img = Image.new('RGB', (w, h)) # img(192 width, 320 height) + # img = torch.zeros((*opt.img_size, 3)).numpy() # img size(320,192,3) iDetection + out = model.predict({'image': img}) + out0_shape = out[out0.name].shape + out1_shape = out[out1.name].shape + else: # linux and windows can not run model.predict(), get sizes from pytorch output y + out0_shape = self.output_shape[2], self.output_shape[1] - 4 # (3780, 80) + out1_shape = self.output_shape[2], 4 # (3780, 4) + + # Checks + names = self.metadata['names'] + nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height + na, nc = out0_shape + # na, nc = out0.type.multiArrayType.shape # number anchors, classes + assert len(names) == nc, f'{len(names)} names found for nc={nc}' # check + + # Define output shapes (missing) + out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80) + out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4) + # spec.neuralNetwork.preprocessing[0].featureName = '0' + + # Flexible input shapes + # from coremltools.models.neural_network import flexible_shape_utils + # s = [] # shapes + # s.append(flexible_shape_utils.NeuralNetworkImageSize(320, 192)) + # s.append(flexible_shape_utils.NeuralNetworkImageSize(640, 384)) # (height, width) + # flexible_shape_utils.add_enumerated_image_sizes(spec, feature_name='image', sizes=s) + # r = flexible_shape_utils.NeuralNetworkImageSizeRange() # shape ranges + # r.add_height_range((192, 640)) + # r.add_width_range((192, 640)) + # flexible_shape_utils.update_image_size_range(spec, feature_name='image', size_range=r) + + # Print + # print(spec.description) + + # Model from spec + model = ct.models.MLModel(spec) + + # 3. 
Create NMS protobuf + nms_spec = ct.proto.Model_pb2.Model() + nms_spec.specificationVersion = 5 + for i in range(2): + decoder_output = model._spec.description.output[i].SerializeToString() + nms_spec.description.input.add() + nms_spec.description.input[i].ParseFromString(decoder_output) + nms_spec.description.output.add() + nms_spec.description.output[i].ParseFromString(decoder_output) + + nms_spec.description.output[0].name = 'confidence' + nms_spec.description.output[1].name = 'coordinates' + + output_sizes = [nc, 4] + for i in range(2): + ma_type = nms_spec.description.output[i].type.multiArrayType + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[0].lowerBound = 0 + ma_type.shapeRange.sizeRanges[0].upperBound = -1 + ma_type.shapeRange.sizeRanges.add() + ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i] + ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i] + del ma_type.shape[:] + + nms = nms_spec.nonMaximumSuppression + nms.confidenceInputFeatureName = out0.name # 1x507x80 + nms.coordinatesInputFeatureName = out1.name # 1x507x4 + nms.confidenceOutputFeatureName = 'confidence' + nms.coordinatesOutputFeatureName = 'coordinates' + nms.iouThresholdInputFeatureName = 'iouThreshold' + nms.confidenceThresholdInputFeatureName = 'confidenceThreshold' + nms.iouThreshold = 0.45 + nms.confidenceThreshold = 0.25 + nms.pickTop.perClass = True + nms.stringClassLabels.vector.extend(names.values()) + nms_model = ct.models.MLModel(nms_spec) + + # 4. Pipeline models together + pipeline = ct.models.pipeline.Pipeline(input_features=[('image', ct.models.datatypes.Array(3, ny, nx)), + ('iouThreshold', ct.models.datatypes.Double()), + ('confidenceThreshold', ct.models.datatypes.Double())], + output_features=['confidence', 'coordinates']) + pipeline.add_model(model) + pipeline.add_model(nms_model) + + # Correct datatypes + pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString()) + pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString()) + pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString()) + + # Update metadata + pipeline.spec.specificationVersion = 5 + pipeline.spec.description.metadata.userDefined.update({ + 'IoU threshold': str(nms.iouThreshold), + 'Confidence threshold': str(nms.confidenceThreshold)}) + + # Save the model + model = ct.models.MLModel(pipeline.spec) + model.input_description['image'] = 'Input image' + model.input_description['iouThreshold'] = f'(optional) IOU threshold override (default: {nms.iouThreshold})' + model.input_description['confidenceThreshold'] = \ + f'(optional) Confidence threshold override (default: {nms.confidenceThreshold})' + model.output_description['confidence'] = 'Boxes × Class confidence (see user-defined metadata "classes")' + model.output_description['coordinates'] = 'Boxes × [x, y, width, height] (relative to image size)' + LOGGER.info(f'{prefix} pipeline success') + return model + + def run_callbacks(self, event: str): + for callback in self.callbacks.get(event, []): + callback(self) + + +def export(cfg=DEFAULT_CFG): + cfg.model = cfg.model or 'yolov8n.yaml' + cfg.format = cfg.format or 'torchscript' + + from ultralytics import YOLO + model = YOLO(cfg.model) + model.export(**vars(cfg)) + + +if __name__ == '__main__': + """ + CLI: + yolo mode=export model=yolov8n.yaml format=onnx + """ + export() diff --git 
a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/model.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/model.py new file mode 100644 index 0000000..57d8e0a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/model.py @@ -0,0 +1,382 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import sys +from pathlib import Path +from typing import Union + +from ultralytics import yolo # noqa +from ultralytics.nn.tasks import (ClassificationModel, DetectionModel, SegmentationModel, attempt_load_one_weight, + guess_model_task, nn, yaml_model_load) +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.engine.exporter import Exporter +from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, ROOT, callbacks, + is_git_dir, yaml_load) +from ultralytics.yolo.utils.checks import check_file, check_imgsz, check_pip_update_available, check_yaml +from ultralytics.yolo.utils.downloads import GITHUB_ASSET_STEMS +from ultralytics.yolo.utils.torch_utils import smart_inference_mode + +# Map head to model, trainer, validator, and predictor classes +TASK_MAP = { + 'classify': [ + ClassificationModel, yolo.v8.classify.ClassificationTrainer, yolo.v8.classify.ClassificationValidator, + yolo.v8.classify.ClassificationPredictor], + 'detect': [ + DetectionModel, yolo.v8.detect.DetectionTrainer, yolo.v8.detect.DetectionValidator, + yolo.v8.detect.DetectionPredictor], + 'segment': [ + SegmentationModel, yolo.v8.segment.SegmentationTrainer, yolo.v8.segment.SegmentationValidator, + yolo.v8.segment.SegmentationPredictor]} + + +class YOLO: + """ + YOLO (You Only Look Once) object detection model. + + Args: + model (str, Path): Path to the model file to load or create. + + Attributes: + predictor (Any): The predictor object. + model (Any): The model object. + trainer (Any): The trainer object. + task (str): The type of model task. + ckpt (Any): The checkpoint object if the model loaded from *.pt file. + cfg (str): The model configuration if loaded from *.yaml file. + ckpt_path (str): The checkpoint file path. + overrides (dict): Overrides for the trainer object. + metrics (Any): The data for metrics. + + Methods: + __call__(source=None, stream=False, **kwargs): + Alias for the predict method. + _new(cfg:str, verbose:bool=True) -> None: + Initializes a new model and infers the task type from the model definitions. + _load(weights:str, task:str='') -> None: + Initializes a new model and infers the task type from the model head. + _check_is_pytorch_model() -> None: + Raises TypeError if the model is not a PyTorch model. + reset() -> None: + Resets the model modules. + info(verbose:bool=False) -> None: + Logs the model info. + fuse() -> None: + Fuses the model for faster inference. + predict(source=None, stream=False, **kwargs) -> List[ultralytics.yolo.engine.results.Results]: + Performs prediction using the YOLO model. + + Returns: + list(ultralytics.yolo.engine.results.Results): The prediction results. + """ + + def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None, session=None) -> None: + """ + Initializes the YOLO model. 
+ + Args: + model (str, Path): model to load or create + """ + self._reset_callbacks() + self.predictor = None # reuse predictor + self.model = None # model object + self.trainer = None # trainer object + self.task = None # task type + self.ckpt = None # if loaded from *.pt + self.cfg = None # if loaded from *.yaml + self.ckpt_path = None + self.overrides = {} # overrides for trainer object + self.metrics = None # validation/training metrics + self.session = session # HUB session + + # Load or create new YOLO model + model = str(model).strip() # strip spaces + suffix = Path(model).suffix + if not suffix and Path(model).stem in GITHUB_ASSET_STEMS: + model, suffix = Path(model).with_suffix('.pt'), '.pt' # add suffix, i.e. yolov8n -> yolov8n.pt + if suffix == '.yaml': + self._new(model, task) + else: + self._load(model, task) + + def __call__(self, source=None, stream=False, **kwargs): + return self.predict(source, stream, **kwargs) + + def __getattr__(self, attr): + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + def _new(self, cfg: str, task=None, verbose=True): + """ + Initializes a new model and infers the task type from the model definitions. + + Args: + cfg (str): model configuration file + task (str) or (None): model task + verbose (bool): display model info on load + """ + cfg_dict = yaml_model_load(cfg) + self.cfg = cfg + self.task = task or guess_model_task(cfg_dict) + self.model = TASK_MAP[self.task][0](cfg_dict, verbose=verbose and RANK == -1) # build model + self.overrides['model'] = self.cfg + + # Below added to allow export from yamls + args = {**DEFAULT_CFG_DICT, **self.overrides} # combine model and default args, preferring model args + self.model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model + self.model.task = self.task + + def _load(self, weights: str, task=None): + """ + Initializes a new model and infers the task type from the model head. + + Args: + weights (str): model checkpoint to be loaded + task (str) or (None): model task + """ + suffix = Path(weights).suffix + if suffix == '.pt': + self.model, self.ckpt = attempt_load_one_weight(weights) + self.task = self.model.args['task'] + self.overrides = self.model.args = self._reset_ckpt_args(self.model.args) + self.ckpt_path = self.model.pt_path + else: + weights = check_file(weights) + self.model, self.ckpt = weights, None + self.task = task or guess_model_task(weights) + self.ckpt_path = weights + self.overrides['model'] = weights + self.overrides['task'] = self.task + + def _check_is_pytorch_model(self): + """ + Raises TypeError is model is not a PyTorch model + """ + if not isinstance(self.model, nn.Module): + raise TypeError(f"model='{self.model}' must be a *.pt PyTorch model, but is a different type. " + f'PyTorch models can be used to train, val, predict and export, i.e. ' + f"'yolo export model=yolov8n.pt', but exported formats like ONNX, TensorRT etc. only " + f"support 'predict' and 'val' modes, i.e. 'yolo predict model=yolov8n.onnx'.") + + @smart_inference_mode() + def reset_weights(self): + """ + Resets the model modules parameters to randomly initialized values, losing all training information. 
+ """ + self._check_is_pytorch_model() + for m in self.model.modules(): + if hasattr(m, 'reset_parameters'): + m.reset_parameters() + for p in self.model.parameters(): + p.requires_grad = True + return self + + @smart_inference_mode() + def load(self, weights='yolov8n.pt'): + """ + Transfers parameters with matching names and shapes from 'weights' to model. + """ + self._check_is_pytorch_model() + if isinstance(weights, (str, Path)): + weights, self.ckpt = attempt_load_one_weight(weights) + self.model.load(weights) + return self + + def info(self, verbose=False): + """ + Logs model info. + + Args: + verbose (bool): Controls verbosity. + """ + self._check_is_pytorch_model() + self.model.info(verbose=verbose) + + def fuse(self): + self._check_is_pytorch_model() + self.model.fuse() + + @smart_inference_mode() + def predict(self, source=None, stream=False, **kwargs): + """ + Perform prediction using the YOLO model. + + Args: + source (str | int | PIL | np.ndarray): The source of the image to make predictions on. + Accepts all source types accepted by the YOLO model. + stream (bool): Whether to stream the predictions or not. Defaults to False. + **kwargs : Additional keyword arguments passed to the predictor. + Check the 'configuration' section in the documentation for all available options. + + Returns: + (List[ultralytics.yolo.engine.results.Results]): The prediction results. + """ + if source is None: + source = ROOT / 'assets' if is_git_dir() else 'https://ultralytics.com/images/bus.jpg' + LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using 'source={source}'.") + is_cli = (sys.argv[0].endswith('yolo') or sys.argv[0].endswith('ultralytics')) and \ + ('predict' in sys.argv or 'mode=predict' in sys.argv) + + overrides = self.overrides.copy() + overrides['conf'] = 0.25 + overrides.update(kwargs) # prefer kwargs + overrides['mode'] = kwargs.get('mode', 'predict') + assert overrides['mode'] in ['track', 'predict'] + overrides['save'] = kwargs.get('save', False) # not save files by default + if not self.predictor: + self.task = overrides.get('task') or self.task + self.predictor = TASK_MAP[self.task][3](overrides=overrides) + self.predictor.setup_model(model=self.model, verbose=is_cli) + else: # only update args if predictor is already setup + self.predictor.args = get_cfg(self.predictor.args, overrides) + return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream) + + def track(self, source=None, stream=False, **kwargs): + from ultralytics.tracker import register_tracker + register_tracker(self) + # ByteTrack-based method needs low confidence predictions as input + conf = kwargs.get('conf') or 0.1 + kwargs['conf'] = conf + kwargs['mode'] = 'track' + return self.predict(source=source, stream=stream, **kwargs) + + @smart_inference_mode() + def val(self, data=None, **kwargs): + """ + Validate a model on a given dataset . + + Args: + data (str): The dataset to validate on. Accepts all formats accepted by yolo + **kwargs : Any other args accepted by the validators. 
To see all args check 'configuration' section in docs + """ + overrides = self.overrides.copy() + overrides['rect'] = True # rect batches as default + overrides.update(kwargs) + overrides['mode'] = 'val' + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.data = data or args.data + if 'task' in overrides: + self.task = args.task + else: + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz and not isinstance(self.model, (str, Path)): + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + args.imgsz = check_imgsz(args.imgsz, max_dim=1) + + validator = TASK_MAP[self.task][2](args=args) + validator(model=self.model) + self.metrics = validator.metrics + + return validator.metrics + + @smart_inference_mode() + def benchmark(self, **kwargs): + """ + Benchmark a model on all export formats. + + Args: + **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs + """ + self._check_is_pytorch_model() + from ultralytics.yolo.utils.benchmarks import benchmark + overrides = self.model.args.copy() + overrides.update(kwargs) + overrides = {**DEFAULT_CFG_DICT, **overrides} # fill in missing overrides keys with defaults + return benchmark(model=self, imgsz=overrides['imgsz'], half=overrides['half'], device=overrides['device']) + + def export(self, **kwargs): + """ + Export model. + + Args: + **kwargs : Any other args accepted by the predictors. To see all args check 'configuration' section in docs + """ + self._check_is_pytorch_model() + overrides = self.overrides.copy() + overrides.update(kwargs) + args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + args.task = self.task + if args.imgsz == DEFAULT_CFG.imgsz: + args.imgsz = self.model.args['imgsz'] # use trained imgsz unless custom value is passed + if args.batch == DEFAULT_CFG.batch: + args.batch = 1 # default to 1 if not modified + return Exporter(overrides=args)(model=self.model) + + def train(self, **kwargs): + """ + Trains the model on a given dataset. + + Args: + **kwargs (Any): Any number of arguments representing the training configuration. + """ + self._check_is_pytorch_model() + check_pip_update_available() + overrides = self.overrides.copy() + overrides.update(kwargs) + if kwargs.get('cfg'): + LOGGER.info(f"cfg file passed. Overriding default params with {kwargs['cfg']}.") + overrides = yaml_load(check_yaml(kwargs['cfg'])) + overrides['mode'] = 'train' + if not overrides.get('data'): + raise AttributeError("Dataset required but missing, i.e. pass 'data=coco128.yaml'") + if overrides.get('resume'): + overrides['resume'] = self.ckpt_path + + self.task = overrides.get('task') or self.task + self.trainer = TASK_MAP[self.task][1](overrides=overrides) + if not overrides.get('resume'): # manually set model only if not resuming + self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml) + self.model = self.trainer.model + self.trainer.hub_session = self.session # attach optional HUB session + self.trainer.train() + # update model and cfg after training + if RANK in (-1, 0): + self.model, _ = attempt_load_one_weight(str(self.trainer.best)) + self.overrides = self.model.args + self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP + + def to(self, device): + """ + Sends the model to the given device. 
+ + Args: + device (str): device + """ + self._check_is_pytorch_model() + self.model.to(device) + + @property + def names(self): + """ + Returns class names of the loaded model. + """ + return self.model.names if hasattr(self.model, 'names') else None + + @property + def device(self): + """ + Returns device if PyTorch model + """ + return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None + + @property + def transforms(self): + """ + Returns transform of the loaded model. + """ + return self.model.transforms if hasattr(self.model, 'transforms') else None + + @staticmethod + def add_callback(event: str, func): + """ + Add callback + """ + callbacks.default_callbacks[event].append(func) + + @staticmethod + def _reset_ckpt_args(args): + include = {'imgsz', 'data', 'task', 'single_cls'} # only remember these arguments when loading a PyTorch model + return {k: v for k, v in args.items() if k in include} + + @staticmethod + def _reset_callbacks(): + for event in callbacks.default_callbacks.keys(): + callbacks.default_callbacks[event] = [callbacks.default_callbacks[event][0]] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/predictor.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/predictor.py new file mode 100644 index 0000000..ee0bab0 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/predictor.py @@ -0,0 +1,284 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc. + +Usage - sources: + $ yolo mode=predict model=yolov8n.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ yolo mode=predict model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" +import platform +from collections import defaultdict +from pathlib import Path + +import cv2 + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data import load_inference_source +from ultralytics.yolo.data.augment import classify_transforms +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, SETTINGS, callbacks, colorstr, ops +from ultralytics.yolo.utils.checks import check_imgsz, check_imshow +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.torch_utils import select_device, smart_inference_mode + +STREAM_WARNING = """ + WARNING ⚠️ stream/video/webcam/dir predict source will accumulate results in RAM unless `stream=True` is passed, + causing potential out-of-memory errors for large sources or long-running streams/videos. 
+ + Usage: + results = model(source=..., stream=True) # generator of Results objects + for r in results: + boxes = r.boxes # Boxes object for bbox outputs + masks = r.masks # Masks object for segment masks outputs + probs = r.probs # Class probabilities for classification outputs +""" + + +class BasePredictor: + """ + BasePredictor + + A base class for creating predictors. + + Attributes: + args (SimpleNamespace): Configuration for the predictor. + save_dir (Path): Directory to save results. + done_setup (bool): Whether the predictor has finished setup. + model (nn.Module): Model used for prediction. + data (dict): Data configuration. + device (torch.device): Device used for prediction. + dataset (Dataset): Dataset used for prediction. + vid_path (str): Path to video file. + vid_writer (cv2.VideoWriter): Video writer for saving video output. + annotator (Annotator): Annotator used for prediction. + data_path (str): Path to data. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None): + """ + Initializes the BasePredictor class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. + """ + self.args = get_cfg(cfg, overrides) + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + self.save_dir = increment_path(Path(project) / name, exist_ok=self.args.exist_ok) + if self.args.conf is None: + self.args.conf = 0.25 # default conf=0.25 + self.done_warmup = False + if self.args.show: + self.args.show = check_imshow(warn=True) + + # Usable if setup is done + self.model = None + self.data = self.args.data # data_dict + self.imgsz = None + self.device = None + self.dataset = None + self.vid_path, self.vid_writer = None, None + self.annotator = None + self.data_path = None + self.source_type = None + self.batch = None + self.callbacks = defaultdict(list, callbacks.default_callbacks) # add callbacks + callbacks.add_integration_callbacks(self) + + def preprocess(self, img): + pass + + def get_annotator(self, img): + raise NotImplementedError('get_annotator function needs to be implemented') + + def write_results(self, results, batch, print_string): + raise NotImplementedError('print_results function needs to be implemented') + + def postprocess(self, preds, img, orig_img): + return preds + + def __call__(self, source=None, model=None, stream=False): + self.stream = stream + if stream: + return self.stream_inference(source, model) + else: + return list(self.stream_inference(source, model)) # merge list of Result into one + + def predict_cli(self, source=None, model=None): + # Method used for CLI prediction. 
It uses always generator as outputs as not required by CLI mode + gen = self.stream_inference(source, model) + for _ in gen: # running CLI inference without accumulating any outputs (do not modify) + pass + + def setup_source(self, source): + self.imgsz = check_imgsz(self.args.imgsz, stride=self.model.stride, min_dim=2) # check image size + if self.args.task == 'classify': + transforms = getattr(self.model.model, 'transforms', classify_transforms(self.imgsz[0])) + else: # predict, segment + transforms = None + self.dataset = load_inference_source(source=source, + transforms=transforms, + imgsz=self.imgsz, + vid_stride=self.args.vid_stride, + stride=self.model.stride, + auto=self.model.pt) + self.source_type = self.dataset.source_type + if not getattr(self, 'stream', True) and (self.dataset.mode == 'stream' or # streams + len(self.dataset) > 1000 or # images + any(getattr(self.dataset, 'video_flag', [False]))): # videos + LOGGER.warning(STREAM_WARNING) + self.vid_path, self.vid_writer = [None] * self.dataset.bs, [None] * self.dataset.bs + + @smart_inference_mode() + def stream_inference(self, source=None, model=None): + if self.args.verbose: + LOGGER.info('') + + # setup model + if not self.model: + self.setup_model(model) + # setup source every time predict is called + self.setup_source(source if source is not None else self.args.source) + + # check if save_dir/ label file exists + if self.args.save or self.args.save_txt: + (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + # warmup model + if not self.done_warmup: + self.model.warmup(imgsz=(1 if self.model.pt or self.model.triton else self.dataset.bs, 3, *self.imgsz)) + self.done_warmup = True + + self.seen, self.windows, self.dt, self.batch = 0, [], (ops.Profile(), ops.Profile(), ops.Profile()), None + self.run_callbacks('on_predict_start') + for batch in self.dataset: + self.run_callbacks('on_predict_batch_start') + self.batch = batch + path, im, im0s, vid_cap, s = batch + visualize = increment_path(self.save_dir / Path(path).stem, mkdir=True) if self.args.visualize else False + + # preprocess + with self.dt[0]: + im = self.preprocess(im) + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # inference + with self.dt[1]: + preds = self.model(im, augment=self.args.augment, visualize=visualize) + + # postprocess + with self.dt[2]: + self.results = self.postprocess(preds, im, im0s) + self.run_callbacks('on_predict_postprocess_end') + + # visualize, save, write results + n = len(im) + for i in range(n): + self.results[i].speed = { + 'preprocess': self.dt[0].dt * 1E3 / n, + 'inference': self.dt[1].dt * 1E3 / n, + 'postprocess': self.dt[2].dt * 1E3 / n} + if self.source_type.tensor: # skip write, show and plot operations if input is raw tensor + continue + p, im0 = (path[i], im0s[i].copy()) if self.source_type.webcam or self.source_type.from_img \ + else (path, im0s.copy()) + p = Path(p) + + if self.args.verbose or self.args.save or self.args.save_txt or self.args.show: + s += self.write_results(i, self.results, (p, im, im0)) + + if self.args.show: + self.show(p) + + if self.args.save: + self.save_preds(vid_cap, i, str(self.save_dir / p.name)) + self.run_callbacks('on_predict_batch_end') + yield from self.results + + # Print time (inference-only) + if self.args.verbose: + LOGGER.info(f'{s}{self.dt[1].dt * 1E3:.1f}ms') + + # Release assets + if isinstance(self.vid_writer[-1], cv2.VideoWriter): + self.vid_writer[-1].release() # release final video writer + + # Print results + 
if self.args.verbose and self.seen: + t = tuple(x.t / self.seen * 1E3 for x in self.dt) # speeds per image + LOGGER.info(f'Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape ' + f'{(1, 3, *self.imgsz)}' % t) + if self.args.save or self.args.save_txt or self.args.save_crop: + nl = len(list(self.save_dir.glob('labels/*.txt'))) # number of labels + s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}") + + self.run_callbacks('on_predict_end') + + def setup_model(self, model, verbose=True): + device = select_device(self.args.device, verbose=verbose) + model = model or self.args.model + self.args.half &= device.type != 'cpu' # half precision only supported on CUDA + self.model = AutoBackend(model, + device=device, + dnn=self.args.dnn, + data=self.args.data, + fp16=self.args.half, + verbose=verbose) + self.device = device + self.model.eval() + + def show(self, p): + im0 = self.annotator.result() + if platform.system() == 'Linux' and p not in self.windows: + self.windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(500 if self.batch[4].startswith('image') else 1) # 1 millisecond + + def save_preds(self, vid_cap, idx, save_path): + im0 = self.annotator.result() + # save imgs + if self.dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if self.vid_path[idx] != save_path: # new video + self.vid_path[idx] = save_path + if isinstance(self.vid_writer[idx], cv2.VideoWriter): + self.vid_writer[idx].release() # release previous video writer + if vid_cap: # video + fps = int(vid_cap.get(cv2.CAP_PROP_FPS)) # integer required, floats produce error in MP4 codec + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + self.vid_writer[idx] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + self.vid_writer[idx].write(im0) + + def run_callbacks(self, event: str): + for callback in self.callbacks.get(event, []): + callback(self) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/results.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/results.py new file mode 100644 index 0000000..8f4357f --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/results.py @@ -0,0 +1,306 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Ultralytics Results, Boxes and Masks classes for handling inference results + +Usage: See https://docs.ultralytics.com/modes/predict/ +""" + +from copy import deepcopy +from functools import lru_cache + +import numpy as np +import torch +import torchvision.transforms.functional as F + +from ultralytics.yolo.utils import LOGGER, SimpleClass, ops +from ultralytics.yolo.utils.plotting import Annotator, colors + + +class Results(SimpleClass): + """ + A class for storing and manipulating inference results. + + Args: + orig_img (numpy.ndarray): The original image as a numpy array. + path (str): The path to the image file. + names (List[str]): A list of class names. 
+        boxes (List[List[float]], optional): A list of bounding box coordinates for each detection.
+        masks (numpy.ndarray, optional): A 3D numpy array of detection masks, where each mask is a binary image.
+        probs (numpy.ndarray, optional): A 2D numpy array of detection probabilities for each class.
+
+    Attributes:
+        orig_img (numpy.ndarray): The original image as a numpy array.
+        orig_shape (tuple): The original image shape in (height, width) format.
+        boxes (Boxes, optional): A Boxes object containing the detection bounding boxes.
+        masks (Masks, optional): A Masks object containing the detection masks.
+        probs (numpy.ndarray, optional): A 2D numpy array of detection probabilities for each class.
+        names (List[str]): A list of class names.
+        path (str): The path to the image file.
+        _keys (tuple): A tuple of attribute names for non-empty attributes.
+    """
+
+    def __init__(self, orig_img, path, names, boxes=None, masks=None, probs=None) -> None:
+        self.orig_img = orig_img
+        self.orig_shape = orig_img.shape[:2]
+        self.boxes = Boxes(boxes, self.orig_shape) if boxes is not None else None  # native size boxes
+        self.masks = Masks(masks, self.orig_shape) if masks is not None else None  # native size or imgsz masks
+        self.probs = probs if probs is not None else None
+        self.names = names
+        self.path = path
+        self._keys = ('boxes', 'masks', 'probs')
+
+    def pandas(self):
+        pass
+        # TODO masks.pandas + boxes.pandas + cls.pandas
+
+    def __getitem__(self, idx):
+        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
+        for k in self.keys:
+            setattr(r, k, getattr(self, k)[idx])
+        return r
+
+    def update(self, boxes=None, masks=None, probs=None):
+        if boxes is not None:
+            self.boxes = Boxes(boxes, self.orig_shape)
+        if masks is not None:
+            self.masks = Masks(masks, self.orig_shape)
+        if probs is not None:
+            self.probs = probs
+
+    def cpu(self):
+        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
+        for k in self.keys:
+            setattr(r, k, getattr(self, k).cpu())
+        return r
+
+    def numpy(self):
+        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
+        for k in self.keys:
+            setattr(r, k, getattr(self, k).numpy())
+        return r
+
+    def cuda(self):
+        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
+        for k in self.keys:
+            setattr(r, k, getattr(self, k).cuda())
+        return r
+
+    def to(self, *args, **kwargs):
+        r = Results(orig_img=self.orig_img, path=self.path, names=self.names)
+        for k in self.keys:
+            setattr(r, k, getattr(self, k).to(*args, **kwargs))
+        return r
+
+    def __len__(self):
+        for k in self.keys:
+            return len(getattr(self, k))
+
+    @property
+    def keys(self):
+        return [k for k in self._keys if getattr(self, k) is not None]
+
+    def plot(self, show_conf=True, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
+        """
+        Plots the detection results on an input RGB image. Accepts a numpy array (cv2) or a PIL Image.
+
+        Args:
+            show_conf (bool): Whether to show the detection confidence score.
+            line_width (float, optional): The line width of the bounding boxes. If None, it is scaled to the image size.
+            font_size (float, optional): The font size of the text. If None, it is scaled to the image size.
+            font (str): The font to use for the text.
+            pil (bool): Whether to return the image as a PIL Image.
+            example (str): An example string to display. Useful for indicating the expected format of the output.
+
+        Returns:
+            (None) or (PIL.Image): If `pil` is True, a PIL Image is returned. Otherwise, nothing is returned.
+ """ + annotator = Annotator(deepcopy(self.orig_img), line_width, font_size, font, pil, example) + boxes = self.boxes + masks = self.masks + probs = self.probs + names = self.names + hide_labels, hide_conf = False, not show_conf + if boxes is not None: + for d in reversed(boxes): + c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item()) + name = ('' if id is None else f'id:{id} ') + names[c] + label = None if hide_labels else (name if hide_conf else f'{name} {conf:.2f}') + annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) + + if masks is not None: + im = torch.as_tensor(annotator.im, dtype=torch.float16, device=masks.data.device).permute(2, 0, 1).flip(0) + im = F.resize(im.contiguous(), masks.data.shape[1:]) / 255 + annotator.masks(masks.data, colors=[colors(x, True) for x in boxes.cls], im_gpu=im) + + if probs is not None: + n5 = min(len(names), 5) + top5i = probs.argsort(0, descending=True)[:n5].tolist() # top 5 indices + text = f"{', '.join(f'{names[j] if names else j} {probs[j]:.2f}' for j in top5i)}, " + annotator.text((32, 32), text, txt_color=(255, 255, 255)) # TODO: allow setting colors + + return np.asarray(annotator.im) if annotator.pil else annotator.im + + +class Boxes(SimpleClass): + """ + A class for storing and manipulating detection boxes. + + Args: + boxes (torch.Tensor) or (numpy.ndarray): A tensor or numpy array containing the detection boxes, + with shape (num_boxes, 6). The last two columns should contain confidence and class values. + orig_shape (tuple): Original image size, in the format (height, width). + + Attributes: + boxes (torch.Tensor) or (numpy.ndarray): A tensor or numpy array containing the detection boxes, + with shape (num_boxes, 6). + orig_shape (torch.Tensor) or (numpy.ndarray): Original image size, in the format (height, width). + is_track (bool): True if the boxes also include track IDs, False otherwise. + + Properties: + xyxy (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format. + conf (torch.Tensor) or (numpy.ndarray): The confidence values of the boxes. + cls (torch.Tensor) or (numpy.ndarray): The class values of the boxes. + id (torch.Tensor) or (numpy.ndarray): The track IDs of the boxes (if available). + xywh (torch.Tensor) or (numpy.ndarray): The boxes in xywh format. + xyxyn (torch.Tensor) or (numpy.ndarray): The boxes in xyxy format normalized by original image size. + xywhn (torch.Tensor) or (numpy.ndarray): The boxes in xywh format normalized by original image size. + data (torch.Tensor): The raw bboxes tensor + + Methods: + cpu(): Move the object to CPU memory. + numpy(): Convert the object to a numpy array. + cuda(): Move the object to CUDA memory. + to(*args, **kwargs): Move the object to the specified device. + pandas(): Convert the object to a pandas DataFrame (not yet implemented). 
+ """ + + def __init__(self, boxes, orig_shape) -> None: + if boxes.ndim == 1: + boxes = boxes[None, :] + n = boxes.shape[-1] + assert n in (6, 7), f'expected `n` in [6, 7], but got {n}' # xyxy, (track_id), conf, cls + # TODO + self.is_track = n == 7 + self.boxes = boxes + self.orig_shape = torch.as_tensor(orig_shape, device=boxes.device) if isinstance(boxes, torch.Tensor) \ + else np.asarray(orig_shape) + + @property + def xyxy(self): + return self.boxes[:, :4] + + @property + def conf(self): + return self.boxes[:, -2] + + @property + def cls(self): + return self.boxes[:, -1] + + @property + def id(self): + return self.boxes[:, -3] if self.is_track else None + + @property + @lru_cache(maxsize=2) # maxsize 1 should suffice + def xywh(self): + return ops.xyxy2xywh(self.xyxy) + + @property + @lru_cache(maxsize=2) + def xyxyn(self): + return self.xyxy / self.orig_shape[[1, 0, 1, 0]] + + @property + @lru_cache(maxsize=2) + def xywhn(self): + return self.xywh / self.orig_shape[[1, 0, 1, 0]] + + def cpu(self): + return Boxes(self.boxes.cpu(), self.orig_shape) + + def numpy(self): + return Boxes(self.boxes.numpy(), self.orig_shape) + + def cuda(self): + return Boxes(self.boxes.cuda(), self.orig_shape) + + def to(self, *args, **kwargs): + return Boxes(self.boxes.to(*args, **kwargs), self.orig_shape) + + def pandas(self): + LOGGER.info('results.pandas() method not yet implemented') + + @property + def shape(self): + return self.boxes.shape + + @property + def data(self): + return self.boxes + + def __len__(self): # override len(results) + return len(self.boxes) + + def __getitem__(self, idx): + return Boxes(self.boxes[idx], self.orig_shape) + + +class Masks(SimpleClass): + """ + A class for storing and manipulating detection masks. + + Args: + masks (torch.Tensor): A tensor containing the detection masks, with shape (num_masks, height, width). + orig_shape (tuple): Original image size, in the format (height, width). + + Attributes: + masks (torch.Tensor): A tensor containing the detection masks, with shape (num_masks, height, width). + orig_shape (tuple): Original image size, in the format (height, width). + + Properties: + segments (list): A list of segments which includes x, y, w, h, label, confidence, and mask of each detection. + + Methods: + cpu(): Returns a copy of the masks tensor on CPU memory. + numpy(): Returns a copy of the masks tensor as a numpy array. + cuda(): Returns a copy of the masks tensor on GPU memory. + to(): Returns a copy of the masks tensor with the specified device and dtype. 
+ """ + + def __init__(self, masks, orig_shape) -> None: + self.masks = masks # N, h, w + self.orig_shape = orig_shape + + @property + @lru_cache(maxsize=1) + def segments(self): + return [ + ops.scale_segments(self.masks.shape[1:], x, self.orig_shape, normalize=True) + for x in ops.masks2segments(self.masks)] + + @property + def shape(self): + return self.masks.shape + + @property + def data(self): + return self.masks + + def cpu(self): + return Masks(self.masks.cpu(), self.orig_shape) + + def numpy(self): + return Masks(self.masks.numpy(), self.orig_shape) + + def cuda(self): + return Masks(self.masks.cuda(), self.orig_shape) + + def to(self, *args, **kwargs): + return Masks(self.masks.to(*args, **kwargs), self.orig_shape) + + def __len__(self): # override len(results) + return len(self.masks) + + def __getitem__(self, idx): + return Masks(self.masks[idx], self.orig_shape) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/trainer.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/trainer.py new file mode 100644 index 0000000..988faa8 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/trainer.py @@ -0,0 +1,655 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Train a model on a dataset + +Usage: + $ yolo mode=train model=yolov8n.pt data=coco128.yaml imgsz=640 epochs=100 batch=16 +""" +import os +import subprocess +import time +from collections import defaultdict +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.optim import lr_scheduler +from tqdm import tqdm + +from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset +from ultralytics.yolo.utils import (DEFAULT_CFG, LOGGER, ONLINE, RANK, ROOT, SETTINGS, TQDM_BAR_FORMAT, __version__, + callbacks, colorstr, emojis, yaml_save) +from ultralytics.yolo.utils.autobatch import check_train_batch_size +from ultralytics.yolo.utils.checks import check_file, check_imgsz, print_args +from ultralytics.yolo.utils.dist import ddp_cleanup, generate_ddp_command +from ultralytics.yolo.utils.files import get_latest_run, increment_path +from ultralytics.yolo.utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, init_seeds, one_cycle, + select_device, strip_optimizer) + + +class BaseTrainer: + """ + BaseTrainer + + A base class for creating trainers. + + Attributes: + args (SimpleNamespace): Configuration for the trainer. + check_resume (method): Method to check if training should be resumed from a saved checkpoint. + validator (BaseValidator): Validator instance. + model (nn.Module): Model instance. + callbacks (defaultdict): Dictionary of callbacks. + save_dir (Path): Directory to save results. + wdir (Path): Directory to save weights. + last (Path): Path to last checkpoint. + best (Path): Path to best checkpoint. + save_period (int): Save checkpoint every x epochs (disabled if < 1). + batch_size (int): Batch size for training. + epochs (int): Number of epochs to train for. + start_epoch (int): Starting epoch for training. + device (torch.device): Device to use for training. + amp (bool): Flag to enable AMP (Automatic Mixed Precision). + scaler (amp.GradScaler): Gradient scaler for AMP. 
+ data (str): Path to data. + trainset (torch.utils.data.Dataset): Training dataset. + testset (torch.utils.data.Dataset): Testing dataset. + ema (nn.Module): EMA (Exponential Moving Average) of the model. + lf (nn.Module): Loss function. + scheduler (torch.optim.lr_scheduler._LRScheduler): Learning rate scheduler. + best_fitness (float): The best fitness value achieved. + fitness (float): Current fitness value. + loss (float): Current loss value. + tloss (float): Total loss value. + loss_names (list): List of loss names. + csv (Path): Path to results CSV file. + """ + + def __init__(self, cfg=DEFAULT_CFG, overrides=None): + """ + Initializes the BaseTrainer class. + + Args: + cfg (str, optional): Path to a configuration file. Defaults to DEFAULT_CFG. + overrides (dict, optional): Configuration overrides. Defaults to None. + """ + self.args = get_cfg(cfg, overrides) + self.device = select_device(self.args.device, self.args.batch) + self.check_resume() + self.validator = None + self.model = None + self.metrics = None + init_seeds(self.args.seed + 1 + RANK, deterministic=self.args.deterministic) + + # Dirs + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + if hasattr(self.args, 'save_dir'): + self.save_dir = Path(self.args.save_dir) + else: + self.save_dir = Path( + increment_path(Path(project) / name, exist_ok=self.args.exist_ok if RANK in (-1, 0) else True)) + self.wdir = self.save_dir / 'weights' # weights dir + if RANK in (-1, 0): + self.wdir.mkdir(parents=True, exist_ok=True) # make dir + self.args.save_dir = str(self.save_dir) + yaml_save(self.save_dir / 'args.yaml', vars(self.args)) # save run args + self.last, self.best = self.wdir / 'last.pt', self.wdir / 'best.pt' # checkpoint paths + self.save_period = self.args.save_period + + self.batch_size = self.args.batch + self.epochs = self.args.epochs + self.start_epoch = 0 + if RANK == -1: + print_args(vars(self.args)) + + # Device + if self.device.type == 'cpu': + self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading + + # Model and Dataloaders. + self.model = self.args.model + try: + if self.args.task == 'classify': + self.data = check_cls_dataset(self.args.data) + elif self.args.data.endswith('.yaml') or self.args.task in ('detect', 'segment'): + self.data = check_det_dataset(self.args.data) + if 'yaml_file' in self.data: + self.args.data = self.data['yaml_file'] # for validating 'yolo train data=url.zip' usage + except Exception as e: + raise RuntimeError(emojis(f"Dataset '{self.args.data}' error ❌ {e}")) from e + + self.trainset, self.testset = self.get_dataset(self.data) + self.ema = None + + # Optimization utils init + self.lf = None + self.scheduler = None + + # Epoch level metrics + self.best_fitness = None + self.fitness = None + self.loss = None + self.tloss = None + self.loss_names = ['Loss'] + self.csv = self.save_dir / 'results.csv' + self.plot_idx = [0, 1, 2] + + # Callbacks + self.callbacks = defaultdict(list, callbacks.default_callbacks) # add callbacks + if RANK in (-1, 0): + callbacks.add_integration_callbacks(self) + + def add_callback(self, event: str, callback): + """ + Appends the given callback. + """ + self.callbacks[event].append(callback) + + def set_callback(self, event: str, callback): + """ + Overrides the existing callbacks with the given callback. 
+ """ + self.callbacks[event] = [callback] + + def run_callbacks(self, event: str): + for callback in self.callbacks.get(event, []): + callback(self) + + def train(self): + # Allow device='', device=None on Multi-GPU systems to default to device=0 + if isinstance(self.args.device, int) or self.args.device: # i.e. device=0 or device=[0,1,2,3] + world_size = torch.cuda.device_count() + elif torch.cuda.is_available(): # i.e. device=None or device='' + world_size = 1 # default to device 0 + else: # i.e. device='cpu' or 'mps' + world_size = 0 + + # Run subprocess if DDP training, else train normally + if world_size > 1 and 'LOCAL_RANK' not in os.environ: + # Argument checks + if self.args.rect: + LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting rect=False") + self.args.rect = False + # Command + cmd, file = generate_ddp_command(world_size, self) + try: + LOGGER.info(f'Running DDP command {cmd}') + subprocess.run(cmd, check=True) + except Exception as e: + raise e + finally: + ddp_cleanup(self, str(file)) + else: + self._do_train(world_size) + + def _setup_ddp(self, world_size): + torch.cuda.set_device(RANK) + self.device = torch.device('cuda', RANK) + LOGGER.info(f'DDP settings: RANK {RANK}, WORLD_SIZE {world_size}, DEVICE {self.device}') + dist.init_process_group('nccl' if dist.is_nccl_available() else 'gloo', rank=RANK, world_size=world_size) + + def _setup_train(self, world_size): + """ + Builds dataloaders and optimizer on correct rank process. + """ + # Model + self.run_callbacks('on_pretrain_routine_start') + ckpt = self.setup_model() + self.model = self.model.to(self.device) + self.set_model_attributes() + # Check AMP + self.amp = torch.tensor(self.args.amp).to(self.device) # True or False + if self.amp and RANK in (-1, 0): # Single-GPU and DDP + callbacks_backup = callbacks.default_callbacks.copy() # backup callbacks as check_amp() resets them + self.amp = torch.tensor(check_amp(self.model), device=self.device) + callbacks.default_callbacks = callbacks_backup # restore callbacks + if RANK > -1: # DDP + dist.broadcast(self.amp, src=0) # broadcast the tensor from rank 0 to all other ranks (returns None) + self.amp = bool(self.amp) # as boolean + self.scaler = amp.GradScaler(enabled=self.amp) + if world_size > 1: + self.model = DDP(self.model, device_ids=[RANK]) + # Check imgsz + gs = max(int(self.model.stride.max() if hasattr(self.model, 'stride') else 32), 32) # grid size (max stride) + self.args.imgsz = check_imgsz(self.args.imgsz, stride=gs, floor=gs, max_dim=1) + # Batch size + if self.batch_size == -1: + if RANK == -1: # single-GPU only, estimate best batch size + self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp) + else: + SyntaxError('batch=-1 to use AutoBatch is only available in Single-GPU training. ' + 'Please pass a valid batch size value for Multi-GPU DDP training, i.e. 
batch=16') + + # Optimizer + self.accumulate = max(round(self.args.nbs / self.batch_size), 1) # accumulate loss before optimizing + weight_decay = self.args.weight_decay * self.batch_size * self.accumulate / self.args.nbs # scale weight_decay + self.optimizer = self.build_optimizer(model=self.model, + name=self.args.optimizer, + lr=self.args.lr0, + momentum=self.args.momentum, + decay=weight_decay) + # Scheduler + if self.args.cos_lr: + self.lf = one_cycle(1, self.args.lrf, self.epochs) # cosine 1->hyp['lrf'] + else: + self.lf = lambda x: (1 - x / self.epochs) * (1.0 - self.args.lrf) + self.args.lrf # linear + self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lf) + self.stopper, self.stop = EarlyStopping(patience=self.args.patience), False + + # dataloaders + batch_size = self.batch_size // world_size if world_size > 1 else self.batch_size + self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode='train') + if RANK in (-1, 0): + self.test_loader = self.get_dataloader(self.testset, batch_size=batch_size * 2, rank=-1, mode='val') + self.validator = self.get_validator() + metric_keys = self.validator.metrics.keys + self.label_loss_items(prefix='val') + self.metrics = dict(zip(metric_keys, [0] * len(metric_keys))) # TODO: init metrics for plot_results()? + self.ema = ModelEMA(self.model) + if self.args.plots and not self.args.v5loader: + self.plot_training_labels() + self.resume_training(ckpt) + self.scheduler.last_epoch = self.start_epoch - 1 # do not move + self.run_callbacks('on_pretrain_routine_end') + + def _do_train(self, world_size=1): + if world_size > 1: + self._setup_ddp(world_size) + + self._setup_train(world_size) + + self.epoch_time = None + self.epoch_time_start = time.time() + self.train_time_start = time.time() + nb = len(self.train_loader) # number of batches + nw = max(round(self.args.warmup_epochs * nb), 100) # number of warmup iterations + last_opt_step = -1 + self.run_callbacks('on_train_start') + LOGGER.info(f'Image sizes {self.args.imgsz} train, {self.args.imgsz} val\n' + f'Using {self.train_loader.num_workers * (world_size or 1)} dataloader workers\n' + f"Logging results to {colorstr('bold', self.save_dir)}\n" + f'Starting training for {self.epochs} epochs...') + if self.args.close_mosaic: + base_idx = (self.epochs - self.args.close_mosaic) * nb + self.plot_idx.extend([base_idx, base_idx + 1, base_idx + 2]) + for epoch in range(self.start_epoch, self.epochs): + self.epoch = epoch + self.run_callbacks('on_train_epoch_start') + self.model.train() + if RANK != -1: + self.train_loader.sampler.set_epoch(epoch) + pbar = enumerate(self.train_loader) + # Update dataloader attributes (optional) + if epoch == (self.epochs - self.args.close_mosaic): + LOGGER.info('Closing dataloader mosaic') + if hasattr(self.train_loader.dataset, 'mosaic'): + self.train_loader.dataset.mosaic = False + if hasattr(self.train_loader.dataset, 'close_mosaic'): + self.train_loader.dataset.close_mosaic(hyp=self.args) + + if RANK in (-1, 0): + LOGGER.info(self.progress_string()) + pbar = tqdm(enumerate(self.train_loader), total=nb, bar_format=TQDM_BAR_FORMAT) + self.tloss = None + self.optimizer.zero_grad() + for i, batch in pbar: + self.run_callbacks('on_train_batch_start') + # Warmup + ni = i + nb * epoch + if ni <= nw: + xi = [0, nw] # x interp + self.accumulate = max(1, np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()) + for j, x in enumerate(self.optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise 
from 0.0 to lr0 + x['lr'] = np.interp( + ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x['initial_lr'] * self.lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum]) + + # Forward + with torch.cuda.amp.autocast(self.amp): + batch = self.preprocess_batch(batch) + preds = self.model(batch['img']) + self.loss, self.loss_items = self.criterion(preds, batch) + if RANK != -1: + self.loss *= world_size + self.tloss = (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None \ + else self.loss_items + + # Backward + self.scaler.scale(self.loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= self.accumulate: + self.optimizer_step() + last_opt_step = ni + + # Log + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + loss_len = self.tloss.shape[0] if len(self.tloss.size()) else 1 + losses = self.tloss if loss_len > 1 else torch.unsqueeze(self.tloss, 0) + if RANK in (-1, 0): + pbar.set_description( + ('%11s' * 2 + '%11.4g' * (2 + loss_len)) % + (f'{epoch + 1}/{self.epochs}', mem, *losses, batch['cls'].shape[0], batch['img'].shape[-1])) + self.run_callbacks('on_batch_end') + if self.args.plots and ni in self.plot_idx: + self.plot_training_samples(batch, ni) + + self.run_callbacks('on_train_batch_end') + + self.lr = {f'lr/pg{ir}': x['lr'] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers + + self.scheduler.step() + self.run_callbacks('on_train_epoch_end') + + if RANK in (-1, 0): + + # Validation + self.ema.update_attr(self.model, include=['yaml', 'nc', 'args', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == self.epochs) or self.stopper.possible_stop + + if self.args.val or final_epoch: + self.metrics, self.fitness = self.validate() + self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr}) + self.stop = self.stopper(epoch + 1, self.fitness) + + # Save model + if self.args.save or (epoch + 1 == self.epochs): + self.save_model() + self.run_callbacks('on_model_save') + + tnow = time.time() + self.epoch_time = tnow - self.epoch_time_start + self.epoch_time_start = tnow + self.run_callbacks('on_fit_epoch_end') + + # Early Stopping + if RANK != -1: # if DDP training + broadcast_list = [self.stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + self.stop = broadcast_list[0] + if self.stop: + break # must break all DDP ranks + + if RANK in (-1, 0): + # Do final val with best.pt + LOGGER.info(f'\n{epoch - self.start_epoch + 1} epochs completed in ' + f'{(time.time() - self.train_time_start) / 3600:.3f} hours.') + self.final_eval() + if self.args.plots: + self.plot_metrics() + self.run_callbacks('on_train_end') + torch.cuda.empty_cache() + self.run_callbacks('teardown') + + def save_model(self): + ckpt = { + 'epoch': self.epoch, + 'best_fitness': self.best_fitness, + 'model': deepcopy(de_parallel(self.model)).half(), + 'ema': deepcopy(self.ema.ema).half(), + 'updates': self.ema.updates, + 'optimizer': self.optimizer.state_dict(), + 'train_args': vars(self.args), # save as dict + 'date': datetime.now().isoformat(), + 'version': __version__} + + # Save last, best and delete + torch.save(ckpt, self.last) + if self.best_fitness == self.fitness: + torch.save(ckpt, self.best) + if (self.epoch > 0) and (self.save_period > 0) and (self.epoch % self.save_period == 0): + torch.save(ckpt, 
self.wdir / f'epoch{self.epoch}.pt') + del ckpt + + @staticmethod + def get_dataset(data): + """ + Get train, val path from data dict if it exists. Returns None if data format is not recognized. + """ + return data['train'], data.get('val') or data.get('test') + + def setup_model(self): + """ + load/create/download model for any task. + """ + if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed + return + + model, weights = self.model, None + ckpt = None + if str(model).endswith('.pt'): + weights, ckpt = attempt_load_one_weight(model) + cfg = ckpt['model'].yaml + else: + cfg = model + self.model = self.get_model(cfg=cfg, weights=weights, verbose=RANK == -1) # calls Model(cfg, weights) + return ckpt + + def optimizer_step(self): + self.scaler.unscale_(self.optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0) # clip gradients + self.scaler.step(self.optimizer) + self.scaler.update() + self.optimizer.zero_grad() + if self.ema: + self.ema.update(self.model) + + def preprocess_batch(self, batch): + """ + Allows custom preprocessing model inputs and ground truths depending on task type. + """ + return batch + + def validate(self): + """ + Runs validation on test set using self.validator. The returned dict is expected to contain "fitness" key. + """ + metrics = self.validator(self) + fitness = metrics.pop('fitness', -self.loss.detach().cpu().numpy()) # use loss as fitness measure if not found + if not self.best_fitness or self.best_fitness < fitness: + self.best_fitness = fitness + return metrics, fitness + + def get_model(self, cfg=None, weights=None, verbose=True): + raise NotImplementedError("This task trainer doesn't support loading cfg files") + + def get_validator(self): + raise NotImplementedError('get_validator function not implemented in trainer') + + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + """ + Returns dataloader derived from torch.data.Dataloader. + """ + raise NotImplementedError('get_dataloader function not implemented in trainer') + + def criterion(self, preds, batch): + """ + Returns loss and individual loss items as Tensor. + """ + raise NotImplementedError('criterion function not implemented in trainer') + + def label_loss_items(self, loss_items=None, prefix='train'): + """ + Returns a loss dict with labelled training loss items tensor + """ + # Not needed for classification but necessary for segmentation & detection + return {'loss': loss_items} if loss_items is not None else ['loss'] + + def set_model_attributes(self): + """ + To set or update model parameters before training. 
+ """ + self.model.names = self.data['names'] + + def build_targets(self, preds, targets): + pass + + def progress_string(self): + return '' + + # TODO: may need to put these following functions into callback + def plot_training_samples(self, batch, ni): + pass + + def plot_training_labels(self): + pass + + def save_metrics(self, metrics): + keys, vals = list(metrics.keys()), list(metrics.values()) + n = len(metrics) + 1 # number of cols + s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header + with open(self.csv, 'a') as f: + f.write(s + ('%23.5g,' * n % tuple([self.epoch] + vals)).rstrip(',') + '\n') + + def plot_metrics(self): + pass + + def final_eval(self): + for f in self.last, self.best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is self.best: + LOGGER.info(f'\nValidating {f}...') + self.metrics = self.validator(model=f) + self.metrics.pop('fitness', None) + self.run_callbacks('on_fit_epoch_end') + + def check_resume(self): + resume = self.args.resume + if resume: + try: + last = Path( + check_file(resume) if isinstance(resume, (str, + Path)) and Path(resume).exists() else get_latest_run()) + self.args = get_cfg(attempt_load_weights(last).args) + self.args.model, resume = str(last), True # reinstate + except Exception as e: + raise FileNotFoundError('Resume checkpoint not found. Please pass a valid checkpoint to resume from, ' + "i.e. 'yolo train resume model=path/to/last.pt'") from e + self.resume = resume + + def resume_training(self, ckpt): + if ckpt is None: + return + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + self.optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if self.ema and ckpt.get('ema'): + self.ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + self.ema.updates = ckpt['updates'] + if self.resume: + assert start_epoch > 0, \ + f'{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without --resume, i.e. 'yolo task=... mode=train model={self.args.model}'" + LOGGER.info( + f'Resuming training from {self.args.model} from epoch {start_epoch + 1} to {self.epochs} total epochs') + if self.epochs < start_epoch: + LOGGER.info( + f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs.") + self.epochs += ckpt['epoch'] # finetune additional epochs + self.best_fitness = best_fitness + self.start_epoch = start_epoch + if start_epoch > (self.epochs - self.args.close_mosaic): + LOGGER.info('Closing dataloader mosaic') + if hasattr(self.train_loader.dataset, 'mosaic'): + self.train_loader.dataset.mosaic = False + if hasattr(self.train_loader.dataset, 'close_mosaic'): + self.train_loader.dataset.close_mosaic(hyp=self.args) + + @staticmethod + def build_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): + """ + Builds an optimizer with the specified parameters and parameter groups. + + Args: + model (nn.Module): model to optimize + name (str): name of the optimizer to use + lr (float): learning rate + momentum (float): momentum + decay (float): weight decay + + Returns: + optimizer (torch.optim.Optimizer): the built optimizer + """ + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. 
BatchNorm2d() + for v in model.modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): # bias (no decay) + g[2].append(v.bias) + if isinstance(v, bn): # weight (no decay) + g[1].append(v.weight) + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): # weight (with decay) + g[0].append(v.weight) + + if name == 'Adam': + optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum + elif name == 'AdamW': + optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) + elif name == 'RMSProp': + optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + else: + raise NotImplementedError(f'Optimizer {name} not implemented.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') + return optimizer + + +def check_amp(model): + """ + This function checks the PyTorch Automatic Mixed Precision (AMP) functionality of a YOLOv8 model. + If the checks fail, it means there are anomalies with AMP on the system that may cause NaN losses or zero-mAP + results, so AMP will be disabled during training. + + Args: + model (nn.Module): A YOLOv8 model instance. + + Returns: + bool: Returns True if the AMP functionality works correctly with YOLOv8 model, else False. + + Raises: + AssertionError: If the AMP checks fail, indicating anomalies with the AMP functionality on the system. + """ + device = next(model.parameters()).device # get model device + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices + + def amp_allclose(m, im): + # All close FP32 vs AMP results + a = m(im, device=device, verbose=False)[0].boxes.boxes # FP32 inference + with torch.cuda.amp.autocast(True): + b = m(im, device=device, verbose=False)[0].boxes.boxes # AMP inference + del m + return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5) # close to 0.5 absolute tolerance + + f = ROOT / 'assets/bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if ONLINE else np.ones((640, 640, 3)) + prefix = colorstr('AMP: ') + LOGGER.info(f'{prefix}running Automatic Mixed Precision (AMP) checks with YOLOv8n...') + try: + from ultralytics import YOLO + assert amp_allclose(YOLO('yolov8n.pt'), im) + LOGGER.info(f'{prefix}checks passed ✅') + except ConnectionError: + LOGGER.warning(f"{prefix}checks skipped ⚠️, offline and unable to download YOLOv8n. Setting 'amp=True'.") + except AssertionError: + LOGGER.warning(f'{prefix}checks failed ❌. 
Anomalies were detected with AMP on your system that may lead to ' + f'NaN losses or zero-mAP results, so AMP will be disabled during training.') + return False + return True diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/validator.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/validator.py new file mode 100644 index 0000000..dddca2a --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/engine/validator.py @@ -0,0 +1,247 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Check a model's accuracy on a test or val split of a dataset + +Usage: + $ yolo mode=val model=yolov8n.pt data=coco128.yaml imgsz=640 + +Usage - formats: + $ yolo mode=val model=yolov8n.pt # PyTorch + yolov8n.torchscript # TorchScript + yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True + yolov8n_openvino_model # OpenVINO + yolov8n.engine # TensorRT + yolov8n.mlmodel # CoreML (macOS-only) + yolov8n_saved_model # TensorFlow SavedModel + yolov8n.pb # TensorFlow GraphDef + yolov8n.tflite # TensorFlow Lite + yolov8n_edgetpu.tflite # TensorFlow Edge TPU + yolov8n_paddle_model # PaddlePaddle +""" +import json +from collections import defaultdict +from pathlib import Path + +import torch +from tqdm import tqdm + +from ultralytics.nn.autobackend import AutoBackend +from ultralytics.yolo.cfg import get_cfg +from ultralytics.yolo.data.utils import check_cls_dataset, check_det_dataset +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, SETTINGS, TQDM_BAR_FORMAT, callbacks, colorstr, emojis +from ultralytics.yolo.utils.checks import check_imgsz +from ultralytics.yolo.utils.files import increment_path +from ultralytics.yolo.utils.ops import Profile +from ultralytics.yolo.utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +class BaseValidator: + """ + BaseValidator + + A base class for creating validators. + + Attributes: + dataloader (DataLoader): Dataloader to use for validation. + pbar (tqdm): Progress bar to update during validation. + args (SimpleNamespace): Configuration for the validator. + model (nn.Module): Model to validate. + data (dict): Data dictionary. + device (torch.device): Device to use for validation. + batch_i (int): Current batch index. + training (bool): Whether the model is in training mode. + speed (float): Batch processing speed in seconds. + jdict (dict): Dictionary to store validation results. + save_dir (Path): Directory to save results. + """ + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None): + """ + Initializes a BaseValidator instance. + + Args: + dataloader (torch.utils.data.DataLoader): Dataloader to be used for validation. + save_dir (Path): Directory to save results. + pbar (tqdm.tqdm): Progress bar for displaying progress. + args (SimpleNamespace): Configuration for the validator. 
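+
+        Example:
+            Illustrative sketch of standalone validation through the high-level API, not a full
+            reference; assumes a local 'yolov8n.pt' checkpoint and the 'coco128.yaml' dataset:
+
+            >>> from ultralytics import YOLO
+            >>> metrics = YOLO('yolov8n.pt').val(data='coco128.yaml', imgsz=640)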
+ """ + self.dataloader = dataloader + self.pbar = pbar + self.args = args or get_cfg(DEFAULT_CFG) + self.model = None + self.data = None + self.device = None + self.batch_i = None + self.training = True + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + self.jdict = None + + project = self.args.project or Path(SETTINGS['runs_dir']) / self.args.task + name = self.args.name or f'{self.args.mode}' + self.save_dir = save_dir or increment_path(Path(project) / name, + exist_ok=self.args.exist_ok if RANK in (-1, 0) else True) + (self.save_dir / 'labels' if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True) + + if self.args.conf is None: + self.args.conf = 0.001 # default conf=0.001 + + self.callbacks = defaultdict(list, callbacks.default_callbacks) # add callbacks + + @smart_inference_mode() + def __call__(self, trainer=None, model=None): + """ + Supports validation of a pre-trained model if passed or a model being trained + if trainer is passed (trainer gets priority). + """ + self.training = trainer is not None + if self.training: + self.device = trainer.device + self.data = trainer.data + model = trainer.ema.ema or trainer.model + self.args.half = self.device.type != 'cpu' # force FP16 val during training + model = model.half() if self.args.half else model.float() + self.model = model + self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device) + self.args.plots = trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1) + model.eval() + else: + callbacks.add_integration_callbacks(self) + self.run_callbacks('on_val_start') + assert model is not None, 'Either trainer or model is needed for validation' + self.device = select_device(self.args.device, self.args.batch) + self.args.half &= self.device.type != 'cpu' + model = AutoBackend(model, device=self.device, dnn=self.args.dnn, data=self.args.data, fp16=self.args.half) + self.model = model + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_imgsz(self.args.imgsz, stride=stride) + if engine: + self.args.batch = model.batch_size + else: + self.device = model.device + if not pt and not jit: + self.args.batch = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + if isinstance(self.args.data, str) and self.args.data.endswith('.yaml'): + self.data = check_det_dataset(self.args.data) + elif self.args.task == 'classify': + self.data = check_cls_dataset(self.args.data) + else: + raise FileNotFoundError(emojis(f"Dataset '{self.args.data}' for task={self.args.task} not found ❌")) + + if self.device.type == 'cpu': + self.args.workers = 0 # faster CPU val as time dominated by inference, not dataloading + if not pt: + self.args.rect = False + self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch) + + model.eval() + model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz)) # warmup + + dt = Profile(), Profile(), Profile(), Profile() + n_batches = len(self.dataloader) + desc = self.get_desc() + # NOTE: keeping `not self.training` in tqdm will eliminate pbar after segmentation evaluation during training, + # which may affect classification task since this arg is in yolov5/classify/val.py. 
+ # bar = tqdm(self.dataloader, desc, n_batches, not self.training, bar_format=TQDM_BAR_FORMAT) + bar = tqdm(self.dataloader, desc, n_batches, bar_format=TQDM_BAR_FORMAT) + self.init_metrics(de_parallel(model)) + self.jdict = [] # empty before each val + for batch_i, batch in enumerate(bar): + self.run_callbacks('on_val_batch_start') + self.batch_i = batch_i + # preprocess + with dt[0]: + batch = self.preprocess(batch) + + # inference + with dt[1]: + preds = model(batch['img']) + + # loss + with dt[2]: + if self.training: + self.loss += trainer.criterion(preds, batch)[1] + + # postprocess + with dt[3]: + preds = self.postprocess(preds) + + self.update_metrics(preds, batch) + if self.args.plots and batch_i < 3: + self.plot_val_samples(batch, batch_i) + self.plot_predictions(batch, preds, batch_i) + + self.run_callbacks('on_val_batch_end') + stats = self.get_stats() + self.check_stats(stats) + self.print_results() + self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1E3 for x in dt))) + self.finalize_metrics() + self.run_callbacks('on_val_end') + if self.training: + model.float() + results = {**stats, **trainer.label_loss_items(self.loss.cpu() / len(self.dataloader), prefix='val')} + return {k: round(float(v), 5) for k, v in results.items()} # return results as 5 decimal place floats + else: + LOGGER.info('Speed: %.1fms preprocess, %.1fms inference, %.1fms loss, %.1fms postprocess per image' % + tuple(self.speed.values())) + if self.args.save_json and self.jdict: + with open(str(self.save_dir / 'predictions.json'), 'w') as f: + LOGGER.info(f'Saving {f.name}...') + json.dump(self.jdict, f) # flatten and save + stats = self.eval_json(stats) # update stats + if self.args.plots or self.args.save_json: + LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}") + return stats + + def run_callbacks(self, event: str): + for callback in self.callbacks.get(event, []): + callback(self) + + def get_dataloader(self, dataset_path, batch_size): + raise NotImplementedError('get_dataloader function not implemented for this validator') + + def preprocess(self, batch): + return batch + + def postprocess(self, preds): + return preds + + def init_metrics(self, model): + pass + + def update_metrics(self, preds, batch): + pass + + def finalize_metrics(self, *args, **kwargs): + pass + + def get_stats(self): + return {} + + def check_stats(self, stats): + pass + + def print_results(self): + pass + + def get_desc(self): + pass + + @property + def metric_keys(self): + return [] + + # TODO: may need to put these following functions into callback + def plot_val_samples(self, batch, ni): + pass + + def plot_predictions(self, batch, preds, ni): + pass + + def pred_to_json(self, preds, batch): + pass + + def eval_json(self, stats): + pass diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/__init__.py new file mode 100644 index 0000000..e4f9b88 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/__init__.py @@ -0,0 +1,654 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import contextlib +import inspect +import logging.config +import os +import platform +import re +import subprocess +import sys +import tempfile +import threading +import uuid +from pathlib import Path +from types import SimpleNamespace +from typing import Union + +import cv2 +import numpy as np +import torch +import yaml + +from ultralytics import __version__ + +# 
PyTorch Multi-GPU DDP Constants +RANK = int(os.getenv('RANK', -1)) +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + +# Other Constants +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLO +DEFAULT_CFG_PATH = ROOT / 'yolo/cfg/default.yaml' +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +AUTOINSTALL = str(os.getenv('YOLO_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLO_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format +LOGGING_NAME = 'ultralytics' +MACOS, LINUX, WINDOWS = (platform.system() == x for x in ['Darwin', 'Linux', 'Windows']) # environment booleans +HELP_MSG = \ + """ + Usage examples for running YOLOv8: + + 1. Install the ultralytics package: + + pip install ultralytics + + 2. Use the Python SDK: + + from ultralytics import YOLO + + # Load a model + model = YOLO('yolov8n.yaml') # build a new model from scratch + model = YOLO("yolov8n.pt") # load a pretrained model (recommended for training) + + # Use the model + results = model.train(data="coco128.yaml", epochs=3) # train the model + results = model.val() # evaluate model performance on the validation set + results = model('https://ultralytics.com/images/bus.jpg') # predict on an image + success = model.export(format='onnx') # export the model to ONNX format + + 3. Use the command line interface (CLI): + + YOLOv8 'yolo' CLI commands use the following syntax: + + yolo TASK MODE ARGS + + Where TASK (optional) is one of [detect, segment, classify] + MODE (required) is one of [train, val, predict, export] + ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults. + See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg' + + - Train a detection model for 10 epochs with an initial learning_rate of 0.01 + yolo detect train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01 + + - Predict a YouTube video using a pretrained segmentation model at image size 320: + yolo segment predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320 + + - Val a pretrained detection model at batch-size 1 and image size 640: + yolo detect val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640 + + - Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required) + yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128 + + - Run special commands: + yolo help + yolo checks + yolo version + yolo settings + yolo copy-cfg + yolo cfg + + Docs: https://docs.ultralytics.com + Community: https://community.ultralytics.com + GitHub: https://github.com/ultralytics/ultralytics + """ + +# Settings +torch.set_printoptions(linewidth=320, precision=4, profile='default') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' # for deterministic training + + +class SimpleClass: + """ + Ultralytics SimpleClass is a base class providing helpful string representation, error reporting, and attribute + access methods for easier debugging and usage. 
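+
+    Example:
+        A minimal usage sketch with a hypothetical subclass (not part of the upstream code):
+
+        >>> class Speed(SimpleClass):
+        ...     def __init__(self):
+        ...         self.preprocess = 1.5
+        >>> print(Speed())  # lists the module/class name and the 'preprocess' attribute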
+ """ + + def __str__(self): + """Return a human-readable string representation of the object.""" + attr = [] + for a in dir(self): + v = getattr(self, a) + if not callable(v) and not a.startswith('__'): + if isinstance(v, SimpleClass): + # Display only the module and class name for subclasses + s = f'{a}: {v.__module__}.{v.__class__.__name__} object' + else: + s = f'{a}: {repr(v)}' + attr.append(s) + return f'{self.__module__}.{self.__class__.__name__} object with attributes:\n\n' + '\n'.join(attr) + + def __repr__(self): + """Return a machine-readable string representation of the object.""" + return self.__str__() + + def __getattr__(self, attr): + """Custom attribute access error message with helpful information.""" + name = self.__class__.__name__ + raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}") + + +class IterableSimpleNamespace(SimpleNamespace): + """ + Ultralytics IterableSimpleNamespace is an extension class of SimpleNamespace that adds iterable functionality and + enables usage with dict() and for loops. + """ + + def __iter__(self): + """Return an iterator of key-value pairs from the namespace's attributes.""" + return iter(vars(self).items()) + + def __str__(self): + """Return a human-readable string representation of the object.""" + return '\n'.join(f'{k}={v}' for k, v in vars(self).items()) + + def __getattr__(self, attr): + """Custom attribute access error message with helpful information.""" + name = self.__class__.__name__ + raise AttributeError(f""" + '{name}' object has no attribute '{attr}'. This may be caused by a modified or out of date ultralytics + 'default.yaml' file.\nPlease update your code with 'pip install -U ultralytics' and if necessary replace + {DEFAULT_CFG_PATH} with the latest version from + https://github.com/ultralytics/ultralytics/blob/main/ultralytics/yolo/cfg/default.yaml + """) + + def get(self, key, default=None): + """Return the value of the specified key if it exists; otherwise, return the default value.""" + return getattr(self, key, default) + + +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in (-1, 0) else logging.ERROR + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + name: { + 'format': '%(message)s'}}, + 'handlers': { + name: { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level}}, + 'loggers': { + name: { + 'level': level, + 'handlers': [name], + 'propagate': False}}}) + + +# Set logger +set_logging(LOGGING_NAME, verbose=VERBOSE) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) +if WINDOWS: # emoji-safe logging + info_fn, warning_fn = LOGGER.info, LOGGER.warning + setattr(LOGGER, info_fn.__name__, lambda x: info_fn(emojis(x))) + setattr(LOGGER, warning_fn.__name__, lambda x: warning_fn(emojis(x))) + + +def yaml_save(file='data.yaml', data=None): + """ + Save YAML data to a file. + + Args: + file (str, optional): File name. Default is 'data.yaml'. + data (dict, optional): Data to save in YAML format. Default is None. + + Returns: + None: Data is saved to the specified file. 
+ """ + file = Path(file) + if not file.parent.exists(): + # Create parent directories if they don't exist + file.parent.mkdir(parents=True, exist_ok=True) + + with open(file, 'w') as f: + # Dump data to file in YAML format, converting Path objects to strings + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v + for k, v in data.items()}, + f, + sort_keys=False, + allow_unicode=True) + + +def yaml_load(file='data.yaml', append_filename=False): + """ + Load YAML data from a file. + + Args: + file (str, optional): File name. Default is 'data.yaml'. + append_filename (bool): Add the YAML filename to the YAML dictionary. Default is False. + + Returns: + dict: YAML data and file name. + """ + with open(file, errors='ignore', encoding='utf-8') as f: + s = f.read() # string + + # Remove special characters + if not s.isprintable(): + s = re.sub(r'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]+', '', s) + + # Add YAML filename to dict and return + return {**yaml.safe_load(s), 'yaml_file': str(file)} if append_filename else yaml.safe_load(s) + + +def yaml_print(yaml_file: Union[str, Path, dict]) -> None: + """ + Pretty prints a yaml file or a yaml-formatted dictionary. + + Args: + yaml_file: The file path of the yaml file or a yaml-formatted dictionary. + + Returns: + None + """ + yaml_dict = yaml_load(yaml_file) if isinstance(yaml_file, (str, Path)) else yaml_file + dump = yaml.dump(yaml_dict, sort_keys=False, allow_unicode=True) + LOGGER.info(f"Printing '{colorstr('bold', 'black', yaml_file)}'\n\n{dump}") + + +# Default configuration +DEFAULT_CFG_DICT = yaml_load(DEFAULT_CFG_PATH) +for k, v in DEFAULT_CFG_DICT.items(): + if isinstance(v, str) and v.lower() == 'none': + DEFAULT_CFG_DICT[k] = None +DEFAULT_CFG_KEYS = DEFAULT_CFG_DICT.keys() +DEFAULT_CFG = IterableSimpleNamespace(**DEFAULT_CFG_DICT) + + +def is_colab(): + """ + Check if the current script is running inside a Google Colab notebook. + + Returns: + bool: True if running inside a Colab notebook, False otherwise. + """ + return 'COLAB_RELEASE_TAG' in os.environ or 'COLAB_BACKEND_VERSION' in os.environ + + +def is_kaggle(): + """ + Check if the current script is running inside a Kaggle kernel. + + Returns: + bool: True if running inside a Kaggle kernel, False otherwise. + """ + return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + bool: True if running inside a Jupyter Notebook, False otherwise. + """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False + + +def is_docker() -> bool: + """ + Determine if the script is running inside a Docker container. + + Returns: + bool: True if the script is running inside a Docker container, False otherwise. + """ + file = Path('/proc/self/cgroup') + if file.exists(): + with open(file) as f: + return 'docker' in f.read() + else: + return False + + +def is_online() -> bool: + """ + Check internet connectivity by attempting to connect to a known online host. + + Returns: + bool: True if connection is successful, False otherwise. 
+ """ + import socket + with contextlib.suppress(Exception): + host = socket.gethostbyname('www.github.com') + socket.create_connection((host, 80), timeout=2) + return True + return False + + +ONLINE = is_online() + + +def is_pip_package(filepath: str = __name__) -> bool: + """ + Determines if the file at the given filepath is part of a pip package. + + Args: + filepath (str): The filepath to check. + + Returns: + bool: True if the file is part of a pip package, False otherwise. + """ + import importlib.util + + # Get the spec for the module + spec = importlib.util.find_spec(filepath) + + # Return whether the spec is not None and the origin is not None (indicating it is a package) + return spec is not None and spec.origin is not None + + +def is_dir_writeable(dir_path: Union[str, Path]) -> bool: + """ + Check if a directory is writeable. + + Args: + dir_path (str) or (Path): The path to the directory. + + Returns: + bool: True if the directory is writeable, False otherwise. + """ + try: + with tempfile.TemporaryFile(dir=dir_path): + pass + return True + except OSError: + return False + + +def is_pytest_running(): + """ + Determines whether pytest is currently running or not. + + Returns: + (bool): True if pytest is running, False otherwise. + """ + return ('PYTEST_CURRENT_TEST' in os.environ) or ('pytest' in sys.modules) or ('pytest' in Path(sys.argv[0]).stem) + + +def is_github_actions_ci() -> bool: + """ + Determine if the current environment is a GitHub Actions CI Python runner. + + Returns: + (bool): True if the current environment is a GitHub Actions CI Python runner, False otherwise. + """ + return 'GITHUB_ACTIONS' in os.environ and 'RUNNER_OS' in os.environ and 'RUNNER_TOOL_CACHE' in os.environ + + +def is_git_dir(): + """ + Determines whether the current file is part of a git repository. + If the current file is not part of a git repository, returns None. + + Returns: + (bool): True if current file is part of a git repository. + """ + return get_git_dir() is not None + + +def get_git_dir(): + """ + Determines whether the current file is part of a git repository and if so, returns the repository root directory. + If the current file is not part of a git repository, returns None. + + Returns: + (Path) or (None): Git root directory if found or None if not found. + """ + for d in Path(__file__).parents: + if (d / '.git').is_dir(): + return d + return None # no .git dir found + + +def get_git_origin_url(): + """ + Retrieves the origin URL of a git repository. + + Returns: + (str) or (None): The origin URL of the git repository. + """ + if is_git_dir(): + with contextlib.suppress(subprocess.CalledProcessError): + origin = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url']) + return origin.decode().strip() + return None # if not git dir or on error + + +def get_git_branch(): + """ + Returns the current git branch name. If not in a git repository, returns None. + + Returns: + (str) or (None): The current git branch name. + """ + if is_git_dir(): + with contextlib.suppress(subprocess.CalledProcessError): + origin = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + return origin.decode().strip() + return None # if not git dir or on error + + +def get_default_args(func): + """Returns a dictionary of default arguments for a function. + + Args: + func (callable): The function to inspect. + + Returns: + dict: A dictionary where each key is a parameter name, and each value is the default value of that parameter. 
+ """ + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_user_config_dir(sub_dir='Ultralytics'): + """ + Get the user config directory. + + Args: + sub_dir (str): The name of the subdirectory to create. + + Returns: + Path: The path to the user config directory. + """ + # Return the appropriate config directory for each operating system + if WINDOWS: + path = Path.home() / 'AppData' / 'Roaming' / sub_dir + elif MACOS: # macOS + path = Path.home() / 'Library' / 'Application Support' / sub_dir + elif LINUX: + path = Path.home() / '.config' / sub_dir + else: + raise ValueError(f'Unsupported operating system: {platform.system()}') + + # GCP and AWS lambda fix, only /tmp is writeable + if not is_dir_writeable(str(path.parent)): + path = Path('/tmp') / sub_dir + + # Create the subdirectory if it does not exist + path.mkdir(parents=True, exist_ok=True) + + return path + + +USER_CONFIG_DIR = os.getenv('YOLO_CONFIG_DIR', get_user_config_dir()) # Ultralytics settings dir + + +def emojis(string=''): + # Return platform-dependent emoji-safe version of string + return string.encode().decode('ascii', 'ignore') if WINDOWS else string + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +class TryExcept(contextlib.ContextDecorator): + # YOLOv8 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager + def __init__(self, msg='', verbose=True): + self.msg = msg + self.verbose = verbose + + def __enter__(self): + pass + + def __exit__(self, exc_type, value, traceback): + if self.verbose and value: + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) + return True + + +def threaded(func): + # Multi-threads a target function and returns thread. Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def set_sentry(): + """ + Initialize the Sentry SDK for error tracking and reporting if pytest is not currently running. 
+ """ + + def before_send(event, hint): + if 'exc_info' in hint: + exc_type, exc_value, tb = hint['exc_info'] + if exc_type in (KeyboardInterrupt, FileNotFoundError) \ + or 'out of memory' in str(exc_value): + return None # do not send event + + event['tags'] = { + 'sys_argv': sys.argv[0], + 'sys_argv_name': Path(sys.argv[0]).name, + 'install': 'git' if is_git_dir() else 'pip' if is_pip_package() else 'other', + 'os': ENVIRONMENT} + return event + + if SETTINGS['sync'] and \ + RANK in (-1, 0) and \ + Path(sys.argv[0]).name == 'yolo' and \ + not TESTS_RUNNING and \ + ONLINE and \ + ((is_pip_package() and not is_git_dir()) or + (get_git_origin_url() == 'https://github.com/ultralytics/ultralytics.git' and get_git_branch() == 'main')): + + import sentry_sdk # noqa + sentry_sdk.init( + dsn='https://f805855f03bb4363bc1e16cb7d87b654@o4504521589325824.ingest.sentry.io/4504521592406016', + debug=False, + traces_sample_rate=1.0, + release=__version__, + environment='production', # 'dev' or 'production' + before_send=before_send, + ignore_errors=[KeyboardInterrupt, FileNotFoundError]) + sentry_sdk.set_user({'id': SETTINGS['uuid']}) + + # Disable all sentry logging + for logger in 'sentry_sdk', 'sentry_sdk.errors': + logging.getLogger(logger).setLevel(logging.CRITICAL) + + +def get_settings(file=USER_CONFIG_DIR / 'settings.yaml', version='0.0.2'): + """ + Loads a global Ultralytics settings YAML file or creates one with default values if it does not exist. + + Args: + file (Path): Path to the Ultralytics settings YAML file. Defaults to 'settings.yaml' in the USER_CONFIG_DIR. + version (str): Settings version. If min settings version not met, new default settings will be saved. + + Returns: + dict: Dictionary of settings key-value pairs. + """ + import hashlib + + from ultralytics.yolo.utils.checks import check_version + from ultralytics.yolo.utils.torch_utils import torch_distributed_zero_first + + git_dir = get_git_dir() + root = git_dir or Path() + datasets_root = (root.parent if git_dir and is_dir_writeable(root.parent) else root).resolve() + defaults = { + 'datasets_dir': str(datasets_root / 'datasets'), # default datasets directory. + 'weights_dir': str(root / 'weights'), # default weights directory. + 'runs_dir': str(root / 'runs'), # default runs directory. + 'sync': True, # sync analytics to help with YOLO development + 'uuid': hashlib.sha256(str(uuid.getnode()).encode()).hexdigest(), # anonymized uuid hash + 'settings_version': version} # Ultralytics settings version + + with torch_distributed_zero_first(RANK): + if not file.exists(): + yaml_save(file, defaults) + settings = yaml_load(file) + + # Check that settings keys and types match defaults + correct = \ + settings.keys() == defaults.keys() \ + and all(type(a) == type(b) for a, b in zip(settings.values(), defaults.values())) \ + and check_version(settings['settings_version'], version) + if not correct: + LOGGER.warning('WARNING ⚠️ Ultralytics settings reset to defaults. This is normal and may be due to a ' + 'recent ultralytics package update, but may have overwritten previous settings. ' + f"\nView and update settings with 'yolo settings' or at '{file}'") + settings = defaults # merge **defaults with **settings (prefer **settings) + yaml_save(file, settings) # save updated defaults + + return settings + + +def set_settings(kwargs, file=USER_CONFIG_DIR / 'settings.yaml'): + """ + Function that runs on a first-time ultralytics package installation to set up global settings and create necessary + directories. 
+ """ + SETTINGS.update(kwargs) + yaml_save(file, SETTINGS) + + +# Run below code on yolo/utils init ------------------------------------------------------------------------------------ + +# Check first-install steps +PREFIX = colorstr('Ultralytics: ') +SETTINGS = get_settings() +DATASETS_DIR = Path(SETTINGS['datasets_dir']) # global datasets directory +ENVIRONMENT = 'Colab' if is_colab() else 'Kaggle' if is_kaggle() else 'Jupyter' if is_jupyter() else \ + 'Docker' if is_docker() else platform.system() +TESTS_RUNNING = is_pytest_running() or is_github_actions_ci() +set_sentry() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/autobatch.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/autobatch.py new file mode 100644 index 0000000..d730a98 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/autobatch.py @@ -0,0 +1,90 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch. +""" + +from copy import deepcopy + +import numpy as np +import torch + +from ultralytics.yolo.utils import LOGGER, colorstr +from ultralytics.yolo.utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + """ + Check YOLO training batch size using the autobatch() function. + + Args: + model (torch.nn.Module): YOLO model to check batch size for. + imgsz (int): Image size used for training. + amp (bool): If True, use automatic mixed precision (AMP) for training. + + Returns: + int: Optimal batch size computed using the autobatch() function. + """ + + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.67, batch_size=16): + """ + Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory. + + Args: + model: YOLO model to compute batch size for. + imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640. + fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.67. + batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16. + + Returns: + int: The optimal batch size. 
+ """ + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for imgsz={imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1 or b > 1024: # b outside of safe range + b = batch_size + LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.') + + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b + except Exception as e: + LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.') + return batch_size diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/benchmarks.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/benchmarks.py new file mode 100644 index 0000000..20a1853 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/benchmarks.py @@ -0,0 +1,114 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Benchmark a YOLO model formats for speed and accuracy + +Usage: + from ultralytics.yolo.utils.benchmarks import run_benchmarks + run_benchmarks(model='yolov8n.pt', imgsz=160) + +Format | `format=argument` | Model +--- | --- | --- +PyTorch | - | yolov8n.pt +TorchScript | `torchscript` | yolov8n.torchscript +ONNX | `onnx` | yolov8n.onnx +OpenVINO | `openvino` | yolov8n_openvino_model/ +TensorRT | `engine` | yolov8n.engine +CoreML | `coreml` | yolov8n.mlmodel +TensorFlow SavedModel | `saved_model` | yolov8n_saved_model/ +TensorFlow GraphDef | `pb` | yolov8n.pb +TensorFlow Lite | `tflite` | yolov8n.tflite +TensorFlow Edge TPU | `edgetpu` | yolov8n_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov8n_web_model/ +PaddlePaddle | `paddle` | yolov8n_paddle_model/ +""" + +import platform +import time +from pathlib import Path + +from ultralytics import YOLO +from ultralytics.yolo.engine.exporter import export_formats +from ultralytics.yolo.utils import LINUX, LOGGER, MACOS, ROOT, SETTINGS +from ultralytics.yolo.utils.checks import 
check_yolo +from ultralytics.yolo.utils.downloads import download +from ultralytics.yolo.utils.files import file_size +from ultralytics.yolo.utils.torch_utils import select_device + + +def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=False): + import pandas as pd + pd.options.display.max_columns = 10 + pd.options.display.width = 120 + device = select_device(device, verbose=False) + if isinstance(model, (str, Path)): + model = YOLO(model) + + y = [] + t0 = time.time() + for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows(): # index, (name, format, suffix, CPU, GPU) + emoji, filename = '❌', None # export defaults + try: + if model.task == 'classify': + assert i != 11, 'paddle cls exports coming soon' + assert i != 9 or LINUX, 'Edge TPU export only supported on Linux' + if i == 10: + assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux' + if 'cpu' in device.type: + assert cpu, 'inference not supported on CPU' + if 'cuda' in device.type: + assert gpu, 'inference not supported on GPU' + + # Export + if format == '-': + filename = model.ckpt_path or model.cfg + export = model # PyTorch format + else: + filename = model.export(imgsz=imgsz, format=format, half=half, device=device) # all others + export = YOLO(filename, task=model.task) + assert suffix in str(filename), 'export failed' + emoji = '❎' # indicates export succeeded + + # Predict + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML + if not (ROOT / 'assets/bus.jpg').exists(): + download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets') + export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half) + + # Validate + if model.task == 'detect': + data, key = 'coco128.yaml', 'metrics/mAP50-95(B)' + elif model.task == 'segment': + data, key = 'coco128-seg.yaml', 'metrics/mAP50-95(M)' + elif model.task == 'classify': + data, key = 'imagenet100', 'metrics/accuracy_top5' + + results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False) + metric, speed = results.results_dict[key], results.speed['inference'] + y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)]) + except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}' + LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}') + y.append([name, emoji, round(file_size(filename), 1), None, None]) # mAP, t_inference + + # Print results + check_yolo(device=device) # print system info + df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)']) + + name = Path(model.ckpt_path).name + s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n' + LOGGER.info(s) + with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f: + f.write(s) + + if hard_fail and isinstance(hard_fail, float): + metrics = df[key].array # values to compare to floor + floor = hard_fail # minimum metric floor to pass, i.e. 
= 0.29 mAP for YOLOv5n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: one or more metric(s) < floor {floor}' + + return df + + +if __name__ == '__main__': + benchmark() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/__init__.py new file mode 100644 index 0000000..fb5dfe2 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/__init__.py @@ -0,0 +1,3 @@ +from .base import add_integration_callbacks, default_callbacks + +__all__ = 'add_integration_callbacks', 'default_callbacks' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/base.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/base.py new file mode 100644 index 0000000..b1253e9 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/base.py @@ -0,0 +1,155 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Base callbacks +""" + + +# Trainer callbacks ---------------------------------------------------------------------------------------------------- +def on_pretrain_routine_start(trainer): + pass + + +def on_pretrain_routine_end(trainer): + pass + + +def on_train_start(trainer): + pass + + +def on_train_epoch_start(trainer): + pass + + +def on_train_batch_start(trainer): + pass + + +def optimizer_step(trainer): + pass + + +def on_before_zero_grad(trainer): + pass + + +def on_train_batch_end(trainer): + pass + + +def on_train_epoch_end(trainer): + pass + + +def on_fit_epoch_end(trainer): + pass + + +def on_model_save(trainer): + pass + + +def on_train_end(trainer): + pass + + +def on_params_update(trainer): + pass + + +def teardown(trainer): + pass + + +# Validator callbacks -------------------------------------------------------------------------------------------------- +def on_val_start(validator): + pass + + +def on_val_batch_start(validator): + pass + + +def on_val_batch_end(validator): + pass + + +def on_val_end(validator): + pass + + +# Predictor callbacks -------------------------------------------------------------------------------------------------- +def on_predict_start(predictor): + pass + + +def on_predict_batch_start(predictor): + pass + + +def on_predict_batch_end(predictor): + pass + + +def on_predict_postprocess_end(predictor): + pass + + +def on_predict_end(predictor): + pass + + +# Exporter callbacks --------------------------------------------------------------------------------------------------- +def on_export_start(exporter): + pass + + +def on_export_end(exporter): + pass + + +default_callbacks = { + # Run in trainer + 'on_pretrain_routine_start': [on_pretrain_routine_start], + 'on_pretrain_routine_end': [on_pretrain_routine_end], + 'on_train_start': [on_train_start], + 'on_train_epoch_start': [on_train_epoch_start], + 'on_train_batch_start': [on_train_batch_start], + 'optimizer_step': [optimizer_step], + 'on_before_zero_grad': [on_before_zero_grad], + 'on_train_batch_end': [on_train_batch_end], + 'on_train_epoch_end': [on_train_epoch_end], + 'on_fit_epoch_end': [on_fit_epoch_end], # fit = train + val + 'on_model_save': [on_model_save], + 'on_train_end': [on_train_end], + 'on_params_update': [on_params_update], + 'teardown': [teardown], + + # Run in validator + 'on_val_start': [on_val_start], + 'on_val_batch_start': [on_val_batch_start], + 'on_val_batch_end': [on_val_batch_end], + 
'on_val_end': [on_val_end], + + # Run in predictor + 'on_predict_start': [on_predict_start], + 'on_predict_batch_start': [on_predict_batch_start], + 'on_predict_postprocess_end': [on_predict_postprocess_end], + 'on_predict_batch_end': [on_predict_batch_end], + 'on_predict_end': [on_predict_end], + + # Run in exporter + 'on_export_start': [on_export_start], + 'on_export_end': [on_export_end]} + + +def add_integration_callbacks(instance): + from .clearml import callbacks as clearml_callbacks + from .comet import callbacks as comet_callbacks + from .hub import callbacks as hub_callbacks + from .tensorboard import callbacks as tb_callbacks + + for x in clearml_callbacks, comet_callbacks, hub_callbacks, tb_callbacks: + for k, v in x.items(): + if v not in instance.callbacks[k]: # prevent duplicate callbacks addition + instance.callbacks[k].append(v) # callback[name].append(func) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/clearml.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/clearml.py new file mode 100644 index 0000000..b91e3fc --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/clearml.py @@ -0,0 +1,60 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params + +try: + import clearml + from clearml import Task + + assert clearml.__version__ # verify package is not directory + assert not TESTS_RUNNING # do not log pytest +except (ImportError, AssertionError, AttributeError): + clearml = None + + +def _log_images(imgs_dict, group='', step=0): + task = Task.current_task() + if task: + for k, v in imgs_dict.items(): + task.get_logger().report_image(group, k, step, v) + + +def on_pretrain_routine_start(trainer): + try: + task = Task.init(project_name=trainer.args.project or 'YOLOv8', + task_name=trainer.args.name, + tags=['YOLOv8'], + output_uri=True, + reuse_last_task_id=False, + auto_connect_frameworks={'pytorch': False}) + task.connect(vars(trainer.args), name='General') + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. 
{e}') + + +def on_train_epoch_end(trainer): + if trainer.epoch == 1: + _log_images({f.stem: str(f) for f in trainer.save_dir.glob('train_batch*.jpg')}, 'Mosaic', trainer.epoch) + + +def on_fit_epoch_end(trainer): + task = Task.current_task() + if task and trainer.epoch == 0: + model_info = { + 'model/parameters': get_num_params(trainer.model), + 'model/GFLOPs': round(get_flops(trainer.model), 3), + 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)} + task.connect(model_info, name='Model') + + +def on_train_end(trainer): + task = Task.current_task() + if task: + task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if clearml else {} diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/comet.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/comet.py new file mode 100644 index 0000000..9f193d6 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/comet.py @@ -0,0 +1,54 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING +from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params + +try: + import comet_ml + + assert not TESTS_RUNNING # do not log pytest + assert comet_ml.__version__ # verify package is not directory +except (ImportError, AssertionError, AttributeError): + comet_ml = None + + +def on_pretrain_routine_start(trainer): + try: + experiment = comet_ml.Experiment(project_name=trainer.args.project or 'YOLOv8') + experiment.set_name(trainer.args.name) + experiment.log_parameters(vars(trainer.args)) + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ Comet installed but not initialized correctly, not logging this run. 
{e}') + + +def on_train_epoch_end(trainer): + experiment = comet_ml.get_global_experiment() + if experiment: + experiment.log_metrics(trainer.label_loss_items(trainer.tloss, prefix='train'), step=trainer.epoch + 1) + if trainer.epoch == 1: + for f in trainer.save_dir.glob('train_batch*.jpg'): + experiment.log_image(f, name=f.stem, step=trainer.epoch + 1) + + +def on_fit_epoch_end(trainer): + experiment = comet_ml.get_global_experiment() + if experiment: + experiment.log_metrics(trainer.metrics, step=trainer.epoch + 1) + if trainer.epoch == 0: + model_info = { + 'model/parameters': get_num_params(trainer.model), + 'model/GFLOPs': round(get_flops(trainer.model), 3), + 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)} + experiment.log_metrics(model_info, step=trainer.epoch + 1) + + +def on_train_end(trainer): + experiment = comet_ml.get_global_experiment() + if experiment: + experiment.log_model('YOLOv8', file_or_folder=str(trainer.best), file_name='best.pt', overwrite=True) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_train_epoch_end': on_train_epoch_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_train_end': on_train_end} if comet_ml else {} diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/hub.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/hub.py new file mode 100644 index 0000000..7d127cd --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/hub.py @@ -0,0 +1,83 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import json +from time import time + +from ultralytics.hub.utils import PREFIX, traces +from ultralytics.yolo.utils import LOGGER +from ultralytics.yolo.utils.torch_utils import get_flops, get_num_params + + +def on_pretrain_routine_end(trainer): + session = getattr(trainer, 'hub_session', None) + if session: + # Start timer for upload rate limit + LOGGER.info(f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀') + session.timers = {'metrics': time(), 'ckpt': time()} # start timer on session.rate_limit + + +def on_fit_epoch_end(trainer): + session = getattr(trainer, 'hub_session', None) + if session: + # Upload metrics after val end + all_plots = {**trainer.label_loss_items(trainer.tloss, prefix='train'), **trainer.metrics} + if trainer.epoch == 0: + model_info = { + 'model/parameters': get_num_params(trainer.model), + 'model/GFLOPs': round(get_flops(trainer.model), 3), + 'model/speed(ms)': round(trainer.validator.speed['inference'], 3)} + all_plots = {**all_plots, **model_info} + session.metrics_queue[trainer.epoch] = json.dumps(all_plots) + if time() - session.timers['metrics'] > session.rate_limits['metrics']: + session.upload_metrics() + session.timers['metrics'] = time() # reset timer + session.metrics_queue = {} # reset queue + + +def on_model_save(trainer): + session = getattr(trainer, 'hub_session', None) + if session: + # Upload checkpoints with rate limiting + is_best = trainer.best_fitness == trainer.fitness + if time() - session.timers['ckpt'] > session.rate_limits['ckpt']: + LOGGER.info(f'{PREFIX}Uploading checkpoint {session.model_id}') + session.upload_model(trainer.epoch, trainer.last, is_best) + session.timers['ckpt'] = time() # reset timer + + +def on_train_end(trainer): + session = getattr(trainer, 'hub_session', None) + if session: + # Upload final model and metrics with exponential standoff + LOGGER.info(f'{PREFIX}Syncing final model...') + 
session.upload_model(trainer.epoch, trainer.best, map=trainer.metrics.get('metrics/mAP50-95(B)', 0), final=True) + session.alive = False # stop heartbeats + LOGGER.info(f'{PREFIX}Done ✅\n' + f'{PREFIX}View model at https://hub.ultralytics.com/models/{session.model_id} 🚀') + + +def on_train_start(trainer): + traces(trainer.args, traces_sample_rate=1.0) + + +def on_val_start(validator): + traces(validator.args, traces_sample_rate=1.0) + + +def on_predict_start(predictor): + traces(predictor.args, traces_sample_rate=1.0) + + +def on_export_start(exporter): + traces(exporter.args, traces_sample_rate=1.0) + + +callbacks = { + 'on_pretrain_routine_end': on_pretrain_routine_end, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_model_save': on_model_save, + 'on_train_end': on_train_end, + 'on_train_start': on_train_start, + 'on_val_start': on_val_start, + 'on_predict_start': on_predict_start, + 'on_export_start': on_export_start} diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/tensorboard.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/tensorboard.py new file mode 100644 index 0000000..07d8347 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/callbacks/tensorboard.py @@ -0,0 +1,42 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +from ultralytics.yolo.utils import LOGGER, TESTS_RUNNING, colorstr + +try: + from torch.utils.tensorboard import SummaryWriter + + assert not TESTS_RUNNING # do not log pytest +except (ImportError, AssertionError): + SummaryWriter = None + +writer = None # TensorBoard SummaryWriter instance + + +def _log_scalars(scalars, step=0): + if writer: + for k, v in scalars.items(): + writer.add_scalar(k, v, step) + + +def on_pretrain_routine_start(trainer): + if SummaryWriter: + try: + global writer + writer = SummaryWriter(str(trainer.save_dir)) + prefix = colorstr('TensorBoard: ') + LOGGER.info(f"{prefix}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/") + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ TensorBoard not initialized correctly, not logging this run. {e}') + + +def on_batch_end(trainer): + _log_scalars(trainer.label_loss_items(trainer.tloss, prefix='train'), trainer.epoch + 1) + + +def on_fit_epoch_end(trainer): + _log_scalars(trainer.metrics, trainer.epoch + 1) + + +callbacks = { + 'on_pretrain_routine_start': on_pretrain_routine_start, + 'on_fit_epoch_end': on_fit_epoch_end, + 'on_batch_end': on_batch_end} diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/checks.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/checks.py new file mode 100644 index 0000000..cf54ebd --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/checks.py @@ -0,0 +1,350 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +import contextlib +import glob +import inspect +import math +import os +import platform +import re +import shutil +import subprocess +import urllib +from pathlib import Path +from typing import Optional + +import cv2 +import numpy as np +import pkg_resources as pkg +import psutil +import requests +import torch +from matplotlib import font_manager + +from ultralytics.yolo.utils import (AUTOINSTALL, LOGGER, ONLINE, ROOT, USER_CONFIG_DIR, TryExcept, colorstr, downloads, + emojis, is_colab, is_docker, is_kaggle, is_online, is_pip_package) + + +def is_ascii(s) -> bool: + """ + Check if a string is composed of only ASCII characters. 
+ + Args: + s (str): String to be checked. + + Returns: + bool: True if the string is composed only of ASCII characters, False otherwise. + """ + # Convert list, tuple, None, etc. to string + s = str(s) + + # Check if the string is composed of only ASCII characters + return all(ord(c) < 128 for c in s) + + +def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0): + """ + Verify image size is a multiple of the given stride in each dimension. If the image size is not a multiple of the + stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value. + + Args: + imgsz (int or List[int]): Image size. + stride (int): Stride value. + min_dim (int): Minimum number of dimensions. + floor (int): Minimum allowed value for image size. + + Returns: + List[int]: Updated image size. + """ + # Convert stride to integer if it is a tensor + stride = int(stride.max() if isinstance(stride, torch.Tensor) else stride) + + # Convert image size to list if it is an integer + if isinstance(imgsz, int): + imgsz = [imgsz] + elif isinstance(imgsz, (list, tuple)): + imgsz = list(imgsz) + else: + raise TypeError(f"'imgsz={imgsz}' is of invalid type {type(imgsz).__name__}. " + f"Valid imgsz types are int i.e. 'imgsz=640' or list i.e. 'imgsz=[640,640]'") + + # Apply max_dim + if len(imgsz) > max_dim: + msg = "'train' and 'val' imgsz must be an integer, while 'predict' and 'export' imgsz may be a [h, w] list " \ + "or an integer, i.e. 'yolo export imgsz=640,480' or 'yolo export imgsz=640'" + if max_dim != 1: + raise ValueError(f'imgsz={imgsz} is not a valid image size. {msg}') + LOGGER.warning(f"WARNING ⚠️ updating to 'imgsz={max(imgsz)}'. {msg}") + imgsz = [max(imgsz)] + # Make image size a multiple of the stride + sz = [max(math.ceil(x / stride) * stride, floor) for x in imgsz] + + # Print warning message if image size was updated + if sz != imgsz: + LOGGER.warning(f'WARNING ⚠️ imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}') + + # Add missing dimensions if necessary + sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz + + return sz + + +def check_version(current: str = '0.0.0', + minimum: str = '0.0.0', + name: str = 'version ', + pinned: bool = False, + hard: bool = False, + verbose: bool = False) -> bool: + """ + Check current version against the required minimum version. + + Args: + current (str): Current version. + minimum (str): Required minimum version. + name (str): Name to be used in warning message. + pinned (bool): If True, versions must match exactly. If False, minimum version must be satisfied. + hard (bool): If True, raise an AssertionError if the minimum version is not met. + verbose (bool): If True, print warning message if minimum version is not met. + + Returns: + bool: True if minimum version is met, False otherwise. + """ + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + warning_message = f'WARNING ⚠️ {name}{minimum} is required by YOLOv8, but {name}{current} is currently installed' + if hard: + assert result, emojis(warning_message) # assert min requirements met + if verbose and not result: + LOGGER.warning(warning_message) + return result + + +def check_latest_pypi_version(package_name='ultralytics'): + """ + Returns the latest version of a PyPI package without downloading or installing it. 
+ + Parameters: + package_name (str): The name of the package to find the latest version for. + + Returns: + str: The latest version of the package. + """ + response = requests.get(f'https://pypi.org/pypi/{package_name}/json') + if response.status_code == 200: + return response.json()['info']['version'] + return None + + +def check_pip_update_available(): + """ + Checks if a new version of the ultralytics package is available on PyPI. + + Returns: + bool: True if an update is available, False otherwise. + """ + if ONLINE and is_pip_package(): + with contextlib.suppress(ConnectionError): + from ultralytics import __version__ + latest = check_latest_pypi_version() + if pkg.parse_version(__version__) < pkg.parse_version(latest): # update is available + LOGGER.info(f'New https://pypi.org/project/ultralytics/{latest} available 😃 ' + f"Update with 'pip install -U ultralytics'") + return True + return False + + +def check_font(font='Arial.ttf'): + """ + Find font locally or download to user's configuration directory if it does not already exist. + + Args: + font (str): Path or name of font. + + Returns: + file (Path): Resolved font file path. + """ + name = Path(font).name + + # Check USER_CONFIG_DIR + file = USER_CONFIG_DIR / name + if file.exists(): + return file + + # Check system fonts + matches = [s for s in font_manager.findSystemFonts() if font in s] + if any(matches): + return matches[0] + + # Download to USER_CONFIG_DIR if missing + url = f'https://ultralytics.com/assets/{name}' + if downloads.is_url(url): + downloads.safe_download(url=url, file=file) + return file + + +def check_python(minimum: str = '3.7.0') -> bool: + """ + Check current python version against the required minimum version. + + Args: + minimum (str): Required minimum version of python. + + Returns: + None + """ + return check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +@TryExcept() +def check_requirements(requirements=ROOT.parent / 'requirements.txt', exclude=(), install=True, cmds=''): + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + file = None + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f'{prefix} {file} not found, check failed.' 
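(Reviewer note, not part of this diff: a minimal sketch of how the check helpers added above are typically invoked, assuming this vendored copy is importable as `ultralytics`; the values in the comments follow from the implementations above and are illustrative only.)

```
from ultralytics.yolo.utils.checks import check_imgsz, check_python, check_version

check_python('3.7.0')                                   # asserts the running interpreter is >= 3.7
check_version('8.0.21', '8.0.0', name='ultralytics ')   # True: minimum version satisfied
check_imgsz(641, stride=32)                             # warns and rounds up to 672, the nearest stride multiple
```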
+ with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' + n = 0 + for r in requirements: + try: + pkg.require(r) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + try: # attempt to import (slower but more accurate) + import importlib + importlib.import_module(next(pkg.parse_requirements(r)).name) + except ImportError: + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLOv8 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + try: + assert is_online(), 'AutoUpdate skipped (offline)' + LOGGER.info(subprocess.check_output(f'pip install {s} {cmds}', shell=True).decode()) + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {file or requirements}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + + +def check_suffix(file='yolov8n.pt', suffix='.pt', msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = (suffix, ) + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower().strip() # file suffix + if len(s): + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}, not {s}' + + +def check_yolov5u_filename(file: str, verbose: bool = True): + # Replace legacy YOLOv5 filenames with updated YOLOv5u filenames + if ('yolov3' in file or 'yolov5' in file) and 'u' not in file: + original_file = file + file = re.sub(r'(.*yolov5([nsmlx]))\.pt', '\\1u.pt', file) # i.e. yolov5n.pt -> yolov5nu.pt + file = re.sub(r'(.*yolov5([nsmlx])6)\.pt', '\\1u.pt', file) # i.e. yolov5n6.pt -> yolov5n6u.pt + file = re.sub(r'(.*yolov3(|-tiny|-spp))\.pt', '\\1u.pt', file) # i.e. 
yolov3-spp.pt -> yolov3-sppu.pt + if file != original_file and verbose: + LOGGER.info(f"PRO TIP 💡 Replace 'model={original_file}' with new 'model={file}'.\nYOLOv5 'u' models are " + f'trained with https://github.com/ultralytics/ultralytics and feature improved performance vs ' + f'standard YOLOv5 models trained with https://github.com/ultralytics/yolov5.\n') + return file + + +def check_file(file, suffix='', download=True, hard=True): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file).strip() # convert to string and strip spaces + file = check_yolov5u_filename(file) # yolov5n -> yolov5nu + if not file or ('://' not in file and Path(file).exists()): # exists ('://' check required in Windows Python<3.10) + return file + elif download and file.lower().startswith(('https://', 'http://', 'rtsp://', 'rtmp://')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if Path(file).exists(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + downloads.safe_download(url=url, file=file, unzip=False) + return file + else: # search + files = [] + for d in 'models', 'datasets', 'tracker/cfg', 'yolo/cfg': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + if not files and hard: + raise FileNotFoundError(f"'{file}' does not exist") + elif len(files) > 1 and hard: + raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}") + return files[0] if len(files) else [] # return file + + +def check_yaml(file, suffix=('.yaml', '.yml'), hard=True): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix, hard=hard) + + +def check_imshow(warn=False): + # Check if environment supports image displays + try: + assert not any((is_colab(), is_kaggle(), is_docker())) + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + return False + + +def check_yolo(verbose=True, device=''): + from ultralytics.yolo.utils.torch_utils import select_device + + if is_colab(): + shutil.rmtree('sample_data', ignore_errors=True) # remove colab /sample_data directory + + if verbose: + # System info + gib = 1 << 30 # bytes per GiB + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage('/') + s = f'({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)' + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() + else: + s = '' + + select_device(device=device, newline=False) + LOGGER.info(f'Setup complete ✅ {s}') + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return subprocess.check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except AssertionError: + return '' + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/dist.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/dist.py new file mode 100644 index 0000000..5a49819 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/dist.py @@ -0,0 +1,64 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import os +import re +import shutil +import socket +import sys +import tempfile +from pathlib import Path + +from . import USER_CONFIG_DIR +from .torch_utils import TORCH_1_9 + + +def find_free_network_port() -> int: + """Finds a free port on localhost. + + It is useful in single-node training when we don't want to connect to a real main node but have to set the + `MASTER_PORT` environment variable. + """ + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: + s.bind(('127.0.0.1', 0)) + return s.getsockname()[1] # port + + +def generate_ddp_file(trainer): + module, name = f'{trainer.__class__.__module__}.{trainer.__class__.__name__}'.rsplit('.', 1) + + content = f'''cfg = {vars(trainer.args)} \nif __name__ == "__main__": + from {module} import {name} + + trainer = {name}(cfg=cfg) + trainer.train()''' + (USER_CONFIG_DIR / 'DDP').mkdir(exist_ok=True) + with tempfile.NamedTemporaryFile(prefix='_temp_', + suffix=f'{id(trainer)}.py', + mode='w+', + encoding='utf-8', + dir=USER_CONFIG_DIR / 'DDP', + delete=False) as file: + file.write(content) + return file.name + + +def generate_ddp_command(world_size, trainer): + import __main__ # noqa local import to avoid https://github.com/Lightning-AI/lightning/issues/15218 + if not trainer.resume: + shutil.rmtree(trainer.save_dir) # remove the save_dir + file = str(Path(sys.argv[0]).resolve()) + safe_pattern = re.compile(r'^[a-zA-Z0-9_. 
/\\-]{1,128}$') # allowed characters and maximum of 100 characters + if not (safe_pattern.match(file) and Path(file).exists() and file.endswith('.py')): # using CLI + file = generate_ddp_file(trainer) + dist_cmd = 'torch.distributed.run' if TORCH_1_9 else 'torch.distributed.launch' + port = find_free_network_port() + exclude_args = ['save_dir'] + args = [f'{k}={v}' for k, v in vars(trainer.args).items() if k not in exclude_args] + cmd = [sys.executable, '-m', dist_cmd, '--nproc_per_node', f'{world_size}', '--master_port', f'{port}', file] + args + return cmd, file + + +def ddp_cleanup(trainer, file): + # delete temp file if created + if f'{id(trainer)}.py' in file: # if temp_file suffix in file + os.remove(file) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/downloads.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/downloads.py new file mode 100644 index 0000000..25137ec --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/downloads.py @@ -0,0 +1,200 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import contextlib +import subprocess +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from urllib import parse, request +from zipfile import BadZipFile, ZipFile, is_zipfile + +import requests +import torch +from tqdm import tqdm + +from ultralytics.yolo.utils import LOGGER, checks, is_online + +GITHUB_ASSET_NAMES = [f'yolov8{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] + \ + [f'yolov5{size}u.pt' for size in 'nsmlx'] + \ + [f'yolov3{size}u.pt' for size in ('', '-spp', '-tiny')] +GITHUB_ASSET_STEMS = [Path(k).stem for k in GITHUB_ASSET_NAMES] + + +def is_url(url, check=True): + # Check if string is URL and check if URL exists + with contextlib.suppress(Exception): + url = str(url) + result = parse.urlparse(url) + assert all([result.scheme, result.netloc]) # check if is url + if check: + with request.urlopen(url) as response: + return response.getcode() == 200 # check if exists online + return True + return False + + +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + """ + Unzip a *.zip file to path/, excluding files containing strings in exclude list + Replaces: ZipFile(file).extractall(path=path) + """ + if not (Path(file).exists() and is_zipfile(file)): + raise BadZipFile(f"File '{file}' does not exist or is a bad zip file.") + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + return zipObj.namelist()[0] # return unzip dir + + +def safe_download(url, + file=None, + dir=None, + unzip=True, + delete=False, + curl=False, + retry=3, + min_bytes=1E0, + progress=True): + """ + Function for downloading files from a URL, with options for retrying, unzipping, and deleting the downloaded file. + + Args: + url: str: The URL of the file to be downloaded. + file: str, optional: The filename of the downloaded file. + If not provided, the file will be saved with the same name as the URL. + dir: str, optional: The directory to save the downloaded file. + If not provided, the file will be saved in the current working directory. + unzip: bool, optional: Whether to unzip the downloaded file. Default: True. + delete: bool, optional: Whether to delete the downloaded file after unzipping. Default: False. 
+ curl: bool, optional: Whether to use curl command line tool for downloading. Default: False. + retry: int, optional: The number of times to retry the download in case of failure. Default: 3. + min_bytes: float, optional: The minimum number of bytes that the downloaded file should have, to be considered + a successful download. Default: 1E0. + progress: bool, optional: Whether to display a progress bar during the download. Default: True. + """ + if '://' not in str(url) and Path(url).is_file(): # exists ('://' check required in Windows Python<3.10) + f = Path(url) # filename + else: # does not exist + assert dir or file, 'dir or file required for download' + f = dir / Path(url).name if dir else Path(file) + desc = f'Downloading {url} to {f}' + LOGGER.info(f'{desc}...') + f.parent.mkdir(parents=True, exist_ok=True) # make directory if missing + for i in range(retry + 1): + try: + if curl or i > 0: # curl download with retry, continue + s = 'sS' * (not progress) # silent + r = subprocess.run(['curl', '-#', f'-{s}L', url, '-o', f, '--retry', '3', '-C', '-']).returncode + assert r == 0, f'Curl return value {r}' + else: # urllib download + method = 'torch' + if method == 'torch': + torch.hub.download_url_to_file(url, f, progress=progress) + else: + from ultralytics.yolo.utils import TQDM_BAR_FORMAT + with request.urlopen(url) as response, tqdm(total=int(response.getheader('Content-Length', 0)), + desc=desc, + disable=not progress, + unit='B', + unit_scale=True, + unit_divisor=1024, + bar_format=TQDM_BAR_FORMAT) as pbar: + with open(f, 'wb') as f_opened: + for data in response: + f_opened.write(data) + pbar.update(len(data)) + + if f.exists(): + if f.stat().st_size > min_bytes: + break # success + f.unlink() # remove partial downloads + except Exception as e: + if i == 0 and not is_online(): + raise ConnectionError(f'❌ Download failure for {url}. Environment is not online.') from e + elif i >= retry: + raise ConnectionError(f'❌ Download failure for {url}. Retry limit reached.') from e + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + + if unzip and f.exists() and f.suffix in ('.zip', '.tar', '.gz'): + unzip_dir = dir or f.parent # unzip to dir if provided else unzip in place + LOGGER.info(f'Unzipping {f} to {unzip_dir}...') + if f.suffix == '.zip': + unzip_dir = unzip_file(file=f, path=unzip_dir) # unzip + elif f.suffix == '.tar': + subprocess.run(['tar', 'xf', f, '--directory', unzip_dir], check=True) # unzip + elif f.suffix == '.gz': + subprocess.run(['tar', 'xfz', f, '--directory', unzip_dir], check=True) # unzip + if delete: + f.unlink() # remove zip + return unzip_dir + + +def attempt_download_asset(file, repo='ultralytics/assets', release='v0.0.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v6.2', etc. + from ultralytics.yolo.utils import SETTINGS # scoped for circular import + + def github_assets(repository, version='latest'): + # Return GitHub repo tag and assets (i.e. ['yolov8n.pt', 'yolov8s.pt', ...]) + if version != 'latest': + version = f'tags/{version}' # i.e. 
tags/v6.2 + response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + # YOLOv3/5u updates + file = str(file) + file = checks.check_yolov5u_filename(file) + file = Path(file.strip().replace("'", '')) + if file.exists(): + return str(file) + elif (SETTINGS['weights_dir'] / file).exists(): + return str(SETTINGS['weights_dir'] / file) + else: + # URL specified + name = Path(parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + safe_download(url=url, file=file, min_bytes=1E5) + return file + + # GitHub assets + assets = GITHUB_ASSET_NAMES + try: + tag, assets = github_assets(repo, release) + except Exception: + try: + tag, assets = github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output(['git', 'tag']).decode().split()[-1] + except Exception: + tag = release + + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + if name in assets: + safe_download(url=f'https://github.com/{repo}/releases/download/{tag}/{name}', file=file, min_bytes=1E5) + + return str(file) + + +def download(url, dir=Path.cwd(), unzip=True, delete=False, curl=False, threads=1, retry=3): + # Multithreaded file download and unzip function, used in data.yaml for autodownload + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + with ThreadPool(threads) as pool: + pool.map( + lambda x: safe_download( + url=x[0], dir=x[1], unzip=unzip, delete=delete, curl=curl, retry=retry, progress=threads <= 1), + zip(url, repeat(dir))) + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + safe_download(url=u, dir=dir, unzip=unzip, delete=delete, curl=curl, retry=retry) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/files.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/files.py new file mode 100644 index 0000000..72ebdab --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/files.py @@ -0,0 +1,92 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import contextlib +import glob +import os +import urllib +from datetime import datetime +from pathlib import Path + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + """ + Increments a file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. + + If the path exists and exist_ok is not set to True, the path will be incremented by appending a number and sep to + the end of the path. If the path is a file, the file extension will be preserved. If the path is a directory, the + number will be appended directly to the end of the path. 
If mkdir is set to True, the path will be created as a + directory if it does not already exist. + + Args: + path (str or pathlib.Path): Path to increment. + exist_ok (bool, optional): If True, the path will not be incremented and will be returned as-is. Defaults to False. + sep (str, optional): Separator to use between the path and the incrementation number. Defaults to an empty string. + mkdir (bool, optional): If True, the path will be created as a directory if it does not exist. Defaults to False. + + Returns: + pathlib.Path: Incremented path. + """ + path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + + # Method 1 + for n in range(2, 9999): + p = f'{path}{sep}{n}{suffix}' # increment path + if not os.path.exists(p): # + break + path = Path(p) + + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + + return path + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + if isinstance(path, (str, Path)): + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + return 0.0 + + +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/instance.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/instance.py new file mode 100644 index 0000000..95a62ca --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/instance.py @@ -0,0 +1,336 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from collections import abc +from itertools import repeat +from numbers import Number +from typing import List + +import numpy as np + +from .ops import ltwh2xywh, ltwh2xyxy, resample_segments, xywh2ltwh, xywh2xyxy, xyxy2ltwh, xyxy2xywh + + +def _ntuple(n): + # From PyTorch internals + def parse(x): + return x if isinstance(x, abc.Iterable) else tuple(repeat(x, n)) + + return parse + + +to_4tuple = _ntuple(4) + +# `xyxy` means left top and right bottom +# `xywh` means center x, center y and width, height(yolo format) +# `ltwh` means left top and width, height(coco format) +_formats = ['xyxy', 'xywh', 'ltwh'] + +__all__ = 'Bboxes', # tuple or list + + +class Bboxes: + """Now only numpy is supported""" + + def __init__(self, bboxes, format='xyxy') -> None: + assert format in _formats + bboxes = bboxes[None, :] if bboxes.ndim == 1 else bboxes + assert bboxes.ndim == 2 + assert bboxes.shape[1] == 4 + self.bboxes = bboxes + self.format = format + # self.normalized = normalized + + # def convert(self, format): + # assert format in _formats + # if self.format == format: + # bboxes = self.bboxes + # elif self.format == "xyxy": + # if format == "xywh": + # bboxes = xyxy2xywh(self.bboxes) + # else: + # bboxes = xyxy2ltwh(self.bboxes) + # elif self.format == "xywh": + # if format == "xyxy": + # bboxes = xywh2xyxy(self.bboxes) + # else: + # bboxes = xywh2ltwh(self.bboxes) + # else: + # if format == "xyxy": + # bboxes = ltwh2xyxy(self.bboxes) + # else: + # bboxes = ltwh2xywh(self.bboxes) + # + # return Bboxes(bboxes, format) + + def convert(self, format): + assert format in _formats + if self.format == format: + return + elif self.format == 'xyxy': + bboxes = xyxy2xywh(self.bboxes) if format == 'xywh' else xyxy2ltwh(self.bboxes) + elif self.format == 'xywh': + bboxes = xywh2xyxy(self.bboxes) if format == 'xyxy' else xywh2ltwh(self.bboxes) + else: + bboxes = ltwh2xyxy(self.bboxes) if format == 'xyxy' else ltwh2xywh(self.bboxes) + self.bboxes = bboxes + self.format = format + + def areas(self): + self.convert('xyxy') + return (self.bboxes[:, 2] - self.bboxes[:, 0]) * (self.bboxes[:, 3] - self.bboxes[:, 1]) + + # def denormalize(self, w, h): + # if not self.normalized: + # return + # assert (self.bboxes <= 1.0).all() + # self.bboxes[:, 0::2] *= w + # self.bboxes[:, 1::2] *= h + # self.normalized = False + # + # def normalize(self, w, h): + # if self.normalized: + # return + # assert (self.bboxes > 1.0).any() + # self.bboxes[:, 0::2] /= w + # self.bboxes[:, 1::2] /= h + # self.normalized = True + + def mul(self, scale): + """ + Args: + scale (tuple | List | int): the scale for four coords. + """ + if isinstance(scale, Number): + scale = to_4tuple(scale) + assert isinstance(scale, (tuple, list)) + assert len(scale) == 4 + self.bboxes[:, 0] *= scale[0] + self.bboxes[:, 1] *= scale[1] + self.bboxes[:, 2] *= scale[2] + self.bboxes[:, 3] *= scale[3] + + def add(self, offset): + """ + Args: + offset (tuple | List | int): the offset for four coords. 
+ """ + if isinstance(offset, Number): + offset = to_4tuple(offset) + assert isinstance(offset, (tuple, list)) + assert len(offset) == 4 + self.bboxes[:, 0] += offset[0] + self.bboxes[:, 1] += offset[1] + self.bboxes[:, 2] += offset[2] + self.bboxes[:, 3] += offset[3] + + def __len__(self): + return len(self.bboxes) + + @classmethod + def concatenate(cls, boxes_list: List['Bboxes'], axis=0) -> 'Bboxes': + """ + Concatenates a list of Boxes into a single Bboxes + + Arguments: + boxes_list (list[Bboxes]) + + Returns: + Bboxes: the concatenated Boxes + """ + assert isinstance(boxes_list, (list, tuple)) + if not boxes_list: + return cls(np.empty(0)) + assert all(isinstance(box, Bboxes) for box in boxes_list) + + if len(boxes_list) == 1: + return boxes_list[0] + return cls(np.concatenate([b.bboxes for b in boxes_list], axis=axis)) + + def __getitem__(self, index) -> 'Bboxes': + """ + Args: + index: int, slice, or a BoolArray + + Returns: + Bboxes: Create a new :class:`Bboxes` by indexing. + """ + if isinstance(index, int): + return Bboxes(self.bboxes[index].view(1, -1)) + b = self.bboxes[index] + assert b.ndim == 2, f'Indexing on Bboxes with {index} failed to return a matrix!' + return Bboxes(b) + + +class Instances: + + def __init__(self, bboxes, segments=None, keypoints=None, bbox_format='xywh', normalized=True) -> None: + """ + Args: + bboxes (ndarray): bboxes with shape [N, 4]. + segments (list | ndarray): segments. + keypoints (ndarray): keypoints with shape [N, 17, 2]. + """ + if segments is None: + segments = [] + self._bboxes = Bboxes(bboxes=bboxes, format=bbox_format) + self.keypoints = keypoints + self.normalized = normalized + + if len(segments) > 0: + # list[np.array(1000, 2)] * num_samples + segments = resample_segments(segments) + # (N, 1000, 2) + segments = np.stack(segments, axis=0) + else: + segments = np.zeros((0, 1000, 2), dtype=np.float32) + self.segments = segments + + def convert_bbox(self, format): + self._bboxes.convert(format=format) + + def bbox_areas(self): + self._bboxes.areas() + + def scale(self, scale_w, scale_h, bbox_only=False): + """this might be similar with denormalize func but without normalized sign""" + self._bboxes.mul(scale=(scale_w, scale_h, scale_w, scale_h)) + if bbox_only: + return + self.segments[..., 0] *= scale_w + self.segments[..., 1] *= scale_h + if self.keypoints is not None: + self.keypoints[..., 0] *= scale_w + self.keypoints[..., 1] *= scale_h + + def denormalize(self, w, h): + if not self.normalized: + return + self._bboxes.mul(scale=(w, h, w, h)) + self.segments[..., 0] *= w + self.segments[..., 1] *= h + if self.keypoints is not None: + self.keypoints[..., 0] *= w + self.keypoints[..., 1] *= h + self.normalized = False + + def normalize(self, w, h): + if self.normalized: + return + self._bboxes.mul(scale=(1 / w, 1 / h, 1 / w, 1 / h)) + self.segments[..., 0] /= w + self.segments[..., 1] /= h + if self.keypoints is not None: + self.keypoints[..., 0] /= w + self.keypoints[..., 1] /= h + self.normalized = True + + def add_padding(self, padw, padh): + # handle rect and mosaic situation + assert not self.normalized, 'you should add padding with absolute coordinates.' 
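(Reviewer note, not part of this diff: a small illustrative sketch of the Bboxes API defined above; it assumes the conversion helpers imported from `.ops` behave as their names suggest.)

```
import numpy as np

boxes = Bboxes(np.array([[10., 10., 50., 30.]]), format='xyxy')
boxes.convert('xywh')    # in-place: [[30., 20., 40., 20.]] (cx, cy, w, h)
boxes.mul(2)             # scalar scale, broadcast to all four coords via to_4tuple
print(boxes.areas())     # areas() converts back to xyxy internally before computing width * height
```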
+ self._bboxes.add(offset=(padw, padh, padw, padh)) + self.segments[..., 0] += padw + self.segments[..., 1] += padh + if self.keypoints is not None: + self.keypoints[..., 0] += padw + self.keypoints[..., 1] += padh + + def __getitem__(self, index) -> 'Instances': + """ + Args: + index: int, slice, or a BoolArray + + Returns: + Instances: Create a new :class:`Instances` by indexing. + """ + segments = self.segments[index] if len(self.segments) else self.segments + keypoints = self.keypoints[index] if self.keypoints is not None else None + bboxes = self.bboxes[index] + bbox_format = self._bboxes.format + return Instances( + bboxes=bboxes, + segments=segments, + keypoints=keypoints, + bbox_format=bbox_format, + normalized=self.normalized, + ) + + def flipud(self, h): + if self._bboxes.format == 'xyxy': + y1 = self.bboxes[:, 1].copy() + y2 = self.bboxes[:, 3].copy() + self.bboxes[:, 1] = h - y2 + self.bboxes[:, 3] = h - y1 + else: + self.bboxes[:, 1] = h - self.bboxes[:, 1] + self.segments[..., 1] = h - self.segments[..., 1] + if self.keypoints is not None: + self.keypoints[..., 1] = h - self.keypoints[..., 1] + + def fliplr(self, w): + if self._bboxes.format == 'xyxy': + x1 = self.bboxes[:, 0].copy() + x2 = self.bboxes[:, 2].copy() + self.bboxes[:, 0] = w - x2 + self.bboxes[:, 2] = w - x1 + else: + self.bboxes[:, 0] = w - self.bboxes[:, 0] + self.segments[..., 0] = w - self.segments[..., 0] + if self.keypoints is not None: + self.keypoints[..., 0] = w - self.keypoints[..., 0] + + def clip(self, w, h): + ori_format = self._bboxes.format + self.convert_bbox(format='xyxy') + self.bboxes[:, [0, 2]] = self.bboxes[:, [0, 2]].clip(0, w) + self.bboxes[:, [1, 3]] = self.bboxes[:, [1, 3]].clip(0, h) + if ori_format != 'xyxy': + self.convert_bbox(format=ori_format) + self.segments[..., 0] = self.segments[..., 0].clip(0, w) + self.segments[..., 1] = self.segments[..., 1].clip(0, h) + if self.keypoints is not None: + self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w) + self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h) + + def update(self, bboxes, segments=None, keypoints=None): + new_bboxes = Bboxes(bboxes, format=self._bboxes.format) + self._bboxes = new_bboxes + if segments is not None: + self.segments = segments + if keypoints is not None: + self.keypoints = keypoints + + def __len__(self): + return len(self.bboxes) + + @classmethod + def concatenate(cls, instances_list: List['Instances'], axis=0) -> 'Instances': + """ + Concatenates a list of Boxes into a single Bboxes + + Arguments: + instances_list (list[Bboxes]) + axis + + Returns: + Boxes: the concatenated Boxes + """ + assert isinstance(instances_list, (list, tuple)) + if not instances_list: + return cls(np.empty(0)) + assert all(isinstance(instance, Instances) for instance in instances_list) + + if len(instances_list) == 1: + return instances_list[0] + + use_keypoint = instances_list[0].keypoints is not None + bbox_format = instances_list[0]._bboxes.format + normalized = instances_list[0].normalized + + cat_boxes = np.concatenate([ins.bboxes for ins in instances_list], axis=axis) + cat_segments = np.concatenate([b.segments for b in instances_list], axis=axis) + cat_keypoints = np.concatenate([b.keypoints for b in instances_list], axis=axis) if use_keypoint else None + return cls(cat_boxes, cat_segments, cat_keypoints, bbox_format, normalized) + + @property + def bboxes(self): + return self._bboxes.bboxes diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/loss.py 
b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/loss.py new file mode 100644 index 0000000..e365006 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/loss.py @@ -0,0 +1,56 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .metrics import bbox_iou +from .tal import bbox2dist + + +class VarifocalLoss(nn.Module): + # Varifocal loss by Zhang et al. https://arxiv.org/abs/2008.13367 + def __init__(self): + super().__init__() + + def forward(self, pred_score, gt_score, label, alpha=0.75, gamma=2.0): + weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label + with torch.cuda.amp.autocast(enabled=False): + loss = (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction='none') * + weight).sum() + return loss + + +class BboxLoss(nn.Module): + + def __init__(self, reg_max, use_dfl=False): + super().__init__() + self.reg_max = reg_max + self.use_dfl = use_dfl + + def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask): + # IoU loss + weight = torch.masked_select(target_scores.sum(-1), fg_mask).unsqueeze(-1) + iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True) + loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum + + # DFL loss + if self.use_dfl: + target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max) + loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight + loss_dfl = loss_dfl.sum() / target_scores_sum + else: + loss_dfl = torch.tensor(0.0).to(pred_dist.device) + + return loss_iou, loss_dfl + + @staticmethod + def _df_loss(pred_dist, target): + # Return sum of left and right DFL losses + # Distribution Focal Loss (DFL) proposed in Generalized Focal Loss https://ieeexplore.ieee.org/document/9792391 + tl = target.long() # target left + tr = tl + 1 # target right + wl = tr - target # weight left + wr = 1 - wl # weight right + return (F.cross_entropy(pred_dist, tl.view(-1), reduction='none').view(tl.shape) * wl + + F.cross_entropy(pred_dist, tr.view(-1), reduction='none').view(tl.shape) * wr).mean(-1, keepdim=True) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/metrics.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/metrics.py new file mode 100644 index 0000000..26e2b0e --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/metrics.py @@ -0,0 +1,755 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +""" +Model validation metrics +""" +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch +import torch.nn as nn + +from ultralytics.yolo.utils import LOGGER, SimpleClass, TryExcept + + +# boxes +def box_area(box): + # box = xyxy(4,n) + return (box[2] - box[0]) * (box[3] - box[1]) + + +def bbox_ioa(box1, box2, eps=1e-7): + """Returns the intersection over box2 area given box1, box2. 
Boxes are x1y1x2y2 + box1: np.array of shape(nx4) + box2: np.array of shape(mx4) + returns: np.array of shape(nxm) + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1.T + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2[:, None], b2_x2) - np.maximum(b1_x1[:, None], b2_x1)).clip(0) * \ + (np.minimum(b1_y2[:, None], b2_y2) - np.maximum(b1_y1[:, None], b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def box_iou(box1, box2, eps=1e-7): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Based on https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + eps + + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + + # Intersection area + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means image_w x image_h + Returns: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - 
intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + Returns: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +# losses +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
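(Reviewer note, not part of this diff: a quick worked example of the IoU helpers above; the numbers are illustrative.)

```
import torch

b1 = torch.tensor([[0., 0., 10., 10.]])
b2 = torch.tensor([[5., 5., 15., 15.]])
box_iou(b1, b2)                           # tensor([[0.1429]]): intersection 25 / union 175
bbox_iou(b1, b2, xywh=False)              # same pair in (x1, y1, x2, y2) form, shape (1, 1)
bbox_iou(b1, b2, xywh=False, CIoU=True)   # adds the center-distance and aspect-ratio penalties
```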
+ Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + if detections is None: + gt_classes = labels.int() + for gc in gt_classes: + self.matrix[self.nc, gc] += 1 # background FN + return + + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(int) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # true background + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # predicted background + + def matrix(self): + return self.matrix + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') + def plot(self, normalize=True, save_dir='', names=()): + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (names + ['background']) if labels else 'auto' + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, + ax=ax, + annot=nc < 30, + annot_kws={ + 'size': 8}, + cmap='Blues', + fmt='.2f', + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) + ax.set_xlabel('True') + ax.set_ylabel('Predicted') + ax.set_title('Confusion Matrix') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close(fig) + + def print(self): + for i in range(self.nc + 1): + LOGGER.info(' '.join(map(str, self.matrix[i]))) + + +def smooth(y, f=0.05): + # Box filter of fraction f + nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) + p = np.ones(nf // 2) # ones padding + yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded + return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + + +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') 
# plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + Arguments: + recall: The recall curve (list) + precision: The precision curve (list) + Returns: + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x-axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=Path(), names=(), eps=1e-16, prefix=''): + """ + Computes the average precision per class for object detection evaluation. + + Args: + tp (np.ndarray): Binary array indicating whether the detection is correct (True) or not (False). + conf (np.ndarray): Array of confidence scores of the detections. + pred_cls (np.ndarray): Array of predicted classes of the detections. + target_cls (np.ndarray): Array of true classes of the detections. + plot (bool, optional): Whether to plot PR curves or not. Defaults to False. + save_dir (Path, optional): Directory to save the PR curves. Defaults to an empty path. + names (tuple, optional): Tuple of class names to plot PR curves. Defaults to an empty tuple. + eps (float, optional): A small value to avoid division by zero. Defaults to 1e-16. + prefix (str, optional): A prefix string for saving the plot files. Defaults to an empty string. + + Returns: + (tuple): A tuple of six arrays and one array of unique classes, where: + tp (np.ndarray): True positive counts for each class. + fp (np.ndarray): False positive counts for each class. + p (np.ndarray): Precision values at each confidence threshold. + r (np.ndarray): Recall values at each confidence threshold. 
+ f1 (np.ndarray): F1-score values at each confidence threshold. + ap (np.ndarray): Average precision for each class at different IoU thresholds. + unique_classes (np.ndarray): An array of unique classes that have data. + + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, save_dir / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, save_dir / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, save_dir / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, save_dir / f'{prefix}R_curve.png', names, ylabel='Recall') + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +class Metric(SimpleClass): + """ + Class for computing evaluation metrics for YOLOv8 model. + + Attributes: + p (list): Precision for each class. Shape: (nc,). + r (list): Recall for each class. Shape: (nc,). + f1 (list): F1 score for each class. Shape: (nc,). + all_ap (list): AP scores for all classes and all IoU thresholds. Shape: (nc, 10). + ap_class_index (list): Index of class for each AP score. Shape: (nc,). + nc (int): Number of classes. + + Methods: + ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or []. + mp(): Mean precision of all classes. Returns: Float. + mr(): Mean recall of all classes. Returns: Float. + map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float. + map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float. + map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float. + mean_results(): Mean of results, returns mp, mr, map50, map. + class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i]. + maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,). 
+ fitness(): Model fitness as a weighted combination of metrics. Returns: Float. + update(results): Update metric attributes with new evaluation results. + + """ + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + self.nc = 0 + + @property + def ap50(self): + """AP@0.5 of all classes. + Returns: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Returns: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Returns: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Returns: + float. + """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Returns: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map75(self): + """Mean AP@0.75 of all classes. + Returns: + float. + """ + return self.all_ap[:, 5].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Returns: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return [self.mp, self.mr, self.map50, self.map] + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return self.p[i], self.r[i], self.ap50[i], self.ap[i] + + @property + def maps(self): + """mAP of each class""" + maps = np.zeros(self.nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def fitness(self): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (np.array(self.mean_results()) * w).sum() + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + self.p, self.r, self.f1, self.all_ap, self.ap_class_index = results + + +class DetMetrics(SimpleClass): + """ + This class is a utility class for computing detection metrics such as precision, recall, and mean average precision + (mAP) of an object detection model. + + Args: + save_dir (Path): A path to the directory where the output plots will be saved. Defaults to current directory. + plot (bool): A flag that indicates whether to plot precision-recall curves for each class. Defaults to False. + names (tuple of str): A tuple of strings that represents the names of the classes. Defaults to an empty tuple. + + Attributes: + save_dir (Path): A path to the directory where the output plots will be saved. + plot (bool): A flag that indicates whether to plot the precision-recall curves for each class. + names (tuple of str): A tuple of strings that represents the names of the classes. + box (Metric): An instance of the Metric class for storing the results of the detection metrics. + speed (dict): A dictionary for storing the execution time of different parts of the detection process. + + Methods: + process(tp, conf, pred_cls, target_cls): Updates the metric results with the latest batch of predictions. + keys: Returns a list of keys for accessing the computed detection metrics. + mean_results: Returns a list of mean values for the computed detection metrics. 
+ class_result(i): Returns a list of values for the computed detection metrics for a specific class. + maps: Returns a dictionary of mean average precision (mAP) values for different IoU thresholds. + fitness: Computes the fitness score based on the computed detection metrics. + ap_class_index: Returns a list of class indices sorted by their average precision (AP) values. + results_dict: Returns a dictionary that maps detection metric keys to their computed values. + """ + + def __init__(self, save_dir=Path('.'), plot=False, names=()) -> None: + self.save_dir = save_dir + self.plot = plot + self.names = names + self.box = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, tp, conf, pred_cls, target_cls): + results = ap_per_class(tp, conf, pred_cls, target_cls, plot=self.plot, save_dir=self.save_dir, + names=self.names)[2:] + self.box.nc = len(self.names) + self.box.update(results) + + @property + def keys(self): + return ['metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)'] + + def mean_results(self): + return self.box.mean_results() + + def class_result(self, i): + return self.box.class_result(i) + + @property + def maps(self): + return self.box.maps + + @property + def fitness(self): + return self.box.fitness() + + @property + def ap_class_index(self): + return self.box.ap_class_index + + @property + def results_dict(self): + return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + + +class SegmentMetrics(SimpleClass): + """ + Calculates and aggregates detection and segmentation metrics over a given set of classes. + + Args: + save_dir (Path): Path to the directory where the output plots should be saved. Default is the current directory. + plot (bool): Whether to save the detection and segmentation plots. Default is False. + names (list): List of class names. Default is an empty list. + + Attributes: + save_dir (Path): Path to the directory where the output plots should be saved. + plot (bool): Whether to save the detection and segmentation plots. + names (list): List of class names. + box (Metric): An instance of the Metric class to calculate box detection metrics. + seg (Metric): An instance of the Metric class to calculate mask segmentation metrics. + speed (dict): Dictionary to store the time taken in different phases of inference. + + Methods: + process(tp_m, tp_b, conf, pred_cls, target_cls): Processes metrics over the given set of predictions. + mean_results(): Returns the mean of the detection and segmentation metrics over all the classes. + class_result(i): Returns the detection and segmentation metrics of class `i`. + maps: Returns the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95. + fitness: Returns the fitness scores, which are a single weighted combination of metrics. + ap_class_index: Returns the list of indices of classes used to compute Average Precision (AP). + results_dict: Returns the dictionary containing all the detection and segmentation metrics and fitness score. + """ + + def __init__(self, save_dir=Path('.'), plot=False, names=()) -> None: + self.save_dir = save_dir + self.plot = plot + self.names = names + self.box = Metric() + self.seg = Metric() + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, tp_m, tp_b, conf, pred_cls, target_cls): + """ + Processes the detection and segmentation metrics over the given set of predictions. 
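+        Box and mask metrics are computed independently via `ap_per_class` (with 'Box' and 'Mask'
+        plot prefixes) and stored in `self.box` and `self.seg` respectively.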
+ + Args: + tp_m (list): List of True Positive masks. + tp_b (list): List of True Positive boxes. + conf (list): List of confidence scores. + pred_cls (list): List of predicted classes. + target_cls (list): List of target classes. + """ + + results_mask = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=self.plot, + save_dir=self.save_dir, + names=self.names, + prefix='Mask')[2:] + self.seg.nc = len(self.names) + self.seg.update(results_mask) + results_box = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=self.plot, + save_dir=self.save_dir, + names=self.names, + prefix='Box')[2:] + self.box.nc = len(self.names) + self.box.update(results_box) + + @property + def keys(self): + return [ + 'metrics/precision(B)', 'metrics/recall(B)', 'metrics/mAP50(B)', 'metrics/mAP50-95(B)', + 'metrics/precision(M)', 'metrics/recall(M)', 'metrics/mAP50(M)', 'metrics/mAP50-95(M)'] + + def mean_results(self): + return self.box.mean_results() + self.seg.mean_results() + + def class_result(self, i): + return self.box.class_result(i) + self.seg.class_result(i) + + @property + def maps(self): + return self.box.maps + self.seg.maps + + @property + def fitness(self): + return self.seg.fitness() + self.box.fitness() + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.box.ap_class_index + + @property + def results_dict(self): + return dict(zip(self.keys + ['fitness'], self.mean_results() + [self.fitness])) + + +class ClassifyMetrics(SimpleClass): + """ + Class for computing classification metrics including top-1 and top-5 accuracy. + + Attributes: + top1 (float): The top-1 accuracy. + top5 (float): The top-5 accuracy. + speed (Dict[str, float]): A dictionary containing the time taken for each step in the pipeline. + + Properties: + fitness (float): The fitness of the model, which is equal to top-5 accuracy. + results_dict (Dict[str, Union[float, str]]): A dictionary containing the classification metrics and fitness. + keys (List[str]): A list of keys for the results_dict. + + Methods: + process(targets, pred): Processes the targets and predictions to compute classification metrics. + """ + + def __init__(self) -> None: + self.top1 = 0 + self.top5 = 0 + self.speed = {'preprocess': 0.0, 'inference': 0.0, 'loss': 0.0, 'postprocess': 0.0} + + def process(self, targets, pred): + # target classes and predicted classes + pred, targets = torch.cat(pred), torch.cat(targets) + correct = (targets[:, None] == pred).float() + acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy + self.top1, self.top5 = acc.mean(0).tolist() + + @property + def fitness(self): + return self.top5 + + @property + def results_dict(self): + return dict(zip(self.keys + ['fitness'], [self.top1, self.top5, self.fitness])) + + @property + def keys(self): + return ['metrics/accuracy_top1', 'metrics/accuracy_top5'] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/ops.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/ops.py new file mode 100644 index 0000000..2dd89c3 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/ops.py @@ -0,0 +1,718 @@ +import contextlib +import math +import re +import time + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +import torchvision + +from ultralytics.yolo.utils import LOGGER + +from .metrics import box_iou + + +class Profile(contextlib.ContextDecorator): + """ + YOLOv8 Profile class. 
+ Usage: as a decorator with @Profile() or as a context manager with 'with Profile():' + """ + + def __init__(self, t=0.0): + """ + Initialize the Profile class. + + Args: + t (float): Initial time. Defaults to 0.0. + """ + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + """ + Start timing. + """ + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + """ + Stop timing. + """ + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + """ + Get current time. + """ + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def segment2box(segment, width=640, height=640): + """ + Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + + Args: + segment (torch.Tensor): the segment label + width (int): the width of the image. Defaults to 640 + height (int): The height of the image. Defaults to 640 + + Returns: + (np.ndarray): the minimum and maximum x and y values of the segment. + """ + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros(4) # xyxy + + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + """ + Rescales bounding boxes (in the format of xyxy) from the shape of the image they were originally specified in + (img1_shape) to the shape of a different image (img0_shape). + + Args: + img1_shape (tuple): The shape of the image that the bounding boxes are for, in the format of (height, width). + boxes (torch.Tensor): the bounding boxes of the objects in the image, in the format of (x1, y1, x2, y2) + img0_shape (tuple): the shape of the target image, in the format of (height, width). + ratio_pad (tuple): a tuple of (ratio, pad) for scaling the boxes. If not provided, the ratio and pad will be + calculated based on the size difference between the two images. 
+ + Returns: + boxes (torch.Tensor): The scaled bounding boxes, in the format of (x1, y1, x2, y2) + """ + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def make_divisible(x, divisor): + """ + Returns the nearest number that is divisible by the given divisor. + + Args: + x (int): The number to make divisible. + divisor (int or torch.Tensor): The divisor. + + Returns: + int: The nearest number divisible by the divisor. + """ + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nc=0, # number of classes (optional) + max_time_img=0.05, + max_nms=30000, + max_wh=7680, +): + """ + Perform non-maximum suppression (NMS) on a set of boxes, with support for masks and multiple labels per box. + + Arguments: + prediction (torch.Tensor): A tensor of shape (batch_size, num_boxes, num_classes + 4 + num_masks) + containing the predicted boxes, classes, and masks. The tensor should be in the format + output by a model, such as YOLO. + conf_thres (float): The confidence threshold below which boxes will be filtered out. + Valid values are between 0.0 and 1.0. + iou_thres (float): The IoU threshold below which boxes will be filtered out during NMS. + Valid values are between 0.0 and 1.0. + classes (List[int]): A list of class indices to consider. If None, all classes will be considered. + agnostic (bool): If True, the model is agnostic to the number of classes, and all + classes will be considered as one. + multi_label (bool): If True, each box may have multiple labels. + labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner + list contains the apriori labels for a given image. The list should be in the format + output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2). + max_det (int): The maximum number of boxes to keep after NMS. + nc (int): (optional) The number of classes output by the model. Any indices after this will be considered masks. + max_time_img (float): The maximum time (seconds) for processing one image. + max_nms (int): The maximum number of boxes into torchvision.ops.nms(). + max_wh (int): The maximum box width and height in pixels + + Returns: + (List[torch.Tensor]): A list of length batch_size, where each element is a tensor of + shape (num_boxes, 6 + num_masks) containing the kept boxes, with columns + (x1, y1, x2, y2, confidence, class, mask1, mask2, ...). 
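+
+    Example:
+        A minimal illustrative sketch; the random tensor only stands in for raw model output
+        (an 80-class, mask-free model is assumed, laid out as (batch, 4 box coords + class scores,
+        num_anchors)):
+
+            >>> prediction = torch.rand(1, 84, 6300)
+            >>> results = non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45)
+            >>> boxes = results[0]  # (num_kept_boxes, 6): x1, y1, x2, y2, confidence, class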
+ """ + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + if isinstance(prediction, (list, tuple)): # YOLOv8 model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() + bs = prediction.shape[0] # batch size + nc = nc or (prediction.shape[1] - 4) # number of classes + nm = prediction.shape[1] - nc - 4 + mi = 4 + nc # mask start index + xc = prediction[:, 4:mi].amax(1) > conf_thres # candidates + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + time_limit = 0.5 + max_time_img * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[:, 2:4] < min_wh) | (x[:, 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x.transpose(0, -1)[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[range(len(lb)), lb[:, 0].long() + 4] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Detections matrix nx6 (xyxy, conf, cls) + box, cls, mask = x.split((4, nc, nm), 1) + box = xywh2xyxy(box) # center_x, center_y, width, height) to (x1, y1, x2, y2) + if multi_label: + i, j = (cls > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 4 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = cls.max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + i = i[:max_det] # limit detections + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def clip_boxes(boxes, shape): + """ + It takes a list of bounding boxes and a shape 
(height, width) and clips the bounding boxes to the + shape + + Args: + boxes (torch.Tensor): the bounding boxes to clip + shape (tuple): the shape of the image + """ + if isinstance(boxes, torch.Tensor): # faster individually + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 + + +def clip_coords(boxes, shape): + """ + Clip bounding xyxy bounding boxes to image shape (height, width). + + Args: + boxes (torch.Tensor or numpy.ndarray): Bounding boxes to be clipped. + shape (tuple): The shape of the image. (height, width) + + Returns: + None + + Note: + The input `boxes` is modified in-place, there is no return value. + """ + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + Takes a mask, and resizes it to the original image size + + Args: + im1_shape (tuple): model input shape, [h, w] + masks (torch.Tensor): [h, w, num] + im0_shape (tuple): the original image shape + ratio_pad (tuple): the ratio of the padding to the original image. + + Returns: + masks (torch.Tensor): The masks that are being returned. + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def xyxy2xywh(x): + """ + Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format. + + Args: + x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x, y, width, height) format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height + return y + + +def xywh2xyxy(x): + """ + Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the + top-left corner and (x2, y2) is the bottom-right corner. 
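+    Concretely: x1 = x - w/2, y1 = y - h/2, x2 = x + w/2, y2 = y + h/2.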
+ + Args: + x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x, y, width, height) format. + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x1, y1, x2, y2) format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] + x[..., 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + """ + Convert normalized bounding box coordinates to pixel coordinates. + + Args: + x (np.ndarray) or (torch.Tensor): The bounding box coordinates. + w (int): Width of the image. Defaults to 640 + h (int): Height of the image. Defaults to 640 + padw (int): Padding width. Defaults to 0 + padh (int): Padding height. Defaults to 0 + Returns: + y (np.ndarray) or (torch.Tensor): The coordinates of the bounding box in the format [x1, y1, x2, y2] where + x1,y1 is the top-left corner, x2,y2 is the bottom-right corner of the bounding box. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + """ + Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height, normalized) format. + x, y, width and height are normalized to image dimensions + + Args: + x (np.ndarray) or (torch.Tensor): The input bounding box coordinates in (x1, y1, x2, y2) format. + w (int): The width of the image. Defaults to 640 + h (int): The height of the image. Defaults to 640 + clip (bool): If True, the boxes will be clipped to the image boundaries. Defaults to False + eps (float): The minimum value of the box's width and height. Defaults to 0.0 + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in (x, y, width, height, normalized) format + """ + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + """ + Convert normalized coordinates to pixel coordinates of shape (n,2) + + Args: + x (np.ndarray) or (torch.Tensor): The input tensor of normalized bounding box coordinates + w (int): The width of the image. Defaults to 640 + h (int): The height of the image. Defaults to 640 + padw (int): The width of the padding. Defaults to 0 + padh (int): The height of the padding. Defaults to 0 + Returns: + y (np.ndarray) or (torch.Tensor): The x and y coordinates of the top left corner of the bounding box + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y + return y + + +def xywh2ltwh(x): + """ + Convert the bounding box format from [x, y, w, h] to [x1, y1, w, h], where x1, y1 are the top-left coordinates. 
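+    Only the box origin moves: x1 = x - w/2 and y1 = y - h/2, while width and height stay unchanged.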
+ + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding box coordinates in the xywh format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + return y + + +def xyxy2ltwh(x): + """ + Convert nx4 bounding boxes from [x1, y1, x2, y2] to [x1, y1, w, h], where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray) or (torch.Tensor): The input tensor with the bounding boxes coordinates in the xyxy format + Returns: + y (np.ndarray) or (torch.Tensor): The bounding box coordinates in the xyltwh format. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def ltwh2xywh(x): + """ + Convert nx4 boxes from [x1, y1, w, h] to [x, y, w, h] where xy1=top-left, xy=center + + Args: + x (torch.Tensor): the input tensor + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] + x[:, 2] / 2 # center x + y[:, 1] = x[:, 1] + x[:, 3] / 2 # center y + return y + + +def ltwh2xyxy(x): + """ + It converts the bounding box from [x1, y1, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + + Args: + x (np.ndarray) or (torch.Tensor): the input image + + Returns: + y (np.ndarray) or (torch.Tensor): the xyxy coordinates of the bounding boxes. + """ + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 2] = x[:, 2] + x[:, 0] # width + y[:, 3] = x[:, 3] + x[:, 1] # height + return y + + +def segments2boxes(segments): + """ + It converts segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh) + + Args: + segments (list): list of segments, each segment is a list of points, each point is a list of x, y coordinates + + Returns: + (np.ndarray): the xywh coordinates of the bounding boxes. + """ + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + """ + Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each. + + Args: + segments (list): a list of (n,2) arrays, where n is the number of points in the segment. + n (int): number of points to resample the segment to. Defaults to 1000 + + Returns: + segments (list): the resampled segments. + """ + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def crop_mask(masks, boxes): + """ + It takes a mask and a bounding box, and returns a mask that is cropped to the bounding box + + Args: + masks (torch.Tensor): [h, w, n] tensor of masks + boxes (torch.Tensor): [n, 4] tensor of bbox coordinates in relative point form + + Returns: + (torch.Tensor): The masks are being cropped to the bounding box. 
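+
+    Example:
+        A small illustrative sketch with made-up shapes (two masks stacked along the first dimension
+        and two boxes in pixel coordinates):
+
+            >>> masks = torch.rand(2, 160, 160)
+            >>> boxes = torch.tensor([[10., 10., 50., 50.], [30., 30., 120., 120.]])
+            >>> cropped = crop_mask(masks, boxes)  # same shape as masks, zeroed outside each box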
+ """ + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + It takes the output of the mask head, and applies the mask to the bounding boxes. This produces masks of higher + quality but is slower. + + Args: + protos (torch.Tensor): [mask_dim, mask_h, mask_w] + masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms + bboxes (torch.Tensor): [n, 4], n is number of masks after nms + shape (tuple): the size of the input image (h,w) + + Returns: + (torch.Tensor): The upsampled masks. + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + It takes the output of the mask head, and applies the mask to the bounding boxes. This is faster but produces + downsampled quality of mask + + Args: + protos (torch.Tensor): [mask_dim, mask_h, mask_w] + masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms + bboxes (torch.Tensor): [n, 4], n is number of masks after nms + shape (tuple): the size of the input image (h,w) + + Returns: + (torch.Tensor): The processed masks. + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def process_mask_native(protos, masks_in, bboxes, shape): + """ + It takes the output of the mask head, and crops it after upsampling to the bounding boxes. + + Args: + protos (torch.Tensor): [mask_dim, mask_h, mask_w] + masks_in (torch.Tensor): [n, mask_dim], n is number of masks after nms + bboxes (torch.Tensor): [n, 4], n is number of masks after nms + shape (tuple): the size of the input image (h,w) + + Returns: + masks (torch.Tensor): The returned masks with dimensions [h, w, n] + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(mh - pad[1]), int(mw - pad[0]) + masks = masks[:, top:bottom, left:right] + + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): + """ + Rescale segment coordinates (xyxy) from img1_shape to img0_shape + + Args: + img1_shape (tuple): The shape of the image that the segments are from. 
+ segments (torch.Tensor): the segments to be scaled + img0_shape (tuple): the shape of the image that the segmentation is being applied to + ratio_pad (tuple): the ratio of the image size to the padded image size. + normalize (bool): If True, the coordinates will be normalized to the range [0, 1]. Defaults to False + + Returns: + segments (torch.Tensor): the segmented image. + """ + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height + return segments + + +def masks2segments(masks, strategy='largest'): + """ + It takes a list of masks(n,h,w) and returns a list of segments(n,xy) + + Args: + masks (torch.Tensor): the output of the model, which is a tensor of shape (batch_size, 160, 160) + strategy (str): 'concat' or 'largest'. Defaults to largest + + Returns: + segments (List): list of segment masks + """ + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found + segments.append(c.astype('float32')) + return segments + + +def clip_segments(segments, shape): + """ + It takes a list of line segments (x1,y1,x2,y2) and clips them to the image shape (height, width) + + Args: + segments (list): a list of segments, each segment is a list of points, each point is a list of x,y + coordinates + shape (tuple): the shape of the image + """ + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y + + +def clean_str(s): + """ + Cleans a string by replacing special characters with underscore _ + + Args: + s (str): a string needing special characters replaced + + Returns: + (str): a string with special characters replaced by an underscore _ + """ + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/plotting.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/plotting.py new file mode 100644 index 0000000..8418495 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/plotting.py @@ -0,0 +1,371 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import torch +from PIL import Image, ImageDraw, ImageFont +from PIL import __version__ as pil_version + +from ultralytics.yolo.utils import LOGGER, TryExcept, threaded + +from .checks import check_font, check_version, is_ascii +from .files import increment_path +from .ops import 
clip_coords, scale_image, xywh2xyxy, xyxy2xywh + +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +class Annotator: + # YOLOv8 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.pil_9_2_0_check = check_version(pil_version, '9.2.0') # deprecation check + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + try: + font = check_font('Arial.Unicode.ttf' if non_ascii else font) + size = font_size or max(round(sum(self.im.size) / 2 * 0.035), 12) + self.font = ImageFont.truetype(str(font), size) + except Exception: + self.font = ImageFont.load_default() + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if isinstance(box, torch.Tensor): + box = box.tolist() + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + if self.pil_9_2_0_check: + _, _, w, h = self.font.getbbox(label) # text width, height (New) + else: + w, h = self.font.getsize(label) # text width, height (Old, deprecated in 9.2.0) + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): + """Plot masks at once. 
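+        Masks are alpha-blended onto the GPU image tensor and the composited result is written back
+        into self.im (rescaled to the original image size unless retina_masks is True).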
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + if im_gpu.device != masks.device: + im_gpu = im_gpu.to(masks.device) + colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255) + im_mask_np = im_mask.byte().cpu().numpy() + self.im[:] = im_mask_np if retina_masks else scale_image(im_gpu.shape, im_mask_np, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): + # Add text to image (PIL-only) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + if self.pil: + self.draw.text(xy, text, fill=txt_color, font=self.font) + else: + tf = max(self.lw - 1, 1) # font thickness + cv2.putText(self.im, text, xy, 0, self.lw / 3, txt_color, thickness=tf, lineType=cv2.LINE_AA) + + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 +def plot_labels(boxes, cls, names=(), save_dir=Path('')): + import pandas as pd + import seaborn as sn + + # plot dataset labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... 
") + b = boxes.transpose() # classes, boxes + nc = int(cls.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(cls, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + with contextlib.suppress(Exception): # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + boxes[:, 0:2] = 0.5 # center + boxes = xywh2xyxy(boxes) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, box in zip(cls[:1000], boxes[:1000]): + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. Save and/or return crop + b = xyxy2xywh(xyxy.view(-1, 4)) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_coords(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB + return crop + + +@threaded +def plot_images(images, + batch_idx, + cls, + bboxes, + masks=np.zeros(0, dtype=np.uint8), + paths=None, + fname='images.jpg', + names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(cls, torch.Tensor): + cls = cls.cpu().numpy() + if isinstance(bboxes, torch.Tensor): + bboxes = bboxes.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + if isinstance(batch_idx, torch.Tensor): + batch_idx = batch_idx.cpu().numpy() + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(cls) > 0: + idx = batch_idx == i + + boxes = xywh2xyxy(bboxes[idx, :4]).T + classes = cls[idx].astype('int') + labels = bboxes.shape[1] == 4 # labels if no conf column + conf = None if labels else bboxes[idx, 4] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + c = classes[j] + color = colors(c) + c = names.get(c, c) if names else c + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{c}' if labels else f'{c} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if idx.shape[0] == masks.shape[0]: # overlap_masks=False + image_masks = masks[idx] + else: # overlap_masks=True + image_masks = masks[[i]] # (1, 640, 640) + nl = idx.sum() + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results(file='path/to/results.csv', dir='', segment=False): + # Plot training results.csv. 
Usage: from utils.plots import *; plot_results('path/to/results.csv') + import pandas as pd + save_dir = Path(file).parent if file else Path(dir) + if segment: + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12] + else: + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7] + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for f in files: + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate(index): + y = data.values[:, j].astype('float') + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.warning(f'WARNING: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting + targets = [] + for i, o in enumerate(output): + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + targets = torch.cat(targets, 0).numpy() + return targets[:, 0], targets[:, 1], targets[:, 2:] diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/tal.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/tal.py new file mode 100644 index 0000000..0b71414 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/tal.py @@ -0,0 +1,222 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .checks import check_version +from .metrics import bbox_iou + +TORCH_1_10 = check_version(torch.__version__, '1.10.0') + + +def select_candidates_in_gts(xy_centers, gt_bboxes, eps=1e-9): + """select the positive anchor center in gt + + Args: + xy_centers (Tensor): shape(h*w, 4) + gt_bboxes (Tensor): shape(b, n_boxes, 4) + Return: + (Tensor): shape(b, n_boxes, h*w) + """ + n_anchors = xy_centers.shape[0] + bs, n_boxes, _ = gt_bboxes.shape + lt, rb = gt_bboxes.view(-1, 1, 4).chunk(2, 2) # left-top, right-bottom + bbox_deltas = torch.cat((xy_centers[None] - lt, rb - xy_centers[None]), dim=2).view(bs, n_boxes, n_anchors, -1) + # return (bbox_deltas.min(3)[0] > eps).to(gt_bboxes.dtype) + return bbox_deltas.amin(3).gt_(eps) + + +def select_highest_overlaps(mask_pos, overlaps, n_max_boxes): + """if an anchor box is assigned to multiple gts, + the one with the highest iou will be selected. 
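+    Conflicts are resolved with a one-hot of overlaps.argmax(1), so each such anchor ends up assigned
+    to exactly one gt box and fg_mask is recomputed accordingly.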
+ + Args: + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + overlaps (Tensor): shape(b, n_max_boxes, h*w) + Return: + target_gt_idx (Tensor): shape(b, h*w) + fg_mask (Tensor): shape(b, h*w) + mask_pos (Tensor): shape(b, n_max_boxes, h*w) + """ + # (b, n_max_boxes, h*w) -> (b, h*w) + fg_mask = mask_pos.sum(-2) + if fg_mask.max() > 1: # one anchor is assigned to multiple gt_bboxes + mask_multi_gts = (fg_mask.unsqueeze(1) > 1).repeat([1, n_max_boxes, 1]) # (b, n_max_boxes, h*w) + max_overlaps_idx = overlaps.argmax(1) # (b, h*w) + is_max_overlaps = F.one_hot(max_overlaps_idx, n_max_boxes) # (b, h*w, n_max_boxes) + is_max_overlaps = is_max_overlaps.permute(0, 2, 1).to(overlaps.dtype) # (b, n_max_boxes, h*w) + mask_pos = torch.where(mask_multi_gts, is_max_overlaps, mask_pos) # (b, n_max_boxes, h*w) + fg_mask = mask_pos.sum(-2) + # find each grid serve which gt(index) + target_gt_idx = mask_pos.argmax(-2) # (b, h*w) + return target_gt_idx, fg_mask, mask_pos + + +class TaskAlignedAssigner(nn.Module): + + def __init__(self, topk=13, num_classes=80, alpha=1.0, beta=6.0, eps=1e-9): + super().__init__() + self.topk = topk + self.num_classes = num_classes + self.bg_idx = num_classes + self.alpha = alpha + self.beta = beta + self.eps = eps + + @torch.no_grad() + def forward(self, pd_scores, pd_bboxes, anc_points, gt_labels, gt_bboxes, mask_gt): + """This code referenced to + https://github.com/Nioolek/PPYOLOE_pytorch/blob/master/ppyoloe/assigner/tal_assigner.py + + Args: + pd_scores (Tensor): shape(bs, num_total_anchors, num_classes) + pd_bboxes (Tensor): shape(bs, num_total_anchors, 4) + anc_points (Tensor): shape(num_total_anchors, 2) + gt_labels (Tensor): shape(bs, n_max_boxes, 1) + gt_bboxes (Tensor): shape(bs, n_max_boxes, 4) + mask_gt (Tensor): shape(bs, n_max_boxes, 1) + Returns: + target_labels (Tensor): shape(bs, num_total_anchors) + target_bboxes (Tensor): shape(bs, num_total_anchors, 4) + target_scores (Tensor): shape(bs, num_total_anchors, num_classes) + fg_mask (Tensor): shape(bs, num_total_anchors) + """ + self.bs = pd_scores.size(0) + self.n_max_boxes = gt_bboxes.size(1) + + if self.n_max_boxes == 0: + device = gt_bboxes.device + return (torch.full_like(pd_scores[..., 0], self.bg_idx).to(device), torch.zeros_like(pd_bboxes).to(device), + torch.zeros_like(pd_scores).to(device), torch.zeros_like(pd_scores[..., 0]).to(device), + torch.zeros_like(pd_scores[..., 0]).to(device)) + + mask_pos, align_metric, overlaps = self.get_pos_mask(pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, + mask_gt) + + target_gt_idx, fg_mask, mask_pos = select_highest_overlaps(mask_pos, overlaps, self.n_max_boxes) + + # assigned target + target_labels, target_bboxes, target_scores = self.get_targets(gt_labels, gt_bboxes, target_gt_idx, fg_mask) + + # normalize + align_metric *= mask_pos + pos_align_metrics = align_metric.amax(axis=-1, keepdim=True) # b, max_num_obj + pos_overlaps = (overlaps * mask_pos).amax(axis=-1, keepdim=True) # b, max_num_obj + norm_align_metric = (align_metric * pos_overlaps / (pos_align_metrics + self.eps)).amax(-2).unsqueeze(-1) + target_scores = target_scores * norm_align_metric + + return target_labels, target_bboxes, target_scores, fg_mask.bool(), target_gt_idx + + def get_pos_mask(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, anc_points, mask_gt): + # get in_gts mask, (b, max_num_obj, h*w) + mask_in_gts = select_candidates_in_gts(anc_points, gt_bboxes) + # get anchor_align metric, (b, max_num_obj, h*w) + align_metric, overlaps = self.get_box_metrics(pd_scores, pd_bboxes, 
gt_labels, gt_bboxes, mask_in_gts * mask_gt) + # get topk_metric mask, (b, max_num_obj, h*w) + mask_topk = self.select_topk_candidates(align_metric, topk_mask=mask_gt.repeat([1, 1, self.topk]).bool()) + # merge all mask to a final mask, (b, max_num_obj, h*w) + mask_pos = mask_topk * mask_in_gts * mask_gt + + return mask_pos, align_metric, overlaps + + def get_box_metrics(self, pd_scores, pd_bboxes, gt_labels, gt_bboxes, mask_gt): + na = pd_bboxes.shape[-2] + mask_gt = mask_gt.bool() # b, max_num_obj, h*w + overlaps = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_bboxes.dtype, device=pd_bboxes.device) + bbox_scores = torch.zeros([self.bs, self.n_max_boxes, na], dtype=pd_scores.dtype, device=pd_scores.device) + + ind = torch.zeros([2, self.bs, self.n_max_boxes], dtype=torch.long) # 2, b, max_num_obj + ind[0] = torch.arange(end=self.bs).view(-1, 1).repeat(1, self.n_max_boxes) # b, max_num_obj + ind[1] = gt_labels.long().squeeze(-1) # b, max_num_obj + # get the scores of each grid for each gt cls + bbox_scores[mask_gt] = pd_scores[ind[0], :, ind[1]][mask_gt] # b, max_num_obj, h*w + + # (b, max_num_obj, 1, 4), (b, 1, h*w, 4) + pd_boxes = pd_bboxes.unsqueeze(1).repeat(1, self.n_max_boxes, 1, 1)[mask_gt] + gt_boxes = gt_bboxes.unsqueeze(2).repeat(1, 1, na, 1)[mask_gt] + overlaps[mask_gt] = bbox_iou(gt_boxes, pd_boxes, xywh=False, CIoU=True).squeeze(-1).clamp(0) + + align_metric = bbox_scores.pow(self.alpha) * overlaps.pow(self.beta) + return align_metric, overlaps + + def select_topk_candidates(self, metrics, largest=True, topk_mask=None): + """ + Args: + metrics: (b, max_num_obj, h*w). + topk_mask: (b, max_num_obj, topk) or None + """ + + num_anchors = metrics.shape[-1] # h*w + # (b, max_num_obj, topk) + topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest) + if topk_mask is None: + topk_mask = (topk_metrics.max(-1, keepdim=True) > self.eps).tile([1, 1, self.topk]) + # (b, max_num_obj, topk) + topk_idxs[~topk_mask] = 0 + # (b, max_num_obj, topk, h*w) -> (b, max_num_obj, h*w) + is_in_topk = torch.zeros(metrics.shape, dtype=torch.long, device=metrics.device) + for it in range(self.topk): + is_in_topk += F.one_hot(topk_idxs[:, :, it], num_anchors) + # is_in_topk = F.one_hot(topk_idxs, num_anchors).sum(-2) + # filter invalid bboxes + is_in_topk = torch.where(is_in_topk > 1, 0, is_in_topk) + return is_in_topk.to(metrics.dtype) + + def get_targets(self, gt_labels, gt_bboxes, target_gt_idx, fg_mask): + """ + Args: + gt_labels: (b, max_num_obj, 1) + gt_bboxes: (b, max_num_obj, 4) + target_gt_idx: (b, h*w) + fg_mask: (b, h*w) + """ + + # assigned target labels, (b, 1) + batch_ind = torch.arange(end=self.bs, dtype=torch.int64, device=gt_labels.device)[..., None] + target_gt_idx = target_gt_idx + batch_ind * self.n_max_boxes # (b, h*w) + target_labels = gt_labels.long().flatten()[target_gt_idx] # (b, h*w) + + # assigned target boxes, (b, max_num_obj, 4) -> (b, h*w) + target_bboxes = gt_bboxes.view(-1, 4)[target_gt_idx] + + # assigned target scores + target_labels.clamp(0) + target_scores = F.one_hot(target_labels, self.num_classes) # (b, h*w, 80) + fg_scores_mask = fg_mask[:, :, None].repeat(1, 1, self.num_classes) # (b, h*w, 80) + target_scores = torch.where(fg_scores_mask > 0, target_scores, 0) + + return target_labels, target_bboxes, target_scores + + +def make_anchors(feats, strides, grid_cell_offset=0.5): + """Generate anchors from features.""" + anchor_points, stride_tensor = [], [] + assert feats is not None + dtype, device = feats[0].dtype, feats[0].device + for 
i, stride in enumerate(strides): + _, _, h, w = feats[i].shape + sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset # shift x + sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset # shift y + sy, sx = torch.meshgrid(sy, sx, indexing='ij') if TORCH_1_10 else torch.meshgrid(sy, sx) + anchor_points.append(torch.stack((sx, sy), -1).view(-1, 2)) + stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device)) + return torch.cat(anchor_points), torch.cat(stride_tensor) + + +def dist2bbox(distance, anchor_points, xywh=True, dim=-1): + """Transform distance(ltrb) to box(xywh or xyxy).""" + lt, rb = distance.chunk(2, dim) + x1y1 = anchor_points - lt + x2y2 = anchor_points + rb + if xywh: + c_xy = (x1y1 + x2y2) / 2 + wh = x2y2 - x1y1 + return torch.cat((c_xy, wh), dim) # xywh bbox + return torch.cat((x1y1, x2y2), dim) # xyxy bbox + + +def bbox2dist(anchor_points, bbox, reg_max): + """Transform bbox(xyxy) to dist(ltrb).""" + x1y1, x2y2 = bbox.chunk(2, -1) + return torch.cat((anchor_points - x1y1, x2y2 - anchor_points), -1).clamp(0, reg_max - 0.01) # dist (lt, rb) diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/torch_utils.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/torch_utils.py new file mode 100644 index 0000000..e07b893 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/utils/torch_utils.py @@ -0,0 +1,442 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import math +import os +import platform +import random +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path +from typing import Union + +import numpy as np +import thop +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F + +from ultralytics.yolo.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, RANK, __version__ +from ultralytics.yolo.utils.checks import check_version + +TORCH_1_9 = check_version(torch.__version__, '1.9.0') +TORCH_1_11 = check_version(torch.__version__, '1.11.0') +TORCH_1_12 = check_version(torch.__version__, '1.12.0') +TORCH_2_X = check_version(torch.__version__, minimum='2.0') + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + # Decorator to make all processes in distributed training wait for each local_master to do something + initialized = torch.distributed.is_available() and torch.distributed.is_initialized() + if initialized and local_rank not in (-1, 0): + dist.barrier(device_ids=[local_rank]) + yield + if initialized and local_rank == 0: + dist.barrier(device_ids=[0]) + + +def smart_inference_mode(): + # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator + def decorate(fn): + return (torch.inference_mode if TORCH_1_9 else torch.no_grad)()(fn) + + return decorate + + +def select_device(device='', batch=0, newline=False, verbose=True): + # device = None or 'cpu' or 0 or '0' or '0,1,2,3' + s = f'Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} ' + device = str(device).lower() + for remove in 'cuda:', 'none', '(', ')', '[', ']', "'", ' ': + device = device.replace(remove, '') # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1' + cpu = device == 'cpu' + mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + if cpu or mps: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + 
visible = os.environ.get('CUDA_VISIBLE_DEVICES', None) + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + if not (torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', ''))): + LOGGER.info(s) + install = 'See https://pytorch.org/get-started/locally/ for up-to-date torch install instructions if no ' \ + 'CUDA devices are seen by torch.\n' if torch.cuda.device_count() == 0 else '' + raise ValueError(f"Invalid CUDA 'device={device}' requested." + f" Use 'device=cpu' or pass valid CUDA device(s) if available," + f" i.e. 'device=0' or 'device=0,1,2,3' for Multi-GPU.\n" + f'\ntorch.cuda.is_available(): {torch.cuda.is_available()}' + f'\ntorch.cuda.device_count(): {torch.cuda.device_count()}' + f"\nos.environ['CUDA_VISIBLE_DEVICES']: {visible}\n" + f'{install}') + + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + n = len(devices) # device count + if n > 1 and batch > 0 and batch % n != 0: # check batch_size is divisible by device_count + raise ValueError(f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or " + f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}.") + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB + arg = 'cuda:0' + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available() and TORCH_2_X: + # prefer MPS if available + s += 'MPS\n' + arg = 'mps' + else: # revert to CPU + s += 'CPU\n' + arg = 'cpu' + + if verbose and RANK == -1: + LOGGER.info(s if newline else s.rstrip()) + return torch.device(arg) + + +def time_sync(): + # PyTorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def fuse_conv_and_bn(conv, bn): + # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # Prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def fuse_deconv_and_bn(deconv, bn): + # Fuse ConvTranspose2d() and BatchNorm2d() layers + fuseddconv = nn.ConvTranspose2d(deconv.in_channels, + deconv.out_channels, + kernel_size=deconv.kernel_size, + stride=deconv.stride, + padding=deconv.padding, + output_padding=deconv.output_padding, + dilation=deconv.dilation, + groups=deconv.groups, + bias=True).requires_grad_(False).to(deconv.weight.device) + + # prepare filters + w_deconv = deconv.weight.clone().view(deconv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + 
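A quick numerical sanity check of the `fuse_conv_and_bn` fusion above: with BatchNorm in eval mode (frozen running statistics), the fused convolution should reproduce the Conv→BN output up to floating-point tolerance. A sketch assuming `fuse_conv_and_bn` is in scope:

```
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8).eval()                 # fusion is only valid with frozen (eval-mode) BN statistics
bn.running_mean.uniform_(-1.0, 1.0)
bn.running_var.uniform_(0.5, 2.0)

x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    y_ref = bn(conv(x))
    y_fused = fuse_conv_and_bn(conv, bn)(x)   # single Conv2d with BN folded into weight and bias
print(torch.allclose(y_ref, y_fused, atol=1e-5))  # expected: True
```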
fuseddconv.weight.copy_(torch.mm(w_bn, w_deconv).view(fuseddconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(deconv.weight.size(1), device=deconv.weight.device) if deconv.bias is None else deconv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fuseddconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fuseddconv + + +def model_info(model, detailed=False, verbose=True, imgsz=640): + # Model information. imgsz may be int or list, i.e. imgsz=640 or imgsz=[640, 320] + if not verbose: + return + n_p = get_num_params(model) + n_g = get_num_gradients(model) # number gradients + if detailed: + LOGGER.info( + f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + flops = get_flops(model, imgsz) + fused = ' (fused)' if model.is_fused() else '' + fs = f', {flops:.1f} GFLOPs' if flops else '' + m = Path(getattr(model, 'yaml_file', '') or model.yaml.get('yaml_file', '')).stem.replace('yolo', 'YOLO') or 'Model' + LOGGER.info(f'{m} summary{fused}: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') + + +def get_num_params(model): + # Return the total number of parameters in a YOLO model + return sum(x.numel() for x in model.parameters()) + + +def get_num_gradients(model): + # Return the total number of parameters with gradients in a YOLO model + return sum(x.numel() for x in model.parameters() if x.requires_grad) + + +def get_flops(model, imgsz=640): + # Return a YOLO model's FLOPs + try: + model = de_parallel(model) + p = next(model.parameters()) + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + flops = thop.profile(deepcopy(model), inputs=[im], verbose=False)[0] / 1E9 * 2 # stride GFLOPs + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + flops = flops * imgsz[0] / stride * imgsz[1] / stride # 640x640 GFLOPs + return flops + except Exception: + return 0 + + +def initialize_weights(model): + # Initialize model weights to random values + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from 'b' to 'a', options to only 
include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +def get_latest_opset(): + # Return max supported ONNX opset by this version of torch + return max(int(k[14:]) for k in vars(torch.onnx) if 'symbolic_opset' in k) # opset + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return isinstance(model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if deterministic and TORCH_1_12: # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + +class ModelEMA: + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + To disable EMA set the `enabled` attribute to `False`. + """ + + def __init__(self, model, decay=0.9999, tau=2000, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + self.enabled = True + + def update(self, model): + # Update EMA parameters + if self.enabled: + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: # true for FP16 and FP32 + v *= d + v += (1 - d) * msd[k].detach() + # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype}, model {msd[k].dtype}' + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + if self.enabled: + copy_attr(self.ema, model, include, exclude) + + +def strip_optimizer(f: Union[str, Path] = 'best.pt', s: str = '') -> None: + """ + Strip optimizer from 'f' to finalize training, optionally save as 's'. 
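The `ModelEMA` class above uses a decay that ramps up with the number of updates, so early in training the averaged weights track the raw model closely and only later approach the nominal decay. A small standalone sketch of the same ramp formula:

```
import math

decay, tau = 0.9999, 2000
ramp = lambda updates: decay * (1 - math.exp(-updates / tau))
for updates in (1, 100, 1000, 10000):
    print(updates, f'{ramp(updates):.6f}')   # roughly 0.0005, 0.0488, 0.3934, 0.9932
```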
+ + Usage: + from ultralytics.yolo.utils.torch_utils import strip_optimizer + from pathlib import Path + for f in Path('/Users/glennjocher/Downloads/weights').glob('*.pt'): + strip_optimizer(f) + + Args: + f (str): file path to model to strip the optimizer from. Default is 'best.pt'. + s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten. + + Returns: + None + """ + x = torch.load(f, map_location=torch.device('cpu')) + args = {**DEFAULT_CFG_DICT, **x['train_args']} # combine model args with default args, preferring model args + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + x['train_args'] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # strip non-default keys + # x['model'].args = x['train_args'] + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def profile(input, ops, n=10, device=None): + """ YOLOv8 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ + results = [] + if not isinstance(device, torch.device): + device = select_device(device) + LOGGER.info(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=[x], verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters + LOGGER.info(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + LOGGER.info(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +class EarlyStopping: + """ + Early stopping class that stops training when a specified number of epochs have passed without improvement. + """ + + def __init__(self, patience=50): + """ + Initialize early stopping object + + Args: + patience (int, optional): Number of epochs to wait after fitness stops improving before stopping. + """ + self.best_fitness = 0.0 # i.e. 
mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + """ + Check whether to stop training + + Args: + epoch (int): Current epoch of training + fitness (float): Fitness value of current epoch + + Returns: + bool: True if training should stop, False otherwise + """ + if fitness is None: # check if fitness=None (happens when val=False) + return False + + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `patience=300` or use `patience=0` to disable EarlyStopping.') + return stop diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/__init__.py new file mode 100644 index 0000000..1f03762 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/__init__.py @@ -0,0 +1,5 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from ultralytics.yolo.v8 import classify, detect, segment + +__all__ = 'classify', 'segment', 'detect' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/__init__.py new file mode 100644 index 0000000..7fff3a8 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from ultralytics.yolo.v8.classify.predict import ClassificationPredictor, predict +from ultralytics.yolo.v8.classify.train import ClassificationTrainer, train +from ultralytics.yolo.v8.classify.val import ClassificationValidator, val + +__all__ = 'ClassificationPredictor', 'predict', 'ClassificationTrainer', 'train', 'ClassificationValidator', 'val' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/predict.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/predict.py new file mode 100644 index 0000000..790fcee --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/predict.py @@ -0,0 +1,84 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch + +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT +from ultralytics.yolo.utils.plotting import Annotator + + +class ClassificationPredictor(BasePredictor): + + def get_annotator(self, img): + return Annotator(img, example=str(self.model.names), pil=True) + + def preprocess(self, img): + img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device) + return img.half() if self.model.fp16 else img.float() # uint8 to fp16/32 + + def postprocess(self, preds, img, orig_imgs): + results 
= [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + path, _, _, _, _ = self.batch + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, probs=pred)) + + return results + + def write_results(self, idx, results, batch): + p, im, im0 = batch + log_string = '' + if len(im.shape) == 3: + im = im[None] # expand for batch dim + self.seen += 1 + im0 = im0.copy() + if self.source_type.webcam or self.source_type.from_img: # batch_size >= 1 + log_string += f'{idx}: ' + frame = self.dataset.count + else: + frame = getattr(self.dataset, 'frame', 0) + + self.data_path = p + # save_path = str(self.save_dir / p.name) # im.jpg + self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}') + log_string += '%gx%g ' % im.shape[2:] # print string + self.annotator = self.get_annotator(im0) + + result = results[idx] + if len(result) == 0: + return log_string + prob = result.probs + # Print results + n5 = min(len(self.model.names), 5) + top5i = prob.argsort(0, descending=True)[:n5].tolist() # top 5 indices + log_string += f"{', '.join(f'{self.model.names[j]} {prob[j]:.2f}' for j in top5i)}, " + + # write + text = '\n'.join(f'{prob[j]:.2f} {self.model.names[j]}' for j in top5i) + if self.args.save or self.args.show: # Add bbox to image + self.annotator.text((32, 32), text, txt_color=(255, 255, 255)) + if self.args.save_txt: # Write to file + with open(f'{self.txt_path}.txt', 'a') as f: + f.write(text + '\n') + + return log_string + + +def predict(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = ClassificationPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/train.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/train.py new file mode 100644 index 0000000..ec03d1c --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/train.py @@ -0,0 +1,159 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch +import torchvision + +from ultralytics.nn.tasks import ClassificationModel, attempt_load_one_weight +from ultralytics.yolo import v8 +from ultralytics.yolo.data import build_classification_dataloader +from ultralytics.yolo.engine.trainer import BaseTrainer +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, RANK, colorstr +from ultralytics.yolo.utils.torch_utils import is_parallel, strip_optimizer + + +class ClassificationTrainer(BaseTrainer): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None): + if overrides is None: + overrides = {} + overrides['task'] = 'classify' + super().__init__(cfg, overrides) + + def set_model_attributes(self): + self.model.names = self.data['names'] + + def get_model(self, cfg=None, weights=None, verbose=True): + model = ClassificationModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + + pretrained = False + for m in model.modules(): + if not pretrained and hasattr(m, 'reset_parameters'): + m.reset_parameters() + 
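The `EarlyStopping` helper defined earlier stops training once `patience` epochs pass without a fitness improvement. A toy walkthrough on a hand-made fitness curve, assuming the class (and the module's `LOGGER`) is in scope:

```
stopper = EarlyStopping(patience=3)
for epoch, fitness in enumerate([0.10, 0.20, 0.20, 0.19, 0.18, 0.17]):
    if stopper(epoch, fitness):
        print(f'stopped at epoch {epoch}')   # fires at epoch 5: best was epoch 2, then 3 epochs without improvement
        break
```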
if isinstance(m, torch.nn.Dropout) and self.args.dropout: + m.p = self.args.dropout # set dropout + for p in model.parameters(): + p.requires_grad = True # for training + + # Update defaults + if self.args.imgsz == 640: + self.args.imgsz = 224 + + return model + + def setup_model(self): + """ + load/create/download model for any task + """ + # classification models require special handling + + if isinstance(self.model, torch.nn.Module): # if model is loaded beforehand. No setup needed + return + + model = str(self.model) + # Load a YOLO model locally, from torchvision, or from Ultralytics assets + if model.endswith('.pt'): + self.model, _ = attempt_load_one_weight(model, device='cpu') + for p in self.model.parameters(): + p.requires_grad = True # for training + elif model.endswith('.yaml'): + self.model = self.get_model(cfg=model) + elif model in torchvision.models.__dict__: + pretrained = True + self.model = torchvision.models.__dict__[model](weights='IMAGENET1K_V1' if pretrained else None) + else: + FileNotFoundError(f'ERROR: model={model} not found locally or online. Please check model name.') + ClassificationModel.reshape_outputs(self.model, self.data['nc']) + + return # dont return ckpt. Classification doesn't support resume + + def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'): + loader = build_classification_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size if mode == 'train' else (batch_size * 2), + augment=mode == 'train', + rank=rank, + workers=self.args.workers) + # Attach inference transforms + if mode != 'train': + if is_parallel(self.model): + self.model.module.transforms = loader.dataset.torch_transforms + else: + self.model.transforms = loader.dataset.torch_transforms + return loader + + def preprocess_batch(self, batch): + batch['img'] = batch['img'].to(self.device) + batch['cls'] = batch['cls'].to(self.device) + return batch + + def progress_string(self): + return ('\n' + '%11s' * (4 + len(self.loss_names))) % \ + ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size') + + def get_validator(self): + self.loss_names = ['loss'] + return v8.classify.ClassificationValidator(self.test_loader, self.save_dir) + + def criterion(self, preds, batch): + loss = torch.nn.functional.cross_entropy(preds, batch['cls'], reduction='sum') / self.args.nbs + loss_items = loss.detach() + return loss, loss_items + + # def label_loss_items(self, loss_items=None, prefix="train"): + # """ + # Returns a loss dict with labelled training loss items tensor + # """ + # # Not needed for classification but necessary for segmentation & detection + # keys = [f"{prefix}/{x}" for x in self.loss_names] + # if loss_items is not None: + # loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats + # return dict(zip(keys, loss_items)) + # else: + # return keys + + def label_loss_items(self, loss_items=None, prefix='train'): + """ + Returns a loss dict with labelled training loss items tensor + """ + # Not needed for classification but necessary for segmentation & detection + keys = [f'{prefix}/{x}' for x in self.loss_names] + if loss_items is None: + return keys + loss_items = [round(float(loss_items), 5)] + return dict(zip(keys, loss_items)) + + def resume_training(self, ckpt): + pass + + def final_eval(self): + for f in self.last, self.best: + if f.exists(): + strip_optimizer(f) # strip optimizers + # TODO: validate best.pt after training completes + # if f is self.best: + # LOGGER.info(f'\nValidating {f}...') + 
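In `setup_model` above, a bare model name falls through to the torchvision branch, after which `ClassificationModel.reshape_outputs` resizes the classifier head to the dataset's class count. The torchvision lookup itself reduces to roughly the following (a sketch; the `resnet18` name and weights tag are illustrative):

```
import torchvision

name = 'resnet18'
assert name in torchvision.models.__dict__
model = torchvision.models.__dict__[name](weights='IMAGENET1K_V1')  # pretrained ImageNet weights
```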
# self.validator.args.save_json = True + # self.metrics = self.validator(model=f) + # self.metrics.pop('fitness', None) + # self.run_callbacks('on_fit_epoch_end') + LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}") + + +def train(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + data = cfg.data or 'mnist160' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = ClassificationTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/val.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/val.py new file mode 100644 index 0000000..f4b503b --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/classify/val.py @@ -0,0 +1,69 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from ultralytics.yolo.data import build_classification_dataloader +from ultralytics.yolo.engine.validator import BaseValidator +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER +from ultralytics.yolo.utils.metrics import ClassifyMetrics + + +class ClassificationValidator(BaseValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None): + super().__init__(dataloader, save_dir, pbar, args) + self.args.task = 'classify' + self.metrics = ClassifyMetrics() + + def get_desc(self): + return ('%22s' + '%11s' * 2) % ('classes', 'top1_acc', 'top5_acc') + + def init_metrics(self, model): + self.pred = [] + self.targets = [] + + def preprocess(self, batch): + batch['img'] = batch['img'].to(self.device, non_blocking=True) + batch['img'] = batch['img'].half() if self.args.half else batch['img'].float() + batch['cls'] = batch['cls'].to(self.device) + return batch + + def update_metrics(self, preds, batch): + n5 = min(len(self.model.names), 5) + self.pred.append(preds.argsort(1, descending=True)[:, :n5]) + self.targets.append(batch['cls']) + + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = self.speed + # self.metrics.confusion_matrix = self.confusion_matrix # TODO: classification ConfusionMatrix + + def get_stats(self): + self.metrics.process(self.targets, self.pred) + return self.metrics.results_dict + + def get_dataloader(self, dataset_path, batch_size): + return build_classification_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size, + augment=False, + shuffle=False, + workers=self.args.workers) + + def print_results(self): + pf = '%22s' + '%11.3g' * len(self.metrics.keys) # print format + LOGGER.info(pf % ('all', self.metrics.top1, self.metrics.top5)) + + +def val(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-cls.pt' # or "resnet18" + data = cfg.data or 'mnist160' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = ClassificationValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/__init__.py new file mode 100644 index 0000000..972b5e8 --- /dev/null +++ 
b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from .predict import DetectionPredictor, predict +from .train import DetectionTrainer, train +from .val import DetectionValidator, val + +__all__ = 'DetectionPredictor', 'predict', 'DetectionTrainer', 'train', 'DetectionValidator', 'val' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/predict.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/predict.py new file mode 100644 index 0000000..4df94b1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/predict.py @@ -0,0 +1,99 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch + +from ultralytics.yolo.engine.predictor import BasePredictor +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops +from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box + + +class DetectionPredictor(BasePredictor): + + def get_annotator(self, img): + return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names)) + + def preprocess(self, img): + img = (img if isinstance(img, torch.Tensor) else torch.from_numpy(img)).to(self.model.device) + img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32 + img /= 255 # 0 - 255 to 0.0 - 1.0 + return img + + def postprocess(self, preds, img, orig_imgs): + preds = ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + classes=self.args.classes) + + results = [] + for i, pred in enumerate(preds): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + path, _, _, _, _ = self.batch + img_path = path[i] if isinstance(path, list) else path + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred)) + return results + + def write_results(self, idx, results, batch): + p, im, im0 = batch + log_string = '' + if len(im.shape) == 3: + im = im[None] # expand for batch dim + self.seen += 1 + imc = im0.copy() if self.args.save_crop else im0 + if self.source_type.webcam or self.source_type.from_img: # batch_size >= 1 + log_string += f'{idx}: ' + frame = self.dataset.count + else: + frame = getattr(self.dataset, 'frame', 0) + self.data_path = p + self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}') + log_string += '%gx%g ' % im.shape[2:] # print string + self.annotator = self.get_annotator(im0) + + det = results[idx].boxes # TODO: make boxes inherit from tensors + if len(det) == 0: + return f'{log_string}(no detections), ' + for c in det.cls.unique(): + n = (det.cls == c).sum() # detections per class + log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, " + + # write + for d in reversed(det): + c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item()) + if self.args.save_txt: # Write to file + line = (c, *d.xywhn.view(-1)) + (conf, ) * self.args.save_conf + (() if id is None else (id, )) + with open(f'{self.txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + if self.args.save or self.args.show: # Add bbox to image + name = ('' if id is None else f'id:{id} ') + self.model.names[c] + label = None 
if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}') + self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) + if self.args.save_crop: + save_one_box(d.xyxy, + imc, + file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg', + BGR=True) + + return log_string + + +def predict(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n.pt' + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = DetectionPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/train.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/train.py new file mode 100644 index 0000000..6484cd7 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/train.py @@ -0,0 +1,216 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +from copy import copy + +import numpy as np +import torch +import torch.nn as nn + +from ultralytics.nn.tasks import DetectionModel +from ultralytics.yolo import v8 +from ultralytics.yolo.data import build_dataloader +from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader +from ultralytics.yolo.engine.trainer import BaseTrainer +from ultralytics.yolo.utils import DEFAULT_CFG, RANK, colorstr +from ultralytics.yolo.utils.loss import BboxLoss +from ultralytics.yolo.utils.ops import xywh2xyxy +from ultralytics.yolo.utils.plotting import plot_images, plot_labels, plot_results +from ultralytics.yolo.utils.tal import TaskAlignedAssigner, dist2bbox, make_anchors +from ultralytics.yolo.utils.torch_utils import de_parallel + + +# BaseTrainer python usage +class DetectionTrainer(BaseTrainer): + + def get_dataloader(self, dataset_path, batch_size, mode='train', rank=0): + # TODO: manage splits differently + # calculate stride - check if model is initialized + gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32) + return create_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size, + stride=gs, + hyp=vars(self.args), + augment=mode == 'train', + cache=self.args.cache, + pad=0 if mode == 'train' else 0.5, + rect=self.args.rect or mode == 'val', + rank=rank, + workers=self.args.workers, + close_mosaic=self.args.close_mosaic != 0, + prefix=colorstr(f'{mode}: '), + shuffle=mode == 'train', + seed=self.args.seed)[0] if self.args.v5loader else \ + build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, rank=rank, mode=mode, + rect=mode == 'val', names=self.data['names'])[0] + + def preprocess_batch(self, batch): + batch['img'] = batch['img'].to(self.device, non_blocking=True).float() / 255 + return batch + + def set_model_attributes(self): + # nl = de_parallel(self.model).model[-1].nl # number of detection layers (to scale hyps) + # self.args.box *= 3 / nl # scale to layers + # self.args.cls *= self.data["nc"] / 80 * 3 / nl # scale to classes and layers + # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + self.model.nc = self.data['nc'] # attach number of classes to model + self.model.names = self.data['names'] # attach class names to model + self.model.args = self.args # attach hyperparameters to model 
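After NMS in the detection predictor above, each prediction row is `(x1, y1, x2, y2, conf, cls)`, and the per-class summary string built in `write_results` amounts to counting rows per class. A standalone sketch with made-up detections and class names:

```
import torch

pred = torch.tensor([[10., 20., 50., 80., 0.90, 0.],
                     [12., 22., 48., 78., 0.70, 0.],
                     [100., 40., 160., 90., 0.80, 2.]])
names = {0: 'person', 2: 'car'}                      # example class names (assumed)
for c in pred[:, 5].unique():
    n = int((pred[:, 5] == c).sum())                 # detections per class
    print(f"{n} {names[int(c)]}{'s' * (n > 1)}")     # -> "2 persons", "1 car"
```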
+ # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc + + def get_model(self, cfg=None, weights=None, verbose=True): + model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + return model + + def get_validator(self): + self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss' + return v8.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def criterion(self, preds, batch): + if not hasattr(self, 'compute_loss'): + self.compute_loss = Loss(de_parallel(self.model)) + return self.compute_loss(preds, batch) + + def label_loss_items(self, loss_items=None, prefix='train'): + """ + Returns a loss dict with labelled training loss items tensor + """ + # Not needed for classification but necessary for segmentation & detection + keys = [f'{prefix}/{x}' for x in self.loss_names] + if loss_items is not None: + loss_items = [round(float(x), 5) for x in loss_items] # convert tensors to 5 decimal place floats + return dict(zip(keys, loss_items)) + else: + return keys + + def progress_string(self): + return ('\n' + '%11s' * + (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size') + + def plot_training_samples(self, batch, ni): + plot_images(images=batch['img'], + batch_idx=batch['batch_idx'], + cls=batch['cls'].squeeze(-1), + bboxes=batch['bboxes'], + paths=batch['im_file'], + fname=self.save_dir / f'train_batch{ni}.jpg') + + def plot_metrics(self): + plot_results(file=self.csv) # save results.png + + def plot_training_labels(self): + boxes = np.concatenate([lb['bboxes'] for lb in self.train_loader.dataset.labels], 0) + cls = np.concatenate([lb['cls'] for lb in self.train_loader.dataset.labels], 0) + plot_labels(boxes, cls.squeeze(), names=self.data['names'], save_dir=self.save_dir) + + +# Criterion class for computing training losses +class Loss: + + def __init__(self, model): # model must be de-paralleled + + device = next(model.parameters()).device # get model device + h = model.args # hyperparameters + + m = model.model[-1] # Detect() module + self.bce = nn.BCEWithLogitsLoss(reduction='none') + self.hyp = h + self.stride = m.stride # model strides + self.nc = m.nc # number of classes + self.no = m.no + self.reg_max = m.reg_max + self.device = device + + self.use_dfl = m.reg_max > 1 + + self.assigner = TaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0) + self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device) + self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device) + + def preprocess(self, targets, batch_size, scale_tensor): + if targets.shape[0] == 0: + out = torch.zeros(batch_size, 0, 5, device=self.device) + else: + i = targets[:, 0] # image index + _, counts = i.unique(return_counts=True) + counts = counts.to(dtype=torch.int32) + out = torch.zeros(batch_size, counts.max(), 5, device=self.device) + for j in range(batch_size): + matches = i == j + n = matches.sum() + if n: + out[j, :n] = targets[matches, 1:] + out[..., 1:5] = xywh2xyxy(out[..., 1:5].mul_(scale_tensor)) + return out + + def bbox_decode(self, anchor_points, pred_dist): + if self.use_dfl: + b, a, c = pred_dist.shape # batch, anchors, channels + pred_dist = pred_dist.view(b, a, 4, c // 4).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = pred_dist.view(b, a, c // 4, 4).transpose(2,3).softmax(3).matmul(self.proj.type(pred_dist.dtype)) + # pred_dist = (pred_dist.view(b, a, c // 4, 4).softmax(2) * 
self.proj.type(pred_dist.dtype).view(1, 1, -1, 1)).sum(2) + return dist2bbox(pred_dist, anchor_points, xywh=False) + + def __call__(self, preds, batch): + loss = torch.zeros(3, device=self.device) # box, cls, dfl + feats = preds[1] if isinstance(preds, tuple) else preds + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + (self.reg_max * 4, self.nc), 1) + + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + batch_size = pred_scores.shape[0] + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + targets = torch.cat((batch['batch_idx'].view(-1, 1), batch['cls'].view(-1, 1), batch['bboxes']), 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + _, target_bboxes, target_scores, fg_mask, _ = self.assigner( + pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + + target_bboxes /= stride_tensor + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[1] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + # bbox loss + if fg_mask.sum(): + loss[0], loss[2] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes, target_scores, + target_scores_sum, fg_mask) + + loss[0] *= self.hyp.box # box gain + loss[1] *= self.hyp.cls # cls gain + loss[2] *= self.hyp.dfl # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + +def train(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n.pt' + data = cfg.data or 'coco128.yaml' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = DetectionTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/val.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/val.py new file mode 100644 index 0000000..5d09942 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/detect/val.py @@ -0,0 +1,261 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import os +from pathlib import Path + +import numpy as np +import torch + +from ultralytics.yolo.data import build_dataloader +from ultralytics.yolo.data.dataloaders.v5loader import create_dataloader +from ultralytics.yolo.engine.validator import BaseValidator +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, colorstr, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import ConfusionMatrix, DetMetrics, box_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images +from ultralytics.yolo.utils.torch_utils import de_parallel + + +class 
DetectionValidator(BaseValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None): + super().__init__(dataloader, save_dir, pbar, args) + self.args.task = 'detect' + self.is_coco = False + self.class_map = None + self.metrics = DetMetrics(save_dir=self.save_dir) + self.iouv = torch.linspace(0.5, 0.95, 10) # iou vector for mAP@0.5:0.95 + self.niou = self.iouv.numel() + + def preprocess(self, batch): + batch['img'] = batch['img'].to(self.device, non_blocking=True) + batch['img'] = (batch['img'].half() if self.args.half else batch['img'].float()) / 255 + for k in ['batch_idx', 'cls', 'bboxes']: + batch[k] = batch[k].to(self.device) + + nb = len(batch['img']) + self.lb = [torch.cat([batch['cls'], batch['bboxes']], dim=-1)[batch['batch_idx'] == i] + for i in range(nb)] if self.args.save_hybrid else [] # for autolabelling + + return batch + + def init_metrics(self, model): + val = self.data.get(self.args.split, '') # validation path + self.is_coco = isinstance(val, str) and val.endswith(f'coco{os.sep}val2017.txt') # is COCO dataset + self.class_map = ops.coco80_to_coco91_class() if self.is_coco else list(range(1000)) + self.args.save_json |= self.is_coco and not self.training # run on final val if training COCO + self.names = model.names + self.nc = len(model.names) + self.metrics.names = self.names + self.metrics.plot = self.args.plots + self.confusion_matrix = ConfusionMatrix(nc=self.nc) + self.seen = 0 + self.jdict = [] + self.stats = [] + + def get_desc(self): + return ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + preds = ops.non_max_suppression(preds, + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det) + return preds + + def update_metrics(self, preds, batch): + # Metrics + for si, pred in enumerate(preds): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_bboxes, *torch.zeros((2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn, labelsn) + # TODO: maybe remove these `self.` arguments as they already are member variable + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + self.stats.append((correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) # (conf, pcls, tcls) + + # Save + if self.args.save_json: + self.pred_to_json(predn, batch['im_file'][si]) + if self.args.save_txt: + file = self.save_dir / 'labels' / f'{Path(batch["im_file"][si]).stem}.txt' + 
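For reference, the DFL decoding in `Loss.bbox_decode` above (used when `reg_max > 1`) turns each side's bin distribution into an expected distance by softmaxing over the `reg_max` bins and projecting onto the bin indices. A standalone sketch of that step with toy shapes:

```
import torch

reg_max = 16
proj = torch.arange(reg_max, dtype=torch.float)   # bin indices 0..15
pred_dist = torch.randn(2, 8400, 4, reg_max)      # (batch, anchors, 4 sides, bins), toy logits
dist = pred_dist.softmax(-1) @ proj               # expected l, t, r, b distance per anchor, shape (2, 8400, 4)
```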
self.save_one_txt(predn, self.args.save_conf, shape, file) + + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def get_stats(self): + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*self.stats)] # to numpy + if len(stats) and stats[0].any(): + self.metrics.process(*stats) + self.nt_per_class = np.bincount(stats[-1].astype(int), minlength=self.nc) # number of targets per class + return self.metrics.results_dict + + def print_results(self): + pf = '%22s' + '%11i' * 2 + '%11.3g' * len(self.metrics.keys) # print format + LOGGER.info(pf % ('all', self.seen, self.nt_per_class.sum(), *self.metrics.mean_results())) + if self.nt_per_class.sum() == 0: + LOGGER.warning( + f'WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels') + + # Print results per class + if self.args.verbose and not self.training and self.nc > 1 and len(self.stats): + for i, c in enumerate(self.metrics.ap_class_index): + LOGGER.info(pf % (self.names[c], self.seen, self.nt_per_class[c], *self.metrics.class_result(i))) + + if self.args.plots: + self.confusion_matrix.plot(save_dir=self.save_dir, names=list(self.names.values())) + + def _process_batch(self, detections, labels): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def get_dataloader(self, dataset_path, batch_size): + # TODO: manage splits differently + # calculate stride - check if model is initialized + gs = max(int(de_parallel(self.model).stride if self.model else 0), 32) + return create_dataloader(path=dataset_path, + imgsz=self.args.imgsz, + batch_size=batch_size, + stride=gs, + hyp=vars(self.args), + cache=False, + pad=0.5, + rect=self.args.rect, + workers=self.args.workers, + prefix=colorstr(f'{self.args.mode}: '), + shuffle=False, + seed=self.args.seed)[0] if self.args.v5loader else \ + build_dataloader(self.args, batch_size, img_path=dataset_path, stride=gs, names=self.data['names'], + mode='val')[0] + + def plot_val_samples(self, batch, ni): + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names) + + def plot_predictions(self, batch, preds, ni): + plot_images(batch['img'], + *output_to_target(preds, max_det=15), + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names) # pred + + def save_one_txt(self, predn, save_conf, shape, file): + gn = torch.tensor(shape)[[1, 0, 1, 0]] # 
normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (ops.xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + def pred_to_json(self, predn, filename): + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + def eval_json(self, stats): + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) + eval = COCOeval(anno, pred, 'bbox') + if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files] # images to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats + + +def val(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n.pt' + data = cfg.data or 'coco128.yaml' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = DetectionValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/__init__.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/__init__.py new file mode 100644 index 0000000..a9831ac --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/__init__.py @@ -0,0 +1,7 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from .predict import SegmentationPredictor, predict +from .train import SegmentationTrainer, train +from .val import SegmentationValidator, val + +__all__ = 'SegmentationPredictor', 'predict', 'SegmentationTrainer', 'train', 'SegmentationValidator', 'val' diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/predict.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/predict.py new file mode 100644 index 0000000..90211c1 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/predict.py @@ -0,0 +1,114 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +import torch + +from ultralytics.yolo.engine.results import Results +from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops +from ultralytics.yolo.utils.plotting import colors, 
save_one_box +from ultralytics.yolo.v8.detect.predict import DetectionPredictor + + +class SegmentationPredictor(DetectionPredictor): + + def postprocess(self, preds, img, orig_imgs): + # TODO: filter by classes + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + agnostic=self.args.agnostic_nms, + max_det=self.args.max_det, + nc=len(self.model.names), + classes=self.args.classes) + results = [] + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + for i, pred in enumerate(p): + orig_img = orig_imgs[i] if isinstance(orig_imgs, list) else orig_imgs + path, _, _, _, _ = self.batch + img_path = path[i] if isinstance(path, list) else path + if not len(pred): # save empty boxes + results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6])) + continue + if self.args.retina_masks: + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2]) # HWC + else: + masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC + if not isinstance(orig_imgs, torch.Tensor): + pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape) + results.append( + Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)) + return results + + def write_results(self, idx, results, batch): + p, im, im0 = batch + log_string = '' + if len(im.shape) == 3: + im = im[None] # expand for batch dim + self.seen += 1 + imc = im0.copy() if self.args.save_crop else im0 + if self.source_type.webcam or self.source_type.from_img: # batch_size >= 1 + log_string += f'{idx}: ' + frame = self.dataset.count + else: + frame = getattr(self.dataset, 'frame', 0) + + self.data_path = p + self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}') + log_string += '%gx%g ' % im.shape[2:] # print string + self.annotator = self.get_annotator(im0) + + result = results[idx] + if len(result) == 0: + return f'{log_string}(no detections), ' + det, mask = result.boxes, result.masks # getting tensors TODO: mask mask,box inherit for tensor + + # Print results + for c in det.cls.unique(): + n = (det.cls == c).sum() # detections per class + log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, " + + # Mask plotting + if self.args.save or self.args.show: + im_gpu = torch.as_tensor(im0, dtype=torch.float16, device=mask.masks.device).permute( + 2, 0, 1).flip(0).contiguous() / 255 if self.args.retina_masks else im[idx] + self.annotator.masks(masks=mask.masks, colors=[colors(x, True) for x in det.cls], im_gpu=im_gpu) + + # Write results + for j, d in enumerate(reversed(det)): + c, conf, id = int(d.cls), float(d.conf), None if d.id is None else int(d.id.item()) + if self.args.save_txt: # Write to file + seg = mask.segments[len(det) - j - 1].copy().reshape(-1) # reversed mask.segments, (n,2) to (n*2) + line = (c, *seg) + (conf, ) * self.args.save_conf + (() if id is None else (id, )) + with open(f'{self.txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + if self.args.save or self.args.show: # Add bbox to image + name = ('' if id is None else f'id:{id} ') + self.model.names[c] + label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}') + if self.args.boxes: + 
self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True)) + if self.args.save_crop: + save_one_box(d.xyxy, + imc, + file=self.save_dir / 'crops' / self.model.names[c] / f'{self.data_path.stem}.jpg', + BGR=True) + + return log_string + + +def predict(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-seg.pt' + source = cfg.source if cfg.source is not None else ROOT / 'assets' if (ROOT / 'assets').exists() \ + else 'https://ultralytics.com/images/bus.jpg' + + args = dict(model=model, source=source) + if use_python: + from ultralytics import YOLO + YOLO(model)(**args) + else: + predictor = SegmentationPredictor(overrides=args) + predictor.predict_cli() + + +if __name__ == '__main__': + predict() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/train.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/train.py new file mode 100644 index 0000000..86d7433 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/train.py @@ -0,0 +1,164 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +from copy import copy + +import torch +import torch.nn.functional as F + +from ultralytics.nn.tasks import SegmentationModel +from ultralytics.yolo import v8 +from ultralytics.yolo.utils import DEFAULT_CFG, RANK +from ultralytics.yolo.utils.ops import crop_mask, xyxy2xywh +from ultralytics.yolo.utils.plotting import plot_images, plot_results +from ultralytics.yolo.utils.tal import make_anchors +from ultralytics.yolo.utils.torch_utils import de_parallel +from ultralytics.yolo.v8.detect.train import Loss + + +# BaseTrainer python usage +class SegmentationTrainer(v8.detect.DetectionTrainer): + + def __init__(self, cfg=DEFAULT_CFG, overrides=None): + if overrides is None: + overrides = {} + overrides['task'] = 'segment' + super().__init__(cfg, overrides) + + def get_model(self, cfg=None, weights=None, verbose=True): + model = SegmentationModel(cfg, ch=3, nc=self.data['nc'], verbose=verbose and RANK == -1) + if weights: + model.load(weights) + + return model + + def get_validator(self): + self.loss_names = 'box_loss', 'seg_loss', 'cls_loss', 'dfl_loss' + return v8.segment.SegmentationValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args)) + + def criterion(self, preds, batch): + if not hasattr(self, 'compute_loss'): + self.compute_loss = SegLoss(de_parallel(self.model), overlap=self.args.overlap_mask) + return self.compute_loss(preds, batch) + + def plot_training_samples(self, batch, ni): + images = batch['img'] + masks = batch['masks'] + cls = batch['cls'].squeeze(-1) + bboxes = batch['bboxes'] + paths = batch['im_file'] + batch_idx = batch['batch_idx'] + plot_images(images, batch_idx, cls, bboxes, masks, paths=paths, fname=self.save_dir / f'train_batch{ni}.jpg') + + def plot_metrics(self): + plot_results(file=self.csv, segment=True) # save results.png + + +# Criterion class for computing training losses +class SegLoss(Loss): + + def __init__(self, model, overlap=True): # model must be de-paralleled + super().__init__(model) + self.nm = model.model[-1].nm # number of masks + self.overlap = overlap + + def __call__(self, preds, batch): + loss = torch.zeros(4, device=self.device) # box, cls, dfl + feats, pred_masks, proto = preds if len(preds) == 3 else preds[1] + batch_size, _, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split( + 
(self.reg_max * 4, self.nc), 1) + + # b, grids, .. + pred_scores = pred_scores.permute(0, 2, 1).contiguous() + pred_distri = pred_distri.permute(0, 2, 1).contiguous() + pred_masks = pred_masks.permute(0, 2, 1).contiguous() + + dtype = pred_scores.dtype + imgsz = torch.tensor(feats[0].shape[2:], device=self.device, dtype=dtype) * self.stride[0] # image size (h,w) + anchor_points, stride_tensor = make_anchors(feats, self.stride, 0.5) + + # targets + try: + batch_idx = batch['batch_idx'].view(-1, 1) + targets = torch.cat((batch_idx, batch['cls'].view(-1, 1), batch['bboxes'].to(dtype)), 1) + targets = self.preprocess(targets.to(self.device), batch_size, scale_tensor=imgsz[[1, 0, 1, 0]]) + gt_labels, gt_bboxes = targets.split((1, 4), 2) # cls, xyxy + mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0) + except RuntimeError as e: + raise TypeError('ERROR ❌ segment dataset incorrectly formatted or not a segment dataset.\n' + "This error can occur when incorrectly training a 'segment' model on a 'detect' dataset, " + "i.e. 'yolo train model=yolov8n-seg.pt data=coco128.yaml'.\nVerify your dataset is a " + "correctly formatted 'segment' dataset using 'data=coco128-seg.yaml' " + 'as an example.\nSee https://docs.ultralytics.com/tasks/segment/ for help.') from e + + # pboxes + pred_bboxes = self.bbox_decode(anchor_points, pred_distri) # xyxy, (b, h*w, 4) + + _, target_bboxes, target_scores, fg_mask, target_gt_idx = self.assigner( + pred_scores.detach().sigmoid(), (pred_bboxes.detach() * stride_tensor).type(gt_bboxes.dtype), + anchor_points * stride_tensor, gt_labels, gt_bboxes, mask_gt) + + target_scores_sum = max(target_scores.sum(), 1) + + # cls loss + # loss[1] = self.varifocal_loss(pred_scores, target_scores, target_labels) / target_scores_sum # VFL way + loss[2] = self.bce(pred_scores, target_scores.to(dtype)).sum() / target_scores_sum # BCE + + if fg_mask.sum(): + # bbox loss + loss[0], loss[3] = self.bbox_loss(pred_distri, pred_bboxes, anchor_points, target_bboxes / stride_tensor, + target_scores, target_scores_sum, fg_mask) + # masks loss + masks = batch['masks'].to(self.device).float() + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + + for i in range(batch_size): + if fg_mask[i].sum(): + mask_idx = target_gt_idx[i][fg_mask[i]] + if self.overlap: + gt_mask = torch.where(masks[[i]] == (mask_idx + 1).view(-1, 1, 1), 1.0, 0.0) + else: + gt_mask = masks[batch_idx.view(-1) == i][mask_idx] + xyxyn = target_bboxes[i][fg_mask[i]] / imgsz[[1, 0, 1, 0]] + marea = xyxy2xywh(xyxyn)[:, 2:].prod(1) + mxyxy = xyxyn * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device) + loss[1] += self.single_mask_loss(gt_mask, pred_masks[i][fg_mask[i]], proto[i], mxyxy, marea) # seg + + # WARNING: lines below prevents Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove + else: + loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss + + # WARNING: lines below prevent Multi-GPU DDP 'unused gradient' PyTorch errors, do not remove + else: + loss[1] += (proto * 0).sum() + (pred_masks * 0).sum() # inf sums may lead to nan loss + + loss[0] *= self.hyp.box # box gain + loss[1] *= self.hyp.box / batch_size # seg gain + loss[2] *= self.hyp.cls # cls gain + loss[3] *= self.hyp.dfl # dfl gain + + return loss.sum() * batch_size, loss.detach() # loss(box, cls, dfl) + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ 
proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n, 32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + +def train(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-seg.pt' + data = cfg.data or 'coco128-seg.yaml' # or yolo.ClassificationDataset("mnist") + device = cfg.device if cfg.device is not None else '' + + args = dict(model=model, data=data, device=device) + if use_python: + from ultralytics import YOLO + YOLO(model).train(**args) + else: + trainer = SegmentationTrainer(overrides=args) + trainer.train() + + +if __name__ == '__main__': + train() diff --git a/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/val.py b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/val.py new file mode 100644 index 0000000..403f880 --- /dev/null +++ b/src/train_utils/train_models/models/ultralytics/ultralytics/yolo/v8/segment/val.py @@ -0,0 +1,250 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license + +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +import torch.nn.functional as F + +from ultralytics.yolo.utils import DEFAULT_CFG, LOGGER, NUM_THREADS, ops +from ultralytics.yolo.utils.checks import check_requirements +from ultralytics.yolo.utils.metrics import SegmentMetrics, box_iou, mask_iou +from ultralytics.yolo.utils.plotting import output_to_target, plot_images +from ultralytics.yolo.v8.detect import DetectionValidator + + +class SegmentationValidator(DetectionValidator): + + def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None): + super().__init__(dataloader, save_dir, pbar, args) + self.args.task = 'segment' + self.metrics = SegmentMetrics(save_dir=self.save_dir) + + def preprocess(self, batch): + batch = super().preprocess(batch) + batch['masks'] = batch['masks'].to(self.device).float() + return batch + + def init_metrics(self, model): + super().init_metrics(model) + self.plot_masks = [] + if self.args.save_json: + check_requirements('pycocotools>=2.0.6') + self.process = ops.process_mask_upsample # more accurate + else: + self.process = ops.process_mask # faster + + def get_desc(self): + return ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', + 'R', 'mAP50', 'mAP50-95)') + + def postprocess(self, preds): + p = ops.non_max_suppression(preds[0], + self.args.conf, + self.args.iou, + labels=self.lb, + multi_label=True, + agnostic=self.args.single_cls, + max_det=self.args.max_det, + nc=self.nc) + proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported + return p, proto + + def update_metrics(self, preds, batch): + # Metrics + for si, (pred, proto) in enumerate(zip(preds[0], preds[1])): + idx = batch['batch_idx'] == si + cls = batch['cls'][idx] + bbox = batch['bboxes'][idx] + nl, npr = cls.shape[0], pred.shape[0] # number of labels, predictions + shape = batch['ori_shape'][si] + correct_masks = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + correct_bboxes = torch.zeros(npr, self.niou, dtype=torch.bool, device=self.device) # init + self.seen += 1 + + if npr == 0: + if nl: + self.stats.append((correct_masks, correct_bboxes, *torch.zeros( + (2, 0), device=self.device), cls.squeeze(-1))) + if self.args.plots: + self.confusion_matrix.process_batch(detections=None, labels=cls.squeeze(-1)) + continue + 
+ # Masks + midx = [si] if self.args.overlap_mask else idx + gt_masks = batch['masks'][midx] + pred_masks = self.process(proto, pred[:, 6:], pred[:, :4], shape=batch['img'][si].shape[1:]) + + # Predictions + if self.args.single_cls: + pred[:, 5] = 0 + predn = pred.clone() + ops.scale_boxes(batch['img'][si].shape[1:], predn[:, :4], shape, + ratio_pad=batch['ratio_pad'][si]) # native-space pred + + # Evaluate + if nl: + height, width = batch['img'].shape[2:] + tbox = ops.xywh2xyxy(bbox) * torch.tensor( + (width, height, width, height), device=self.device) # target boxes + ops.scale_boxes(batch['img'][si].shape[1:], tbox, shape, + ratio_pad=batch['ratio_pad'][si]) # native-space labels + labelsn = torch.cat((cls, tbox), 1) # native-space labels + correct_bboxes = self._process_batch(predn, labelsn) + # TODO: maybe remove these `self.` arguments as they already are member variable + correct_masks = self._process_batch(predn, + labelsn, + pred_masks, + gt_masks, + overlap=self.args.overlap_mask, + masks=True) + if self.args.plots: + self.confusion_matrix.process_batch(predn, labelsn) + + # Append correct_masks, correct_boxes, pconf, pcls, tcls + self.stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], cls.squeeze(-1))) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if self.args.plots and self.batch_i < 3: + self.plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot + + # Save + if self.args.save_json: + pred_masks = ops.scale_image(batch['img'][si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), + shape, + ratio_pad=batch['ratio_pad'][si]) + self.pred_to_json(predn, batch['im_file'][si], pred_masks) + # if self.args.save_txt: + # save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + + def finalize_metrics(self, *args, **kwargs): + self.metrics.speed = self.speed + self.metrics.confusion_matrix = self.confusion_matrix + + def _process_batch(self, detections, labels, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], self.iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(self.iouv)): + x = torch.where((iou >= self.iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), + 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), 
i] = True + return torch.tensor(correct, dtype=torch.bool, device=detections.device) + + def plot_val_samples(self, batch, ni): + plot_images(batch['img'], + batch['batch_idx'], + batch['cls'].squeeze(-1), + batch['bboxes'], + batch['masks'], + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_labels.jpg', + names=self.names) + + def plot_predictions(self, batch, preds, ni): + plot_images(batch['img'], + *output_to_target(preds[0], max_det=15), + torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks, + paths=batch['im_file'], + fname=self.save_dir / f'val_batch{ni}_pred.jpg', + names=self.names) # pred + self.plot_masks.clear() + + def pred_to_json(self, predn, filename, pred_masks): + # Save one JSON result + # Example result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode # noqa + + def single_encode(x): + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + stem = Path(filename).stem + image_id = int(stem) if stem.isnumeric() else stem + box = ops.xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + self.jdict.append({ + 'image_id': image_id, + 'category_id': self.class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + def eval_json(self, stats): + if self.args.save_json and self.is_coco and len(self.jdict): + anno_json = self.data['path'] / 'annotations/instances_val2017.json' # annotations + pred_json = self.save_dir / 'predictions.json' # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...') + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO # noqa + from pycocotools.cocoeval import COCOeval # noqa + + for x in anno_json, pred_json: + assert x.is_file(), f'{x} file not found' + anno = COCO(str(anno_json)) # init annotations api + pred = anno.loadRes(str(pred_json)) # init predictions api (must pass string, not Path) + for i, eval in enumerate([COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm')]): + if self.is_coco: + eval.params.imgIds = [int(Path(x).stem) + for x in self.dataloader.dataset.im_files] # images to eval + eval.evaluate() + eval.accumulate() + eval.summarize() + idx = i * 4 + 2 + stats[self.metrics.keys[idx + 1]], stats[ + self.metrics.keys[idx]] = eval.stats[:2] # update mAP50-95 and mAP50 + except Exception as e: + LOGGER.warning(f'pycocotools unable to run: {e}') + return stats + + +def val(cfg=DEFAULT_CFG, use_python=False): + model = cfg.model or 'yolov8n-seg.pt' + data = cfg.data or 'coco128-seg.yaml' + + args = dict(model=model, data=data) + if use_python: + from ultralytics import YOLO + YOLO(model).val(**args) + else: + validator = SegmentationValidator(args=args) + validator(model=args['model']) + + +if __name__ == '__main__': + val() diff --git a/src/train_utils/train_models/models/yolov5/.dockerignore b/src/train_utils/train_models/models/yolov5/.dockerignore new file mode 100644 index 0000000..3b66925 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.dockerignore @@ -0,0 +1,222 
@@ +# Repo-specific DockerIgnore ------------------------------------------------------------------------------------------- +.git +.cache +.idea +runs +output +coco +storage.googleapis.com + +data/samples/* +**/results*.csv +*.jpg + +# Neural Network weights ----------------------------------------------------------------------------------------------- +**/*.pt +**/*.pth +**/*.onnx +**/*.engine +**/*.mlmodel +**/*.torchscript +**/*.torchscript.pt +**/*.tflite +**/*.h5 +**/*.pb +*_saved_model/ +*_web_model/ +*_openvino_model/ + +# Below Copied From .gitignore ----------------------------------------------------------------------------------------- +# Below Copied From .gitignore ----------------------------------------------------------------------------------------- + + +# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +wandb/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv* +venv*/ +ENV*/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + + +# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon +Icon? 
+ +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/* +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/dictionaries +.html # Bokeh Plots +.pg # TensorFlow Frozen Graphs +.avi # videos + +# Sensitive or high-churn files: +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml + +# Gradle: +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin: +.idea/**/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties diff --git a/src/train_utils/train_models/models/yolov5/.gitattributes b/src/train_utils/train_models/models/yolov5/.gitattributes new file mode 100644 index 0000000..dad4239 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.gitattributes @@ -0,0 +1,2 @@ +# this drop notebooks from GitHub language stats +*.ipynb linguist-vendored diff --git a/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/bug-report.yml b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/bug-report.yml new file mode 100644 index 0000000..fcb6413 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/bug-report.yml @@ -0,0 +1,85 @@ +name: 🐛 Bug Report +# title: " " +description: Problems with YOLOv5 +labels: [bug, triage] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🐛 Bug Report! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar bug report already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar bug report. + required: true + + - type: dropdown + attributes: + label: YOLOv5 Component + description: | + Please select the part of YOLOv5 where you found the bug. + multiple: true + options: + - "Training" + - "Validation" + - "Detection" + - "Export" + - "PyTorch Hub" + - "Multi-GPU" + - "Evolution" + - "Integrations" + - "Other" + validations: + required: false + + - type: textarea + attributes: + label: Bug + description: Provide console output with error messages and/or screenshots of the bug. + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. 
+ validations: + required: true + + - type: textarea + attributes: + label: Environment + description: Please specify the software and hardware you used to produce the bug. + placeholder: | + - YOLO: YOLOv5 🚀 v6.0-67-g60e42e1 torch 1.9.0+cu111 CUDA:0 (A100-SXM4-40GB, 40536MiB) + - OS: Ubuntu 20.04 + - Python: 3.9.0 + validations: + required: false + + - type: textarea + attributes: + label: Minimal Reproducible Example + description: > + When asking a question, people will be better able to provide help if you provide code that they can easily understand and use to **reproduce** the problem. + This is referred to by community members as creating a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + placeholder: | + ``` + # Code to reproduce your issue here + ``` + validations: + required: false + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! diff --git a/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/config.yml b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..4db7cef --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: true +contact_links: + - name: 💬 Forum + url: https://community.ultralytics.com/ + about: Ask on Ultralytics Community Forum + - name: Stack Overflow + url: https://stackoverflow.com/search?q=YOLOv5 + about: Ask on Stack Overflow with 'YOLOv5' tag diff --git a/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/feature-request.yml b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/feature-request.yml new file mode 100644 index 0000000..68ef985 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/feature-request.yml @@ -0,0 +1,50 @@ +name: 🚀 Feature Request +description: Suggest a YOLOv5 idea +# title: " " +labels: [enhancement] +body: + - type: markdown + attributes: + value: | + Thank you for submitting a YOLOv5 🚀 Feature Request! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) to see if a similar feature request already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and found no similar feature requests. + required: true + + - type: textarea + attributes: + label: Description + description: A short description of your feature. + placeholder: | + What new feature would you like to see in YOLOv5? + validations: + required: true + + - type: textarea + attributes: + label: Use case + description: | + Describe the use case of your feature request. It will help us understand and prioritize the feature request. + placeholder: | + How would this feature be used, and who would use it? 
+ + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? + + - type: checkboxes + attributes: + label: Are you willing to submit a PR? + description: > + (Optional) We encourage you to submit a [Pull Request](https://github.com/ultralytics/yolov5/pulls) (PR) to help improve YOLOv5 for everyone, especially if you have a good understanding of how to implement a fix or feature. + See the YOLOv5 [Contributing Guide](https://github.com/ultralytics/yolov5/blob/master/CONTRIBUTING.md) to get started. + options: + - label: Yes I'd like to help by submitting a PR! diff --git a/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/question.yml b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/question.yml new file mode 100644 index 0000000..8e0993c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/ISSUE_TEMPLATE/question.yml @@ -0,0 +1,33 @@ +name: ❓ Question +description: Ask a YOLOv5 question +# title: " " +labels: [question] +body: + - type: markdown + attributes: + value: | + Thank you for asking a YOLOv5 ❓ Question! + + - type: checkboxes + attributes: + label: Search before asking + description: > + Please search the [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) to see if a similar question already exists. + options: + - label: > + I have searched the YOLOv5 [issues](https://github.com/ultralytics/yolov5/issues) and [discussions](https://github.com/ultralytics/yolov5/discussions) and found no similar questions. + required: true + + - type: textarea + attributes: + label: Question + description: What is your question? + placeholder: | + 💡 ProTip! Include as much information as possible (screenshots, logs, tracebacks etc.) to receive the most helpful response. + validations: + required: true + + - type: textarea + attributes: + label: Additional + description: Anything else you would like to share? 
diff --git a/src/train_utils/train_models/models/yolov5/.github/PULL_REQUEST_TEMPLATE.md b/src/train_utils/train_models/models/yolov5/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..f25b017 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ + diff --git a/src/train_utils/train_models/models/yolov5/.github/dependabot.yml b/src/train_utils/train_models/models/yolov5/.github/dependabot.yml new file mode 100644 index 0000000..c1b3d5d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/dependabot.yml @@ -0,0 +1,23 @@ +version: 2 +updates: + - package-ecosystem: pip + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 10 + reviewers: + - glenn-jocher + labels: + - dependencies + + - package-ecosystem: github-actions + directory: "/" + schedule: + interval: weekly + time: "04:00" + open-pull-requests-limit: 5 + reviewers: + - glenn-jocher + labels: + - dependencies diff --git a/src/train_utils/train_models/models/yolov5/.github/workflows/ci-testing.yml b/src/train_utils/train_models/models/yolov5/.github/workflows/ci-testing.yml new file mode 100644 index 0000000..a6f47bb --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/workflows/ci-testing.yml @@ -0,0 +1,153 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# YOLOv5 Continuous Integration (CI) GitHub Actions tests + +name: YOLOv5 CI + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + schedule: + - cron: '0 0 * * *' # runs at 00:00 UTC every day + +jobs: + Benchmarks: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest ] + python-version: [ '3.10' ] # requires python<=3.10 + model: [ yolov5n ] + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' # caching pip dependencies + - name: Install requirements + run: | + python -m pip install --upgrade pip wheel + pip install -r requirements.txt coremltools openvino-dev tensorflow-cpu --extra-index-url https://download.pytorch.org/whl/cpu + python --version + pip --version + pip list + - name: Benchmark DetectionModel + run: | + python benchmarks.py --data coco128.yaml --weights ${{ matrix.model }}.pt --img 320 --hard-fail 0.29 + - name: Benchmark SegmentationModel + run: | + python benchmarks.py --data coco128-seg.yaml --weights ${{ matrix.model }}-seg.pt --img 320 --hard-fail 0.22 + - name: Test predictions + run: | + python export.py --weights ${{ matrix.model }}-cls.pt --include onnx --img 224 + python detect.py --weights ${{ matrix.model }}.onnx --img 320 + python segment/predict.py --weights ${{ matrix.model }}-seg.onnx --img 320 + python classify/predict.py --weights ${{ matrix.model }}-cls.onnx --img 224 + + Tests: + timeout-minutes: 60 + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ ubuntu-latest, windows-latest ] # macos-latest bug https://github.com/ultralytics/yolov5/pull/9049 + python-version: [ '3.10' ] + model: [ yolov5n ] + include: + - os: ubuntu-latest + python-version: '3.7' # '3.6.8' min + model: yolov5n + - os: ubuntu-latest + python-version: '3.8' + model: yolov5n + - os: ubuntu-latest + python-version: '3.9' + model: yolov5n + - os: ubuntu-latest + python-version: '3.8' # torch 1.7.0 requires python >=3.6, <=3.8 + model: yolov5n + torch: '1.7.0' # min torch version CI https://pypi.org/project/torchvision/ + steps: + - uses: actions/checkout@v3 
+ - uses: actions/setup-python@v4 + with: + python-version: ${{ matrix.python-version }} + cache: 'pip' # caching pip dependencies + - name: Install requirements + run: | + python -m pip install --upgrade pip wheel + if [ "${{ matrix.torch }}" == "1.7.0" ]; then + pip install -r requirements.txt torch==1.7.0 torchvision==0.8.1 --extra-index-url https://download.pytorch.org/whl/cpu + else + pip install -r requirements.txt --extra-index-url https://download.pytorch.org/whl/cpu + fi + shell: bash # for Windows compatibility + - name: Check environment + run: | + python -c "import utils; utils.notebook_init()" + echo "RUNNER_OS is ${{ runner.os }}" + echo "GITHUB_EVENT_NAME is ${{ github.event_name }}" + echo "GITHUB_WORKFLOW is ${{ github.workflow }}" + echo "GITHUB_ACTOR is ${{ github.actor }}" + echo "GITHUB_REPOSITORY is ${{ github.repository }}" + echo "GITHUB_REPOSITORY_OWNER is ${{ github.repository_owner }}" + python --version + pip --version + pip list + - name: Test detection + shell: bash # for Windows compatibility + run: | + # export PYTHONPATH="$PWD" # to run '$ python *.py' files in subdirectories + m=${{ matrix.model }} # official weights + b=runs/train/exp/weights/best # best.pt checkpoint + python train.py --imgsz 64 --batch 32 --weights $m.pt --cfg $m.yaml --epochs 1 --device cpu # train + for d in cpu; do # devices + for w in $m $b; do # weights + python val.py --imgsz 64 --batch 32 --weights $w.pt --device $d # val + python detect.py --imgsz 64 --weights $w.pt --device $d # detect + done + done + python hubconf.py --model $m # hub + # python models/tf.py --weights $m.pt # build TF model + python models/yolo.py --cfg $m.yaml # build PyTorch model + python export.py --weights $m.pt --img 64 --include torchscript # export + python - <=3.7.0**](https://www.python.org/) with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). To get started: + ```bash + git clone https://github.com/ultralytics/yolov5 # clone + cd yolov5 + pip install -r requirements.txt # install + ``` + + ## Environments + + YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): + + - **Notebooks** with free GPU: Run on Gradient Open In Colab Open In Kaggle + - **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart) + - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) + - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls + + ## Status + + YOLOv5 CI + + If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. 
CI tests verify correct operation of YOLOv5 [training](https://github.com/ultralytics/yolov5/blob/master/train.py), [validation](https://github.com/ultralytics/yolov5/blob/master/val.py), [inference](https://github.com/ultralytics/yolov5/blob/master/detect.py), [export](https://github.com/ultralytics/yolov5/blob/master/export.py) and [benchmarks](https://github.com/ultralytics/yolov5/blob/master/benchmarks.py) on MacOS, Windows, and Ubuntu every 24 hours and on every commit. + + ## Introducing YOLOv8 🚀 + + We're excited to announce the launch of our latest state-of-the-art (SOTA) object detection model for 2023 - [YOLOv8](https://github.com/ultralytics/ultralytics) 🚀! + + Designed to be fast, accurate, and easy to use, YOLOv8 is an ideal choice for a wide range of object detection, image segmentation and image classification tasks. With YOLOv8, you'll be able to quickly and accurately detect objects in real-time, streamline your workflows, and achieve new levels of accuracy in your projects. + + Check out our [YOLOv8 Docs](https://docs.ultralytics.com/) for details and get started with: + ```bash + pip install ultralytics + ``` diff --git a/src/train_utils/train_models/models/yolov5/.github/workflows/stale.yml b/src/train_utils/train_models/models/yolov5/.github/workflows/stale.yml new file mode 100644 index 0000000..470dc61 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/workflows/stale.yml @@ -0,0 +1,40 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +name: Close stale issues +on: + schedule: + - cron: '0 0 * * *' # Runs at 00:00 UTC every day + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v8 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + stale-issue-message: | + 👋 Hello, this issue has been automatically marked as stale because it has not had recent activity. Please note it will be closed if no further activity occurs. + + Access additional [YOLOv5](https://ultralytics.com/yolov5) 🚀 resources: + - **Wiki** – https://github.com/ultralytics/yolov5/wiki + - **Tutorials** – https://github.com/ultralytics/yolov5#tutorials + - **Docs** – https://docs.ultralytics.com + + Access additional [Ultralytics](https://ultralytics.com) ⚡ resources: + - **Ultralytics HUB** – https://ultralytics.com/hub + - **Vision API** – https://ultralytics.com/yolov5 + - **About Us** – https://ultralytics.com/about + - **Join Our Team** – https://ultralytics.com/work + - **Contact Us** – https://ultralytics.com/contact + + Feel free to inform us of any other **issues** you discover or **feature requests** that come to mind in the future. Pull Requests (PRs) are also always welcomed! + + Thank you for your contributions to YOLOv5 🚀 and Vision AI ⭐! + + stale-pr-message: 'This pull request has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs. Thank you for your contributions YOLOv5 🚀 and Vision AI ⭐.' + days-before-issue-stale: 30 + days-before-issue-close: 10 + days-before-pr-stale: 90 + days-before-pr-close: 30 + exempt-issue-labels: 'documentation,tutorial,TODO' + operations-per-run: 300 # The maximum number of operations per run, used to control rate limiting. 
diff --git a/src/train_utils/train_models/models/yolov5/.github/workflows/translate-readme.yml b/src/train_utils/train_models/models/yolov5/.github/workflows/translate-readme.yml new file mode 100644 index 0000000..2bb351e --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.github/workflows/translate-readme.yml @@ -0,0 +1,26 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# README translation action to translate README.md to Chinese as README.zh-CN.md on any change to README.md + +name: Translate README + +on: + push: + branches: + - translate_readme # replace with 'master' to enable action + paths: + - README.md + +jobs: + Translate: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Setup Node.js + uses: actions/setup-node@v3 + with: + node-version: 16 + # ISO Language Codes: https://cloud.google.com/translate/docs/languages + - name: Adding README - Chinese Simplified + uses: dephraiim/translate-readme@main + with: + LANG: zh-CN diff --git a/src/train_utils/train_models/models/yolov5/.gitignore b/src/train_utils/train_models/models/yolov5/.gitignore new file mode 100644 index 0000000..6bcedfa --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.gitignore @@ -0,0 +1,257 @@ +# Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- +*.jpg +*.jpeg +*.png +*.bmp +*.tif +*.tiff +*.heic +*.JPG +*.JPEG +*.PNG +*.BMP +*.TIF +*.TIFF +*.HEIC +*.mp4 +*.mov +*.MOV +*.avi +*.data +*.json +*.cfg +!setup.cfg +!cfg/yolov3*.cfg + +storage.googleapis.com +runs/* +data/* +data/images/* +!data/*.yaml +!data/hyps +!data/scripts +!data/images +!data/images/zidane.jpg +!data/images/bus.jpg +!data/*.sh + +results*.csv + +# Datasets ------------------------------------------------------------------------------------------------------------- +coco/ +coco128/ +VOC/ + +# MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- +*.m~ +*.mat +!targets*.mat + +# Neural Network weights ----------------------------------------------------------------------------------------------- +*.weights +*.pt +*.pb +*.onnx +*.engine +*.mlmodel +*.torchscript +*.tflite +*.h5 +*_saved_model/ +*_web_model/ +*_openvino_model/ +*_paddle_model/ +darknet53.conv.74 +yolov3-tiny.conv.15 + +# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +/wandb/ +.installed.cfg +*.egg + + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv* +venv*/ +ENV*/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + + +# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon +Icon? + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/* +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/dictionaries +.html # Bokeh Plots +.pg # TensorFlow Frozen Graphs +.avi # videos + +# Sensitive or high-churn files: +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml + +# Gradle: +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin: +.idea/**/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties diff --git a/src/train_utils/train_models/models/yolov5/.pre-commit-config.yaml b/src/train_utils/train_models/models/yolov5/.pre-commit-config.yaml new file mode 100644 index 0000000..c516237 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/.pre-commit-config.yaml @@ -0,0 +1,69 @@ +# Ultralytics YOLO 🚀, GPL-3.0 license +# Pre-commit hooks. 
For more information see https://github.com/pre-commit/pre-commit-hooks/blob/main/README.md + +exclude: 'docs/' +# Define bot property if installed via https://github.com/marketplace/pre-commit-ci +ci: + autofix_prs: true + autoupdate_commit_msg: '[pre-commit.ci] pre-commit suggestions' + autoupdate_schedule: monthly + # submodules: true + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: end-of-file-fixer + - id: trailing-whitespace + - id: check-case-conflict + - id: check-yaml + - id: check-docstring-first + - id: double-quote-string-fixer + - id: detect-private-key + + - repo: https://github.com/asottile/pyupgrade + rev: v3.3.1 + hooks: + - id: pyupgrade + name: Upgrade code + args: [--py37-plus] + + - repo: https://github.com/PyCQA/isort + rev: 5.12.0 + hooks: + - id: isort + name: Sort imports + + - repo: https://github.com/google/yapf + rev: v0.32.0 + hooks: + - id: yapf + name: YAPF formatting + + - repo: https://github.com/executablebooks/mdformat + rev: 0.7.16 + hooks: + - id: mdformat + name: MD formatting + additional_dependencies: + - mdformat-gfm + - mdformat-black + # exclude: "README.md|README.zh-CN.md|CONTRIBUTING.md" + + - repo: https://github.com/PyCQA/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + name: PEP8 + + - repo: https://github.com/codespell-project/codespell + rev: v2.2.2 + hooks: + - id: codespell + args: + - --ignore-words-list=crate,nd,strack,dota + + #- repo: https://github.com/asottile/yesqa + # rev: v1.4.0 + # hooks: + # - id: yesqa diff --git a/src/train_utils/train_models/models/yolov5/CITATION.cff b/src/train_utils/train_models/models/yolov5/CITATION.cff new file mode 100644 index 0000000..8e2cf11 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/CITATION.cff @@ -0,0 +1,14 @@ +cff-version: 1.2.0 +preferred-citation: + type: software + message: If you use YOLOv5, please cite it as below. + authors: + - family-names: Jocher + given-names: Glenn + orcid: "https://orcid.org/0000-0001-5950-6979" + title: "YOLOv5 by Ultralytics" + version: 7.0 + doi: 10.5281/zenodo.3908559 + date-released: 2020-5-29 + license: GPL-3.0 + url: "https://github.com/ultralytics/yolov5" diff --git a/src/train_utils/train_models/models/yolov5/CONTRIBUTING.md b/src/train_utils/train_models/models/yolov5/CONTRIBUTING.md new file mode 100644 index 0000000..71857fa --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/CONTRIBUTING.md @@ -0,0 +1,93 @@ +## Contributing to YOLOv5 🚀 + +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing a new feature +- Becoming a maintainer + +YOLOv5 works so well due to our combined community effort, and for every small improvement you contribute you will be +helping push the frontiers of what's possible in AI 😃! + +## Submitting a Pull Request (PR) 🛠️ + +Submitting a PR is easy! This example shows how to submit a PR for updating `requirements.txt` in 4 steps: + +### 1. Select File to Update + +Select `requirements.txt` to update by clicking on it in GitHub. + +

[image: PR_step1]

+ +### 2. Click 'Edit this file' + +The button is in the top-right corner. + +

[image: PR_step2]

+ +### 3. Make Changes + +Change the `matplotlib` version from `3.2.2` to `3.3`. + +

[image: PR_step3]

+ +### 4. Preview Changes and Submit PR + +Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** +for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose +changes** button. All done, your PR is now submitted to YOLOv5 for review and approval 😃! + +

[image: PR_step4]
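
For contributors who prefer the command line, here is a minimal sketch of the same `requirements.txt` update done locally. The branch name `fix/matplotlib_version` comes from step 4 above; the fork URL, the exact `matplotlib>=3.2.2` pin format, and the `sed` edit are illustrative assumptions:

```bash
# Clone your fork of YOLOv5 (URL is illustrative) and create a working branch
git clone https://github.com/<your-username>/yolov5
cd yolov5
git checkout -b fix/matplotlib_version

# Bump the matplotlib pin as in step 3 above
# (GNU sed syntax; on macOS use `sed -i ''`, or simply edit the file by hand)
sed -i 's/matplotlib>=3.2.2/matplotlib>=3.3/' requirements.txt

# Commit and push to your fork (assumes `origin` points at the fork)
git commit -am "Update matplotlib to >=3.3"
git push origin fix/matplotlib_version

# Finally, open a Pull Request against ultralytics/yolov5 master on GitHub
```
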

+ +### PR recommendations + +To allow your work to be integrated as seamlessly as possible, we advise you to: + +- ✅ Verify your PR is **up-to-date** with `ultralytics/yolov5` `master` branch. If your PR is behind you can update + your code by clicking the 'Update branch' button or by running `git pull` and `git merge master` locally. + +

[image: Screenshot 2022-08-29 at 22 47 15]

+ +- ✅ Verify all YOLOv5 Continuous Integration (CI) **checks are passing**. + +

[image: Screenshot 2022-08-29 at 22 47 03]

+ +- ✅ Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase + but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ — Bruce Lee + +## Submitting a Bug Report 🐛 + +If you spot a problem with YOLOv5 please submit a Bug Report! + +For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few +short guidelines below to help users provide what we need to get started. + +When asking a question, people will be better able to provide help if you provide **code** that they can easily +understand and use to **reproduce** the problem. This is referred to by community members as creating +a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces +the problem should be: + +- ✅ **Minimal** – Use as little code as possible that still produces the same problem +- ✅ **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +- ✅ **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem + +In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code +should be: + +- ✅ **Current** – Verify that your code is up-to-date with the current + GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new + copy to ensure your problem has not already been resolved by previous commits. +- ✅ **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this + repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. + +If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the 🐛 +**Bug Report** [template](https://github.com/ultralytics/yolov5/issues/new/choose) and provide +a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example) to help us better +understand and diagnose your problem. + +## License + +By contributing, you agree that your contributions will be licensed under +the [GPL-3.0 license](https://choosealicense.com/licenses/gpl-3.0/) diff --git a/src/train_utils/train_models/models/yolov5/LICENSE b/src/train_utils/train_models/models/yolov5/LICENSE new file mode 100644 index 0000000..92b370f --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/LICENSE @@ -0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. 
+ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. 
This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. 
+ + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. 
+ + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. 
Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+  The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/src/train_utils/train_models/models/yolov5/README.md b/src/train_utils/train_models/models/yolov5/README.md
new file mode 100644
index 0000000..cb15407
--- /dev/null
+++ b/src/train_utils/train_models/models/yolov5/README.md
@@ -0,0 +1,487 @@
+
+

+ + +

+ +[English](README.md) | [简体中文](README.zh-CN.md) +
+ +
+ YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+
+YOLOv5 🚀 is the world's most loved vision AI, representing Ultralytics open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.
+
+To request an Enterprise License, please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+
+
+ + + + + + + + + + + + + + + + + +
+ +
+
+ +##
YOLOv8 🚀 NEW
+ +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. + +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: + +```commandline +pip install ultralytics +``` + +
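+Once installed, a minimal Python sketch of the Ultralytics API looks roughly like the following (the model name and test image URL here are only illustrative):
+
+```python
+from ultralytics import YOLO
+
+# Load a pretrained YOLOv8 nano detection model (weights download on first use)
+model = YOLO("yolov8n.pt")
+
+# Run inference on an image and inspect the detected boxes
+results = model("https://ultralytics.com/images/zidane.jpg")
+for r in results:
+    print(r.boxes)
+```
+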
+ + +
+ +##
Documentation
+ +See the [YOLOv5 Docs](https://docs.ultralytics.com/yolov5) for full documentation on training, testing and deployment. See below for quickstart examples. + +
+Install + +Clone repo and install [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) in a +[**Python>=3.7.0**](https://www.python.org/) environment, including +[**PyTorch>=1.7**](https://pytorch.org/get-started/locally/). + +```bash +git clone https://github.com/ultralytics/yolov5 # clone +cd yolov5 +pip install -r requirements.txt # install +``` + +
+ +
+Inference + +YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) inference. [Models](https://github.com/ultralytics/yolov5/tree/master/models) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). + +```python +import torch + +# Model +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom + +# Images +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
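+As a small follow-up sketch building on the snippet above, the detections can also be pulled out as a pandas DataFrame via the `.pandas()` accessor mentioned in the code comment:
+
+```python
+# Detections for the first image as a DataFrame with columns
+# xmin, ymin, xmax, ymax, confidence, class, name
+df = results.pandas().xyxy[0]
+print(df[["name", "confidence"]])
+```
+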
+ +
+Inference with detect.py + +`detect.py` runs inference on a variety of sources, downloading [models](https://github.com/ultralytics/yolov5/tree/master/models) automatically from +the latest YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) and saving results to `runs/detect`. + +```bash +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+Training + +The commands below reproduce YOLOv5 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) +results. [Models](https://github.com/ultralytics/yolov5/tree/master/models) +and [datasets](https://github.com/ultralytics/yolov5/tree/master/data) download automatically from the latest +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases). Training times for YOLOv5n/s/m/l/x are +1/2/4/6/8 days on a V100 GPU ([Multi-GPU](https://github.com/ultralytics/yolov5/issues/475) times faster). Use the +largest `--batch-size` possible, or pass `--batch-size -1` for +YOLOv5 [AutoBatch](https://github.com/ultralytics/yolov5/pull/5092). Batch sizes shown for V100-16GB. + +```bash +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
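+For larger detection runs, Multi-GPU DDP training follows the same pattern as the segmentation and classification DDP examples further down; a sketch (the GPU count and batch size are illustrative and should match your hardware):
+
+```bash
+# Multi-GPU DDP training of YOLOv5s on COCO across 4 GPUs
+python -m torch.distributed.run --nproc_per_node 4 train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5s.yaml --batch-size 256 --device 0,1,2,3
+```
+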
+ +
+Tutorials + +- [Train Custom Data](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 RECOMMENDED +- [Tips for Best Training Results](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ RECOMMENDED +- [Multi-GPU Training](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 NEW +- [TFLite, ONNX, CoreML, TensorRT Export](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson platform Deployment](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 NEW +- [Test-Time Augmentation (TTA)](https://docs.ultralytics.com/yolov5/tta) +- [Model Ensembling](https://docs.ultralytics.com/yolov5/ensemble) +- [Model Pruning/Sparsity](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [Hyperparameter Evolution](https://docs.ultralytics.com/yolov5/hyp_evolution) +- [Transfer Learning with Frozen Layers](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [Architecture Summary](https://docs.ultralytics.com/yolov5/architecture) 🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML Logging](https://docs.ultralytics.com/yolov5/clearml) 🌟 NEW +- [YOLOv5 with Neural Magic's Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 NEW +- [Comet Logging](https://docs.ultralytics.com/yolov5/comet) 🌟 NEW + +
+ +##
Integrations
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| Roboflow | ClearML ⭐ NEW | Comet ⭐ NEW | Neural Magic ⭐ NEW | +| :--------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: | +| Label and export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | Automatically track, visualize and even remotely train YOLOv5 using [ClearML](https://cutt.ly/yolov5-readme-clearml) (open-source!) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet2) lets you save YOLOv5 models, resume training, and interactively visualise and debug predictions | Run YOLOv5 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) | + +##
Ultralytics HUB
+ +Experience seamless AI with [Ultralytics HUB](https://bit.ly/ultralytics_hub) ⭐, the all-in-one solution for data visualization, YOLOv5 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now! + + + + +##
Why YOLOv5
+ +YOLOv5 has been designed to be super easy to get started and simple to learn. We prioritize real-world results. + +

+
+ YOLOv5-P5 640 Figure + +

+
+
+ Figure Notes + +- **COCO AP val** denotes mAP@0.5:0.95 metric measured on the 5000-image [COCO val2017](http://cocodataset.org) dataset over various inference sizes from 256 to 1536. +- **GPU Speed** measures average inference time per image on [COCO val2017](http://cocodataset.org) dataset using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100 instance at batch-size 32. +- **EfficientDet** data from [google/automl](https://github.com/google/automl) at batch size 8. +- **Reproduce** by `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### Pretrained Checkpoints + +| Model | size
(pixels) | mAPval
50-95 | mAPval
50 | Speed
CPU b1
(ms) | Speed
V100 b1
(ms) | Speed
V100 b32
(ms) | params
(M) | FLOPs
@640 (B) | +| ----------------------------------------------------------------------------------------------- | --------------------- | -------------------- | ----------------- | ---------------------------- | ----------------------------- | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+ [TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
+ Table Notes + +- All checkpoints are trained to 300 epochs with default settings. Nano and Small models use [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml) hyps, all others use [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml). +- **mAPval** values are for single-model single-scale on [COCO val2017](http://cocodataset.org) dataset.
Reproduce by `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **Speed** averaged over COCO val images using a [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) instance. NMS times (~1 ms/img) not included.
Reproduce by `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [Test Time Augmentation](https://github.com/ultralytics/yolov5/issues/303) includes reflection and scale augmentations.
Reproduce by `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
Segmentation
+ +Our new YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) instance segmentation models are the fastest and most accurate in the world, beating all current [SOTA benchmarks](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco). We've made them super simple to train, validate and deploy. See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v7.0) and visit our [YOLOv5 Segmentation Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) for quickstart tutorials. + +
+ Segmentation Checkpoints + +
+ + +
+
+We trained YOLOv5 segmentation models on COCO for 300 epochs at image size 640 using A100 GPUs. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) notebooks for easy reproducibility.
+
+| Model | size
(pixels) | mAPbox
50-95 | mAPmask
50-95 | Train time
300 epochs
A100 (hours) | Speed
ONNX CPU
(ms) | Speed
TRT A100
(ms) | params
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------------- | -------------------- | --------------------- | --------------------------------------------- | ------------------------------ | ------------------------------ | ------------------ | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- All checkpoints are trained to 300 epochs with SGD optimizer with `lr0=0.01` and `weight_decay=5e-5` at image size 640 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5_v70_official +- **Accuracy** values are for single-model single-scale on COCO dataset.
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt` +- **Speed** averaged over 100 inference images using a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values indicate inference speed only (NMS adds about 1ms per image).
Reproduce by `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half` + +
+ +
+ Segmentation Usage Examples  Open In Colab + +### Train + +YOLOv5 segmentation training supports auto-download COCO128-seg segmentation dataset with `--data coco128-seg.yaml` argument and manual download of COCO-segments dataset with `bash data/scripts/get_coco.sh --train --val --segments` and then `python train.py --data coco.yaml`. + +```bash +# Single-GPU +python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5s-seg mask mAP on COCO dataset: + +```bash +bash data/scripts/get_coco.sh --val --segments # download COCO val segments split (780MB, 5000 images) +python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate +``` + +### Predict + +Use pretrained YOLOv5m-seg.pt to predict bus.jpg: + +```bash +python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5m-seg.pt" +) # load from PyTorch Hub (WARNING: inference not yet supported) +``` + +| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) | +| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- | + +### Export + +Export YOLOv5s-seg model to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0 +``` + +
+ +##
Classification
+ +YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See full details in our [Release Notes](https://github.com/ultralytics/yolov5/releases/v6.2) and visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for quickstart tutorials. + +
+ Classification Checkpoints + +
+
+We trained YOLOv5-cls classification models on ImageNet for 90 epochs using a 4xA100 instance, and we trained ResNet and EfficientNet models alongside them with the same default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. We ran all speed tests on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.
+
+| Model | size
(pixels) | acc
top1 | acc
top5 | Training
90 epochs
4xA100 (hours) | Speed
ONNX CPU
(ms) | Speed
TensorRT V100
(ms) | params
(M) | FLOPs
@224 (B) | +| -------------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | -------------------------------------------- | ------------------------------ | ----------------------------------- | ------------------ | ---------------------- | +| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** | +| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 | +| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 | +| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 | +| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 | +| | | | | | | | | | +| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 | +| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 | +| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 | +| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 | +| | | | | | | | | | +| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 | +| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 | +| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 | +| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 | + +
+ Table Notes (click to expand) + +- All checkpoints are trained to 90 epochs with SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
Runs logged to https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2 +- **Accuracy** values are for single-model single-scale on [ImageNet-1k](https://www.image-net.org/index.php) dataset.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224` +- **Speed** averaged over 100 inference images using a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
Reproduce by `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1` +- **Export** to ONNX at FP32 and TensorRT at FP16 done with `export.py`.
Reproduce by `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224` + +
+
+ +
+ Classification Usage Examples  Open In Colab + +### Train + +YOLOv5 classification training supports auto-download of MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof, and ImageNet datasets with the `--data` argument. To start training on MNIST for example use `--data mnist`. + +```bash +# Single-GPU +python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128 + +# Multi-GPU DDP +python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 +``` + +### Val + +Validate YOLOv5m-cls accuracy on ImageNet-1k dataset: + +```bash +bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) +python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate +``` + +### Predict + +Use pretrained YOLOv5s-cls.pt to predict bus.jpg: + +```bash +python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg +``` + +```python +model = torch.hub.load( + "ultralytics/yolov5", "custom", "yolov5s-cls.pt" +) # load from PyTorch Hub +``` + +### Export + +Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT: + +```bash +python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224 +``` + +
+ +##
Environments
+ +Get started in seconds with our verified environments. Click each icon below for details. + +
+ + + + + + + + + + + + + + + + + +
+ +##
Contribute
+ +We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experiences. Thank you to all our contributors! + + + + + + +##
License
+ +YOLOv5 is available under two different licenses: + +- **GPL-3.0 License**: See [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details. +- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license). + +##
Contact
+ +For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com/). + +
+
+ + + + + + + + + + + + + + + + + +
+ +[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/src/train_utils/train_models/models/yolov5/README.zh-CN.md b/src/train_utils/train_models/models/yolov5/README.zh-CN.md new file mode 100644 index 0000000..9a81959 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/README.zh-CN.md @@ -0,0 +1,482 @@ +
+

+ + +

+ +[英文](README.md)|[简体中文](README.zh-CN.md)
+ +
+ YOLOv5 CI + YOLOv5 Citation + Docker Pulls +
+ Run on Gradient + Open In Colab + Open In Kaggle +
+
+
+YOLOv5 🚀 是世界上最受欢迎的视觉 AI,代表 Ultralytics 对未来视觉 AI 方法的开源研究,结合在数千小时的研究和开发中积累的经验教训和最佳实践。
+
+如果要申请企业许可证,请填写 [Ultralytics 许可](https://ultralytics.com/license) 表格。
+
+
+ + + + + + + + + + + + + + + + + +
+
+ +##
YOLOv8 🚀 NEW
+ +We are thrilled to announce the launch of Ultralytics YOLOv8 🚀, our NEW cutting-edge, state-of-the-art (SOTA) model +released at **[https://github.com/ultralytics/ultralytics](https://github.com/ultralytics/ultralytics)**. +YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of +object detection, image segmentation and image classification tasks. + +See the [YOLOv8 Docs](https://docs.ultralytics.com) for details and get started with: + +```commandline +pip install ultralytics +``` + +
+ + +
+ +##
文档
+ +有关训练、测试和部署的完整文档见[YOLOv5 文档](https://docs.ultralytics.com)。请参阅下面的快速入门示例。 + +
+安装
+
+克隆 repo,并在 [**Python>=3.7.0**](https://www.python.org/) 环境中安装 [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt),包括 [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/)。
+
+```bash
+git clone https://github.com/ultralytics/yolov5  # clone
+cd yolov5
+pip install -r requirements.txt  # install
+```
+
+
+ +
+推理 + +使用 YOLOv5 [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36) 推理。最新 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 将自动的从 +YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 + +```python +import torch + +# Model +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5n - yolov5x6, custom + +# Images +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list + +# Inference +results = model(img) + +# Results +results.print() # or .show(), .save(), .crop(), .pandas(), etc. +``` + +
+ +
+使用 detect.py 推理 + +`detect.py` 在各种来源上运行推理, [模型](https://github.com/ultralytics/yolov5/tree/master/models) 自动从 +最新的YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载,并将结果保存到 `runs/detect` 。 + +```bash +python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream +``` + +
+ +
+训练 + +下面的命令重现 YOLOv5 在 [COCO](https://github.com/ultralytics/yolov5/blob/master/data/scripts/get_coco.sh) 数据集上的结果。 +最新的 [模型](https://github.com/ultralytics/yolov5/tree/master/models) 和 [数据集](https://github.com/ultralytics/yolov5/tree/master/data) +将自动的从 YOLOv5 [release](https://github.com/ultralytics/yolov5/releases) 中下载。 +YOLOv5n/s/m/l/x 在 V100 GPU 的训练时间为 1/2/4/6/8 天( [多GPU](https://github.com/ultralytics/yolov5/issues/475) 训练速度更快)。 +尽可能使用更大的 `--batch-size` ,或通过 `--batch-size -1` 实现 +YOLOv5 [自动批处理](https://github.com/ultralytics/yolov5/pull/5092) 。下方显示的 batchsize 适用于 V100-16GB。 + +```bash +python train.py --data coco.yaml --epochs 300 --weights '' --cfg yolov5n.yaml --batch-size 128 + yolov5s 64 + yolov5m 40 + yolov5l 24 + yolov5x 16 +``` + + + +
+ +
+教程 + +- [训练自定义数据](https://docs.ultralytics.com/yolov5/train_custom_data) 🚀 推荐 +- [获得最佳训练结果的技巧](https://docs.ultralytics.com/yolov5/tips_for_best_training_results) ☘️ 推荐 +- [多 GPU 训练](https://docs.ultralytics.com/yolov5/multi_gpu_training) +- [PyTorch Hub](https://docs.ultralytics.com/yolov5/pytorch_hub) 🌟 新 +- [TFLite, ONNX, CoreML, TensorRT 导出](https://docs.ultralytics.com/yolov5/export) 🚀 +- [NVIDIA Jetson 平台部署](https://docs.ultralytics.com/yolov5/jetson_nano) 🌟 新 +- [测试时增强(TTA)](https://docs.ultralytics.com/yolov5/tta) +- [模型集成](https://docs.ultralytics.com/yolov5/ensemble) +- [模型剪枝/稀疏性](https://docs.ultralytics.com/yolov5/pruning_sparsity) +- [超参数进化](https://docs.ultralytics.com/yolov5/hyp_evolution) +- [冻结层的迁移学习](https://docs.ultralytics.com/yolov5/transfer_learn_frozen) +- [架构概述](https://docs.ultralytics.com/yolov5/architecture) 🌟 新 +- [Roboflow 用于数据集、标签和主动学习](https://docs.ultralytics.com/yolov5/roboflow) +- [ClearML 日志记录](https://docs.ultralytics.com/yolov5/clearml) 🌟 新 +- [YOLOv5 与 Neural Magic 的 Deepsparse](https://docs.ultralytics.com/yolov5/neural_magic) 🌟 新 +- [Comet 日志记录](https://docs.ultralytics.com/yolov5/comet) 🌟 新 + +
+ +##
模块集成
+ +
+ + +
+
+ +
+ + + + + + + + + + + +
+ +| Roboflow | ClearML ⭐ 新 | Comet ⭐ 新 | Neural Magic ⭐ 新 | +| :--------------------------------------------------------------------------------: | :-------------------------------------------------------------------------: | :--------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | +| 将您的自定义数据集进行标注并直接导出到 YOLOv5 以进行训练 [Roboflow](https://roboflow.com/?ref=ultralytics) | 自动跟踪、可视化甚至远程训练 YOLOv5 [ClearML](https://cutt.ly/yolov5-readme-clearml)(开源!) | 永远免费,[Comet](https://bit.ly/yolov5-readme-comet2)可让您保存 YOLOv5 模型、恢复训练以及交互式可视化和调试预测 | 使用 [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic),运行 YOLOv5 推理的速度最高可提高6倍 | + +##
Ultralytics HUB
+
+[Ultralytics HUB](https://bit.ly/ultralytics_hub) 是我们的⭐**新的**无代码解决方案,用于数据集可视化、YOLOv5 🚀 模型训练,并以无缝体验部署到现实世界。现在就开始**免费**使用它!
+
+
+
+##
为什么选择 YOLOv5
+ +YOLOv5 超级容易上手,简单易学。我们优先考虑现实世界的结果。 + +

+
+ YOLOv5-P5 640 图 + +

+
+
+ 图表笔记 + +- **COCO AP val** 表示 mAP@0.5:0.95 指标,在 [COCO val2017](http://cocodataset.org) 数据集的 5000 张图像上测得, 图像包含 256 到 1536 各种推理大小。 +- **显卡推理速度** 为在 [COCO val2017](http://cocodataset.org) 数据集上的平均推理时间,使用 [AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/) V100实例,batchsize 为 32 。 +- **EfficientDet** 数据来自 [google/automl](https://github.com/google/automl) , batchsize 为32。 +- **复现命令** 为 `python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n6.pt yolov5s6.pt yolov5m6.pt yolov5l6.pt yolov5x6.pt` + +
+ +### 预训练模型 + +| 模型 | 尺寸
(像素) | mAPval
50-95 | mAPval
50 | 推理速度
CPU b1
(ms) | 推理速度
V100 b1
(ms) | 速度
V100 b32
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ---------------------------------------------------------------------------------------------- | --------------- | -------------------- | ----------------- | --------------------------- | ---------------------------- | --------------------------- | --------------- | ---------------------- | +| [YOLOv5n](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n.pt) | 640 | 28.0 | 45.7 | **45** | **6.3** | **0.6** | **1.9** | **4.5** | +| [YOLOv5s](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt) | 640 | 37.4 | 56.8 | 98 | 6.4 | 0.9 | 7.2 | 16.5 | +| [YOLOv5m](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m.pt) | 640 | 45.4 | 64.1 | 224 | 8.2 | 1.7 | 21.2 | 49.0 | +| [YOLOv5l](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l.pt) | 640 | 49.0 | 67.3 | 430 | 10.1 | 2.7 | 46.5 | 109.1 | +| [YOLOv5x](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x.pt) | 640 | 50.7 | 68.9 | 766 | 12.1 | 4.8 | 86.7 | 205.7 | +| | | | | | | | | | +| [YOLOv5n6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n6.pt) | 1280 | 36.0 | 54.4 | 153 | 8.1 | 2.1 | 3.2 | 4.6 | +| [YOLOv5s6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s6.pt) | 1280 | 44.8 | 63.7 | 385 | 8.2 | 3.6 | 12.6 | 16.8 | +| [YOLOv5m6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m6.pt) | 1280 | 51.3 | 69.3 | 887 | 11.1 | 6.8 | 35.7 | 50.0 | +| [YOLOv5l6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l6.pt) | 1280 | 53.7 | 71.3 | 1784 | 15.8 | 10.5 | 76.8 | 111.4 | +| [YOLOv5x6](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x6.pt)
+[TTA] | 1280
1536 | 55.0
**55.8** | 72.7
**72.7** | 3136
- | 26.2
- | 19.4
- | 140.7
- | 209.8
- | + +
+ 笔记
+
+- 所有模型都使用默认配置,训练 300 epochs。Nano 和 Small 模型使用 [hyp.scratch-low.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-low.yaml),其他模型都使用 [hyp.scratch-high.yaml](https://github.com/ultralytics/yolov5/blob/master/data/hyps/hyp.scratch-high.yaml)。
+- **mAPval** 在单模型单尺度上计算,数据集使用 [COCO val2017](http://cocodataset.org)。
复现命令 `python val.py --data coco.yaml --img 640 --conf 0.001 --iou 0.65` +- **推理速度**在 COCO val 图像总体时间上进行平均得到,测试环境使用[AWS p3.2xlarge](https://aws.amazon.com/ec2/instance-types/p3/)实例。 NMS 时间 (大约 1 ms/img) 不包括在内。
复现命令 `python val.py --data coco.yaml --img 640 --task speed --batch 1` +- **TTA** [测试时数据增强](https://github.com/ultralytics/yolov5/issues/303) 包括反射和尺度变换。
复现命令 `python val.py --data coco.yaml --img 1536 --iou 0.7 --augment` + +
+ +##
实例分割模型 ⭐ 新
+ +我们新的 YOLOv5 [release v7.0](https://github.com/ultralytics/yolov5/releases/v7.0) 实例分割模型是世界上最快和最准确的模型,击败所有当前 [SOTA 基准](https://paperswithcode.com/sota/real-time-instance-segmentation-on-mscoco)。我们使它非常易于训练、验证和部署。更多细节请查看 [发行说明](https://github.com/ultralytics/yolov5/releases/v7.0) 或访问我们的 [YOLOv5 分割 Colab 笔记本](https://github.com/ultralytics/yolov5/blob/master/segment/tutorial.ipynb) 以快速入门。 + +
+ 实例分割模型列表 + +
+ +
+ + +
+ +我们使用 A100 GPU 在 COCO 上以 640 图像大小训练了 300 epochs 得到 YOLOv5 分割模型。我们将所有模型导出到 ONNX FP32 以进行 CPU 速度测试,并导出到 TensorRT FP16 以进行 GPU 速度测试。为了便于再现,我们在 Google [Colab Pro](https://colab.research.google.com/signup) 上进行了所有速度测试。 + +| 模型 | 尺寸
(像素) | mAPbox
50-95 | mAPmask
50-95 | 训练时长
300 epochs
A100 GPU(小时) | 推理速度
ONNX CPU
(ms) | 推理速度
TRT A100
(ms) | 参数量
(M) | FLOPs
@640 (B) | +| ------------------------------------------------------------------------------------------ | --------------- | -------------------- | --------------------- | --------------------------------------- | ----------------------------- | ----------------------------- | --------------- | ---------------------- | +| [YOLOv5n-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-seg.pt) | 640 | 27.6 | 23.4 | 80:17 | **62.7** | **1.2** | **2.0** | **7.1** | +| [YOLOv5s-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt) | 640 | 37.6 | 31.7 | 88:16 | 173.3 | 1.4 | 7.6 | 26.4 | +| [YOLOv5m-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-seg.pt) | 640 | 45.0 | 37.1 | 108:36 | 427.0 | 2.2 | 22.0 | 70.8 | +| [YOLOv5l-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-seg.pt) | 640 | 49.0 | 39.9 | 66:43 (2x) | 857.4 | 2.9 | 47.9 | 147.7 | +| [YOLOv5x-seg](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-seg.pt) | 640 | **50.7** | **41.4** | 62:56 (3x) | 1579.2 | 4.5 | 88.8 | 265.7 | + +- 所有模型使用 SGD 优化器训练, 都使用 `lr0=0.01` 和 `weight_decay=5e-5` 参数, 图像大小为 640 。
  Training logs are available at https://wandb.ai/glenn-jocher/YOLOv5_v70_official
- **Accuracy** values are single-model, single-scale results on the COCO dataset.
  Reproduce with `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt`
- **Inference speed** is averaged over 100 inference images on a [Colab Pro](https://colab.research.google.com/signup) A100 High-RAM instance. Values are inference-only (NMS adds roughly 1 ms per image).
  Reproduce with `python segment/val.py --data coco.yaml --weights yolov5s-seg.pt --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 is done with `export.py`; a programmatic sketch follows below.
  Reproduce with `python export.py --weights yolov5s-seg.pt --include engine --device 0 --half`
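The same conversion can be driven from Python through `export.run()`, the function that `benchmarks.py` (added later in this diff) also calls. A minimal sketch, assuming it is executed from the yolov5 repository root with the export requirements installed:

```python
# Minimal sketch of programmatic export, mirroring:
#   python export.py --weights yolov5s-seg.pt --include engine --device 0 --half
# Assumes the current working directory is the yolov5 repo root so `export` imports.
import export  # yolov5's export.py

files = export.run(
    weights='yolov5s-seg.pt',  # segmentation checkpoint to convert
    include=('engine',),       # TensorRT engine; use ('onnx',) for ONNX FP32
    imgsz=(640, 640),          # export resolution (height, width)
    device='0',                # TensorRT export requires a GPU device
    half=True,                 # FP16 export, matching the CLI command above
)
print(files[-1])               # path of the last exported artifact
```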
+ +
Segmentation Usage Examples  Open In Colab

### Train

YOLOv5 segmentation training supports automatic download of the COCO128-seg dataset via the `--data coco128-seg.yaml` argument. To download the full COCO-segments dataset manually, run `bash data/scripts/get_coco.sh --train --val --segments` and then start training with `python train.py --data coco.yaml`.

```bash
# Single-GPU
python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640

# Multi-GPU, DDP mode
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3
```

### Val

Validate YOLOv5s-seg mask mAP on the COCO dataset:

```bash
bash data/scripts/get_coco.sh --val --segments  # download COCO val segments split (780 MB, 5000 images)
python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate
```

### Predict

Use pretrained YOLOv5m-seg.pt to predict bus.jpg:

```bash
python segment/predict.py --weights yolov5m-seg.pt --data data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5m-seg.pt"
)  # load from PyTorch Hub (WARNING: segmentation inference not yet supported)
```

| ![zidane](https://user-images.githubusercontent.com/26833433/203113421-decef4c4-183d-4a0a-a6c2-6435b33bc5d3.jpg) | ![bus](https://user-images.githubusercontent.com/26833433/203113416-11fe0025-69f7-4874-a0a6-65d0bfe2999a.jpg) |
| ---------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------- |

### Export

Export the YOLOv5s-seg model to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-seg.pt --include onnx engine --img 640 --device 0
```

A sketch of consuming the exported ONNX model follows below.
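As a complement to the export command above, here is a minimal sketch of loading the resulting `yolov5s-seg.onnx` with ONNX Runtime. The expected input layout and the meaning of the outputs (raw detections plus mask prototypes) are assumptions about the exported graph, and all post-processing is omitted:

```python
# Minimal ONNX Runtime sketch for an exported yolov5s-seg.onnx (post-processing omitted).
import numpy as np
import onnxruntime as ort

session = ort.InferenceSession('yolov5s-seg.onnx', providers=['CPUExecutionProvider'])
inp = session.get_inputs()[0]                       # expected: float32 BCHW in [0, 1]
im = np.zeros((1, 3, 640, 640), dtype=np.float32)   # placeholder image batch
outputs = session.run(None, {inp.name: im})
print([o.shape for o in outputs])                   # raw predictions and mask prototypes
```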
+ +##
Classification ⭐ NEW

YOLOv5 [release v6.2](https://github.com/ultralytics/yolov5/releases) brings support for classification model training, validation and deployment! See the [release notes](https://github.com/ultralytics/yolov5/releases/v6.2) for full details, or visit our [YOLOv5 Classification Colab Notebook](https://github.com/ultralytics/yolov5/blob/master/classify/tutorial.ipynb) for a quickstart.
Classification Checkpoints
We trained YOLOv5-cls classification models on ImageNet for 90 epochs using 4x A100 instances, and trained ResNet and EfficientNet models alongside them under identical default training settings for comparison. We exported all models to ONNX FP32 for CPU speed tests and to TensorRT FP16 for GPU speed tests. All speed tests were run on Google [Colab Pro](https://colab.research.google.com/signup) for easy reproducibility.

| Model | Size<br>(pixels) | Acc<br>top1 | Acc<br>top5 | Training<br>90 epochs<br>4x A100 (hours) | Speed<br>ONNX CPU<br>(ms) | Speed<br>TensorRT V100<br>(ms) | Params<br>(M) | FLOPs<br>@224 (B) |
| -------------------------------------------------------------------------------------------------- | --------------- | ---------------- | ---------------- | ------------------------------------ | ----------------------------- | ---------------------------------- | -------------- | ---------------------- |
| [YOLOv5n-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5n-cls.pt) | 224 | 64.6 | 85.4 | 7:59 | **3.3** | **0.5** | **2.5** | **0.5** |
| [YOLOv5s-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt) | 224 | 71.5 | 90.2 | 8:09 | 6.6 | 0.6 | 5.4 | 1.4 |
| [YOLOv5m-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5m-cls.pt) | 224 | 75.9 | 92.9 | 10:06 | 15.5 | 0.9 | 12.9 | 3.9 |
| [YOLOv5l-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5l-cls.pt) | 224 | 78.0 | 94.0 | 11:56 | 26.9 | 1.4 | 26.5 | 8.5 |
| [YOLOv5x-cls](https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5x-cls.pt) | 224 | **79.0** | **94.4** | 15:04 | 54.3 | 1.8 | 48.1 | 15.9 |
| | | | | | | | | |
| [ResNet18](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet18.pt) | 224 | 70.3 | 89.5 | **6:47** | 11.2 | 0.5 | 11.7 | 3.7 |
| [ResNet34](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet34.pt) | 224 | 73.9 | 91.8 | 8:33 | 20.6 | 0.9 | 21.8 | 7.4 |
| [ResNet50](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet50.pt) | 224 | 76.8 | 93.4 | 11:10 | 23.4 | 1.0 | 25.6 | 8.5 |
| [ResNet101](https://github.com/ultralytics/yolov5/releases/download/v7.0/resnet101.pt) | 224 | 78.5 | 94.3 | 17:10 | 42.1 | 1.9 | 44.5 | 15.9 |
| | | | | | | | | |
| [EfficientNet_b0](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b0.pt) | 224 | 75.1 | 92.4 | 13:03 | 12.5 | 1.3 | 5.3 | 1.0 |
| [EfficientNet_b1](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b1.pt) | 224 | 76.4 | 93.2 | 17:04 | 14.9 | 1.6 | 7.8 | 1.5 |
| [EfficientNet_b2](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b2.pt) | 224 | 76.6 | 93.4 | 17:10 | 15.9 | 1.6 | 9.1 | 1.7 |
| [EfficientNet_b3](https://github.com/ultralytics/yolov5/releases/download/v7.0/efficientnet_b3.pt) | 224 | 77.7 | 94.0 | 19:19 | 18.9 | 1.9 | 12.2 | 2.4 |
Table Notes (click to expand)

- All checkpoints were trained for 90 epochs with the SGD optimizer with `lr0=0.001` and `weight_decay=5e-5` at image size 224 and all default settings.
  Training logs are available at https://wandb.ai/glenn-jocher/YOLOv5-Classifier-v6-2
- **Accuracy** values are single-model, single-scale results on the [ImageNet-1k](https://www.image-net.org/index.php) dataset.
  Reproduce with `python classify/val.py --data ../datasets/imagenet --img 224`
- **Inference speed** is averaged over 100 inference images on a Google [Colab Pro](https://colab.research.google.com/signup) V100 High-RAM instance.
  Reproduce with `python classify/val.py --data ../datasets/imagenet --img 224 --batch 1`
- **Export** to ONNX at FP32 and TensorRT at FP16 is done with `export.py`.
  Reproduce with `python export.py --weights yolov5s-cls.pt --include engine onnx --imgsz 224`
+
+ +
Classification Usage Examples  Open In Colab

### Train

YOLOv5 classification training supports automatic download of the MNIST, Fashion-MNIST, CIFAR10, CIFAR100, Imagenette, Imagewoof and ImageNet datasets via the `--data` argument. To start training on MNIST, for example, use `--data mnist`.

```bash
# Single-GPU
python classify/train.py --model yolov5s-cls.pt --data cifar100 --epochs 5 --img 224 --batch 128

# Multi-GPU, DDP mode
python -m torch.distributed.run --nproc_per_node 4 --master_port 1 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3
```

### Val

Validate YOLOv5m-cls accuracy on the ImageNet-1k dataset:

```bash
bash data/scripts/get_imagenet.sh --val  # download ImageNet val split (6.3G, 50000 images)
python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224  # validate
```

### Predict

Use pretrained YOLOv5s-cls.pt to predict bus.jpg:

```bash
python classify/predict.py --weights yolov5s-cls.pt --data data/images/bus.jpg
```

```python
model = torch.hub.load(
    "ultralytics/yolov5", "custom", "yolov5s-cls.pt"
)  # load from PyTorch Hub
```

A short end-to-end sketch of classifying an image with this hub-loaded checkpoint is shown below.

### Export

Export a group of trained YOLOv5s-cls, ResNet and EfficientNet models to ONNX and TensorRT:

```bash
python export.py --weights yolov5s-cls.pt resnet50.pt efficientnet_b0.pt --include onnx engine --img 224
```
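Building on the PyTorch Hub snippet above, the following is a minimal end-to-end sketch of classifying a local image. The 224-pixel resize/centre-crop and the ImageNet normalisation constants are assumptions that mirror the training setup described earlier, and `bus.jpg` is a placeholder path:

```python
# Minimal classification sketch with a hub-loaded YOLOv5s-cls checkpoint.
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s-cls.pt')  # expects BCHW tensors

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),                      # 224x224 input, as in training
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],      # assumed ImageNet mean/std
                         [0.229, 0.224, 0.225]),
])
im = preprocess(Image.open('bus.jpg').convert('RGB')).unsqueeze(0)  # shape (1, 3, 224, 224)

with torch.no_grad():
    prob = F.softmax(model(im), dim=1)[0]            # class probabilities

values, indices = prob.topk(5)
for p, i in zip(values.tolist(), indices.tolist()):
    print(f'class {i}: {p:.2f}')                     # top-5 predictions
```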
+ +##
Environments

Get started in seconds with our verified environments. Click each icon below for details.
+ + + + + + + + + + + + + + + + + +
+ +##
Contribute

We love your input! We want to make contributing to YOLOv5 as easy and transparent as possible. Please see our [Contributing Guide](CONTRIBUTING.md) to get started, and fill out the [YOLOv5 Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you to all our contributors!

##
License
YOLOv5 is available under two different licenses:

- **GPL-3.0 License**: See the [LICENSE](https://github.com/ultralytics/yolov5/blob/master/LICENSE) file for details.
- **Enterprise License**: Provides greater flexibility for commercial product development without the open-source requirements of GPL-3.0. Typical use cases are embedding Ultralytics software and AI models in commercial products and applications. Request an Enterprise License at [Ultralytics Licensing](https://ultralytics.com/license).

##
Contact

For YOLOv5 bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/yolov5/issues) or the [Ultralytics Community Forum](https://community.ultralytics.com).
+
+ + + + + + + + + + + + + + + + + +
+ +[tta]: https://github.com/ultralytics/yolov5/issues/303 diff --git a/src/train_utils/train_models/models/yolov5/benchmarks.py b/src/train_utils/train_models/models/yolov5/benchmarks.py new file mode 100644 index 0000000..09108b8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/benchmarks.py @@ -0,0 +1,169 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 benchmarks on all supported export formats + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + $ pip install -U nvidia-tensorrt --index-url https://pypi.ngc.nvidia.com # TensorRT + +Usage: + $ python benchmarks.py --weights yolov5s.pt --img 640 +""" + +import argparse +import platform +import sys +import time +from pathlib import Path + +import pandas as pd + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import export +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from segment.val import run as val_seg +from utils import notebook_init +from utils.general import LOGGER, check_yaml, file_size, print_args +from utils.torch_utils import select_device +from val import run as val_det + + +def run( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure +): + y, t = [], time.time() + device = select_device(device) + model_type = type(attempt_load(weights, fuse=False)) # DetectionModel, SegmentationModel, etc. 
+ for i, (name, f, suffix, cpu, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, CPU, GPU) + try: + assert i not in (9, 10), 'inference not supported' # Edge TPU and TF.js are unsupported + assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13' # CoreML + if 'cpu' in device.type: + assert cpu, 'inference not supported on CPU' + if 'cuda' in device.type: + assert gpu, 'inference not supported on GPU' + + # Export + if f == '-': + w = weights # PyTorch format + else: + w = export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # all others + assert suffix in str(w), 'export failed' + + # Validate + if model_type == SegmentationModel: + result = val_seg(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) + metric = result[0][7] # (box(p, r, map50, map), mask(p, r, map50, map), *loss(box, obj, cls)) + else: # DetectionModel: + result = val_det(data, w, batch_size, imgsz, plots=False, device=device, task='speed', half=half) + metric = result[0][3] # (p, r, map50, map, *loss(box, obj, cls)) + speed = result[2][1] # times (preprocess, inference, postprocess) + y.append([name, round(file_size(w), 1), round(metric, 4), round(speed, 2)]) # MB, mAP, t_inference + except Exception as e: + if hard_fail: + assert type(e) is AssertionError, f'Benchmark --hard-fail for {name}: {e}' + LOGGER.warning(f'WARNING ⚠️ Benchmark failure for {name}: {e}') + y.append([name, None, None, None]) # mAP, t_inference + if pt_only and i == 0: + break # break after PyTorch + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + c = ['Format', 'Size (MB)', 'mAP50-95', 'Inference time (ms)'] if map else ['Format', 'Export', '', ''] + py = pd.DataFrame(y, columns=c) + LOGGER.info(f'\nBenchmarks complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py if map else py.iloc[:, :2])) + if hard_fail and isinstance(hard_fail, str): + metrics = py['mAP50-95'].array # values to compare to floor + floor = eval(hard_fail) # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n + assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: mAP50-95 < floor {floor}' + return py + + +def test( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=640, # inference size (pixels) + batch_size=1, # batch size + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + half=False, # use FP16 half-precision inference + test=False, # test exports only + pt_only=False, # test PyTorch only + hard_fail=False, # throw error on benchmark failure +): + y, t = [], time.time() + device = select_device(device) + for i, (name, f, suffix, gpu) in export.export_formats().iterrows(): # index, (name, file, suffix, gpu-capable) + try: + w = weights if f == '-' else \ + export.run(weights=weights, imgsz=[imgsz], include=[f], device=device, half=half)[-1] # weights + assert suffix in str(w), 'export failed' + y.append([name, True]) + except Exception: + y.append([name, False]) # mAP, t_inference + + # Print results + LOGGER.info('\n') + parse_opt() + notebook_init() # print system info + py = pd.DataFrame(y, columns=['Format', 'Export']) + LOGGER.info(f'\nExports complete ({time.time() - t:.2f}s)') + LOGGER.info(str(py)) + return py + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--test', action='store_true', help='test exports only') + parser.add_argument('--pt-only', action='store_true', help='test PyTorch only') + parser.add_argument('--hard-fail', nargs='?', const=True, default=False, help='Exception on error or < min metric') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + print_args(vars(opt)) + return opt + + +def main(opt): + test(**vars(opt)) if opt.test else run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/classify/predict.py b/src/train_utils/train_models/models/yolov5/classify/predict.py new file mode 100644 index 0000000..5f0d407 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/classify/predict.py @@ -0,0 +1,226 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 classification inference on images, videos, directories, globs, YouTube, webcam, streams, etc. 
+ +Usage - sources: + $ python classify/predict.py --weights yolov5s-cls.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python classify/predict.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls_openvino_model # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch +import torch.nn.functional as F + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.augmentations import classify_transforms +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, print_args, strip_optimizer) +from utils.plots import Annotator +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(224, 224), # inference size (height, width) + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + nosave=False, # do not save images/videos + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-cls', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + bs = 1 # batch_size + if webcam: + view_img = check_imshow(warn=True) + dataset = LoadStreams(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + bs = len(dataset) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) + else: + dataset = LoadImages(source, img_size=imgsz, transforms=classify_transforms(imgsz[0]), vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.Tensor(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + results = model(im) + + # Post-process + with dt[2]: + pred = F.softmax(results, dim=1) # probabilities + + # Process predictions + for i, prob in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + + s += '%gx%g ' % im.shape[2:] # print string + annotator = Annotator(im0, example=str(names), pil=True) + + # Print results + top5i = prob.argsort(0, descending=True)[:5].tolist() # top 5 indices + s += f"{', '.join(f'{names[j]} {prob[j]:.2f}' for j in top5i)}, " + + # Write results + text = '\n'.join(f'{prob[j]:.2f} {names[j]}' for j in top5i) + if save_img or view_img: # Add bbox to image + annotator.text((32, 32), text, txt_color=(255, 255, 255)) + if save_txt: # Write to file + with open(f'{txt_path}.txt', 'a') as f: + f.write(text + '\n') + + # Stream results + im0 = annotator.result() + if view_img: + if 
platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f'{s}{dt[1].dt * 1E3:.1f}ms') + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[224], help='inference size h,w') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-cls', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/classify/train.py b/src/train_utils/train_models/models/yolov5/classify/train.py new file mode 100644 index 0000000..ae2363c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/classify/train.py @@ -0,0 +1,333 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 classifier model on a classification dataset + +Usage - Single-GPU training: + $ python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 2022 classify/train.py --model yolov5s-cls.pt --data imagenet --epochs 5 --img 224 --device 0,1,2,3 + +Datasets: --data mnist, fashion-mnist, cifar10, cifar100, imagenette, imagewoof, imagenet, or 'path/to/data' +YOLOv5-cls models: --model yolov5n-cls.pt, yolov5s-cls.pt, yolov5m-cls.pt, yolov5l-cls.pt, yolov5x-cls.pt +Torchvision models: --model resnet50, efficientnet_b0, etc. 
See https://pytorch.org/vision/stable/models.html +""" + +import argparse +import os +import subprocess +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.hub as hub +import torch.optim.lr_scheduler as lr_scheduler +import torchvision +from torch.cuda import amp +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from classify import val as validate +from models.experimental import attempt_load +from models.yolo import ClassificationModel, DetectionModel +from utils.dataloaders import create_classification_dataloader +from utils.general import (DATASETS_DIR, LOGGER, TQDM_BAR_FORMAT, WorkingDirectory, check_git_info, check_git_status, + check_requirements, colorstr, download, increment_path, init_seeds, print_args, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import imshow_cls +from utils.torch_utils import (ModelEMA, de_parallel, model_info, reshape_classifier_output, select_device, smart_DDP, + smart_optimizer, smartCrossEntropyLoss, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() + + +def train(opt, device): + init_seeds(opt.seed + 1 + RANK, deterministic=True) + save_dir, data, bs, epochs, nw, imgsz, pretrained = \ + opt.save_dir, Path(opt.data), opt.batch_size, opt.epochs, min(os.cpu_count() - 1, opt.workers), \ + opt.imgsz, str(opt.pretrained).lower() == 'true' + cuda = device.type != 'cpu' + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last, best = wdir / 'last.pt', wdir / 'best.pt' + + # Save run settings + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Logger + logger = GenericLogger(opt=opt, console_logger=LOGGER) if RANK in {-1, 0} else None + + # Download Dataset + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + data_dir = data if data.is_dir() else (DATASETS_DIR / data) + if not data_dir.is_dir(): + LOGGER.info(f'\nDataset not found ⚠️, missing path {data_dir}, attempting download...') + t = time.time() + if str(data) == 'imagenet': + subprocess.run(['bash', str(ROOT / 'data/scripts/get_imagenet.sh')], shell=True, check=True) + else: + url = f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{data}.zip' + download(url, dir=data_dir.parent) + s = f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n" + LOGGER.info(s) + + # Dataloaders + nc = len([x for x in (data_dir / 'train').glob('*') if x.is_dir()]) # number of classes + trainloader = create_classification_dataloader(path=data_dir / 'train', + imgsz=imgsz, + batch_size=bs // WORLD_SIZE, + augment=True, + cache=opt.cache, + rank=LOCAL_RANK, + workers=nw) + + test_dir = data_dir / 'test' if (data_dir / 'test').exists() else data_dir / 'val' # data/test or data/val + if RANK in {-1, 0}: + testloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=bs // WORLD_SIZE * 2, + augment=False, + cache=opt.cache, + rank=-1, + workers=nw) + + # Model + with torch_distributed_zero_first(LOCAL_RANK), WorkingDirectory(ROOT): + if 
Path(opt.model).is_file() or opt.model.endswith('.pt'): + model = attempt_load(opt.model, device='cpu', fuse=False) + elif opt.model in torchvision.models.__dict__: # TorchVision models i.e. resnet50, efficientnet_b0 + model = torchvision.models.__dict__[opt.model](weights='IMAGENET1K_V1' if pretrained else None) + else: + m = hub.list('ultralytics/yolov5') # + hub.list('pytorch/vision') # models + raise ModuleNotFoundError(f'--model {opt.model} not found. Available models are: \n' + '\n'.join(m)) + if isinstance(model, DetectionModel): + LOGGER.warning("WARNING ⚠️ pass YOLOv5 classifier model with '-cls' suffix, i.e. '--model yolov5s-cls.pt'") + model = ClassificationModel(model=model, nc=nc, cutoff=opt.cutoff or 10) # convert to classification model + reshape_classifier_output(model, nc) # update class count + for m in model.modules(): + if not pretrained and hasattr(m, 'reset_parameters'): + m.reset_parameters() + if isinstance(m, torch.nn.Dropout) and opt.dropout is not None: + m.p = opt.dropout # set dropout + for p in model.parameters(): + p.requires_grad = True # for training + model = model.to(device) + + # Info + if RANK in {-1, 0}: + model.names = trainloader.dataset.classes # attach class names + model.transforms = testloader.dataset.torch_transforms # attach inference transforms + model_info(model) + if opt.verbose: + LOGGER.info(model) + images, labels = next(iter(trainloader)) + file = imshow_cls(images[:25], labels[:25], names=model.names, f=save_dir / 'train_images.jpg') + logger.log_images(file, name='Train Examples') + logger.log_graph(model, imgsz) # log model + + # Optimizer + optimizer = smart_optimizer(model, opt.optimizer, opt.lr0, momentum=0.9, decay=opt.decay) + + # Scheduler + lrf = 0.01 # final lr (fraction of lr0) + # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - lrf) + lrf # cosine + lf = lambda x: (1 - x / epochs) * (1 - lrf) + lrf # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # scheduler = lr_scheduler.OneCycleLR(optimizer, max_lr=lr0, total_steps=epochs, pct_start=0.1, + # final_div_factor=1 / 25 / lrf) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Train + t0 = time.time() + criterion = smartCrossEntropyLoss(label_smoothing=opt.label_smoothing) # loss function + best_fitness = 0.0 + scaler = amp.GradScaler(enabled=cuda) + val = test_dir.stem # 'val' or 'test' + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} test\n' + f'Using {nw * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting {opt.model} training on {data} dataset with {nc} classes for {epochs} epochs...\n\n' + f"{'Epoch':>10}{'GPU_mem':>10}{'train_loss':>12}{f'{val}_loss':>12}{'top1_acc':>12}{'top5_acc':>12}") + for epoch in range(epochs): # loop over the dataset multiple times + tloss, vloss, fitness = 0.0, 0.0, 0.0 # train loss, val loss, fitness + model.train() + if RANK != -1: + trainloader.sampler.set_epoch(epoch) + pbar = enumerate(trainloader) + if RANK in {-1, 0}: + pbar = tqdm(enumerate(trainloader), total=len(trainloader), bar_format=TQDM_BAR_FORMAT) + for i, (images, labels) in pbar: # progress bar + images, labels = images.to(device, non_blocking=True), labels.to(device) + + # Forward + with amp.autocast(enabled=cuda): # stability issues when enabled + loss = criterion(model(images), labels) + + # Backward + scaler.scale(loss).backward() + + # Optimize + scaler.unscale_(optimizer) # unscale gradients + 
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + if RANK in {-1, 0}: + # Print + tloss = (tloss * i + loss.item()) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + pbar.desc = f"{f'{epoch + 1}/{epochs}':>10}{mem:>10}{tloss:>12.3g}" + ' ' * 36 + + # Test + if i == len(pbar) - 1: # last batch + top1, top5, vloss = validate.run(model=ema.ema, + dataloader=testloader, + criterion=criterion, + pbar=pbar) # test accuracy, loss + fitness = top1 # define fitness as top1 accuracy + + # Scheduler + scheduler.step() + + # Log metrics + if RANK in {-1, 0}: + # Best fitness + if fitness > best_fitness: + best_fitness = fitness + + # Log + metrics = { + 'train/loss': tloss, + f'{val}/loss': vloss, + 'metrics/accuracy_top1': top1, + 'metrics/accuracy_top5': top5, + 'lr/0': optimizer.param_groups[0]['lr']} # learning rate + logger.log_metrics(metrics, epoch) + + # Save model + final_epoch = epoch + 1 == epochs + if (not opt.nosave) or final_epoch: + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(ema.ema).half(), # deepcopy(de_parallel(model)).half(), + 'ema': None, # deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': None, # optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fitness: + torch.save(ckpt, best) + del ckpt + + # Train complete + if RANK in {-1, 0} and final_epoch: + LOGGER.info(f'\nTraining complete ({(time.time() - t0) / 3600:.3f} hours)' + f"\nResults saved to {colorstr('bold', save_dir)}" + f'\nPredict: python classify/predict.py --weights {best} --source im.jpg' + f'\nValidate: python classify/val.py --weights {best} --data {data_dir}' + f'\nExport: python export.py --weights {best} --include onnx' + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{best}')" + f'\nVisualize: https://netron.app\n') + + # Plot examples + images, labels = (x[:25] for x in next(iter(testloader))) # first 25 images and labels + pred = torch.max(ema.ema(images.to(device)), 1)[1] + file = imshow_cls(images, labels, pred, de_parallel(model).names, verbose=False, f=save_dir / 'test_images.jpg') + + # Log results + meta = {'epochs': epochs, 'top1_acc': best_fitness, 'date': datetime.now().isoformat()} + logger.log_images(file, name='Test Examples (true-predicted)', epoch=epoch) + logger.log_model(best, epochs, metadata=meta) + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, default='yolov5s-cls.pt', help='initial weights path') + parser.add_argument('--data', type=str, default='imagenette160', help='cifar10, cifar100, mnist, imagenet, ...') + parser.add_argument('--epochs', type=int, default=10, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=64, help='total batch size for all GPUs') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='train, val image size (pixels)') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--device', default='', help='cuda 
device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--pretrained', nargs='?', const=True, default=True, help='start from i.e. --pretrained False') + parser.add_argument('--optimizer', choices=['SGD', 'Adam', 'AdamW', 'RMSProp'], default='Adam', help='optimizer') + parser.add_argument('--lr0', type=float, default=0.001, help='initial learning rate') + parser.add_argument('--decay', type=float, default=5e-5, help='weight decay') + parser.add_argument('--label-smoothing', type=float, default=0.1, help='Label smoothing epsilon') + parser.add_argument('--cutoff', type=int, default=None, help='Model layer cutoff index for Classify() head') + parser.add_argument('--dropout', type=float, default=None, help='Dropout (fraction)') + parser.add_argument('--verbose', action='store_true', help='Verbose mode') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + assert opt.batch_size != -1, 'AutoBatch is coming soon for classification, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') + + # Parameters + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok) # increment run + + # Train + train(opt, device) + + +def run(**kwargs): + # Usage: from yolov5 import classify; classify.train.run(data=mnist, imgsz=320, model='yolov5m') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/classify/tutorial.ipynb b/src/train_utils/train_models/models/yolov5/classify/tutorial.ipynb new file mode 100644 index 0000000..5872360 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/classify/tutorial.ipynb @@ -0,0 +1,1480 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "0806e375-610d-4ec0-c867-763dbb518279" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`classify/predict.py` runs YOLOv5 Classification inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict-cls`. Example inference sources are:\n", + "\n", + "```shell\n", + "python classify/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "50504ef7-aa3e-4281-a4e3-d0c7df3c0ffe" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/predict: \u001b[0mweights=['yolov5s-cls.pt'], source=data/images, data=data/coco128.yaml, imgsz=[224, 224], device=, view_img=False, save_txt=False, nosave=False, augment=False, visualize=False, update=False, project=runs/predict-cls, name=exp, exist_ok=False, half=False, dnn=False, vid_stride=1\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-cls.pt to yolov5s-cls.pt...\n", + "100% 10.5M/10.5M [00:00<00:00, 12.3MB/s]\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 224x224 minibus 0.39, police van 0.24, amphibious vehicle 0.05, recreational vehicle 0.04, trolleybus 0.03, 3.9ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 224x224 suit 0.38, bow tie 0.19, bridegroom 0.18, rugby ball 0.04, stage 0.02, 4.6ms\n", + "Speed: 0.3ms pre-process, 4.3ms inference, 1.5ms NMS per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/predict-cls/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python classify/predict.py --weights yolov5s-cls.pt --img 224 --source data/images\n", + "# display.Image(filename='runs/predict-cls/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [Imagenet](https://image-net.org/) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "20fc0630-141e-4a90-ea06-342cbd7ce496" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "--2022-11-22 19:53:40-- https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar\n", + "Resolving image-net.org (image-net.org)... 171.64.68.16\n", + "Connecting to image-net.org (image-net.org)|171.64.68.16|:443... connected.\n", + "HTTP request sent, awaiting response... 200 OK\n", + "Length: 6744924160 (6.3G) [application/x-tar]\n", + "Saving to: ‘ILSVRC2012_img_val.tar’\n", + "\n", + "ILSVRC2012_img_val. 100%[===================>] 6.28G 16.1MB/s in 10m 52s \n", + "\n", + "2022-11-22 20:04:32 (9.87 MB/s) - ‘ILSVRC2012_img_val.tar’ saved [6744924160/6744924160]\n", + "\n" + ] + } + ], + "source": [ + "# Download Imagenet val (6.3G, 50000 images)\n", + "!bash data/scripts/get_imagenet.sh --val" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "41843132-98e2-4c25-d474-4cd7b246fb8e" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/val: \u001b[0mdata=../datasets/imagenet, weights=['yolov5s-cls.pt'], batch_size=128, imgsz=224, device=, workers=8, verbose=True, project=runs/val-cls, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... 
\n", + "Model summary: 117 layers, 5447688 parameters, 0 gradients, 11.4 GFLOPs\n", + "validating: 100% 391/391 [04:57<00:00, 1.31it/s]\n", + " Class Images top1_acc top5_acc\n", + " all 50000 0.715 0.902\n", + " tench 50 0.94 0.98\n", + " goldfish 50 0.88 0.92\n", + " great white shark 50 0.78 0.96\n", + " tiger shark 50 0.68 0.96\n", + " hammerhead shark 50 0.82 0.92\n", + " electric ray 50 0.76 0.9\n", + " stingray 50 0.7 0.9\n", + " cock 50 0.78 0.92\n", + " hen 50 0.84 0.96\n", + " ostrich 50 0.98 1\n", + " brambling 50 0.9 0.96\n", + " goldfinch 50 0.92 0.98\n", + " house finch 50 0.88 0.96\n", + " junco 50 0.94 0.98\n", + " indigo bunting 50 0.86 0.88\n", + " American robin 50 0.9 0.96\n", + " bulbul 50 0.84 0.96\n", + " jay 50 0.9 0.96\n", + " magpie 50 0.84 0.96\n", + " chickadee 50 0.9 1\n", + " American dipper 50 0.82 0.92\n", + " kite 50 0.76 0.94\n", + " bald eagle 50 0.92 1\n", + " vulture 50 0.96 1\n", + " great grey owl 50 0.94 0.98\n", + " fire salamander 50 0.96 0.98\n", + " smooth newt 50 0.58 0.94\n", + " newt 50 0.74 0.9\n", + " spotted salamander 50 0.86 0.94\n", + " axolotl 50 0.86 0.96\n", + " American bullfrog 50 0.78 0.92\n", + " tree frog 50 0.84 0.96\n", + " tailed frog 50 0.48 0.8\n", + " loggerhead sea turtle 50 0.68 0.94\n", + " leatherback sea turtle 50 0.5 0.8\n", + " mud turtle 50 0.64 0.84\n", + " terrapin 50 0.52 0.98\n", + " box turtle 50 0.84 0.98\n", + " banded gecko 50 0.7 0.88\n", + " green iguana 50 0.76 0.94\n", + " Carolina anole 50 0.58 0.96\n", + "desert grassland whiptail lizard 50 0.82 0.94\n", + " agama 50 0.74 0.92\n", + " frilled-necked lizard 50 0.84 0.86\n", + " alligator lizard 50 0.58 0.78\n", + " Gila monster 50 0.72 0.8\n", + " European green lizard 50 0.42 0.9\n", + " chameleon 50 0.76 0.84\n", + " Komodo dragon 50 0.86 0.96\n", + " Nile crocodile 50 0.7 0.84\n", + " American alligator 50 0.76 0.96\n", + " triceratops 50 0.9 0.94\n", + " worm snake 50 0.76 0.88\n", + " ring-necked snake 50 0.8 0.92\n", + " eastern hog-nosed snake 50 0.58 0.88\n", + " smooth green snake 50 0.6 0.94\n", + " kingsnake 50 0.82 0.9\n", + " garter snake 50 0.88 0.94\n", + " water snake 50 0.7 0.94\n", + " vine snake 50 0.66 0.76\n", + " night snake 50 0.34 0.82\n", + " boa constrictor 50 0.8 0.96\n", + " African rock python 50 0.48 0.76\n", + " Indian cobra 50 0.82 0.94\n", + " green mamba 50 0.54 0.86\n", + " sea snake 50 0.62 0.9\n", + " Saharan horned viper 50 0.56 0.86\n", + "eastern diamondback rattlesnake 50 0.6 0.86\n", + " sidewinder 50 0.28 0.86\n", + " trilobite 50 0.98 0.98\n", + " harvestman 50 0.86 0.94\n", + " scorpion 50 0.86 0.94\n", + " yellow garden spider 50 0.92 0.96\n", + " barn spider 50 0.38 0.98\n", + " European garden spider 50 0.62 0.98\n", + " southern black widow 50 0.88 0.94\n", + " tarantula 50 0.94 1\n", + " wolf spider 50 0.82 0.92\n", + " tick 50 0.74 0.84\n", + " centipede 50 0.68 0.82\n", + " black grouse 50 0.88 0.98\n", + " ptarmigan 50 0.78 0.94\n", + " ruffed grouse 50 0.88 1\n", + " prairie grouse 50 0.92 1\n", + " peacock 50 0.88 0.9\n", + " quail 50 0.9 0.94\n", + " partridge 50 0.74 0.96\n", + " grey parrot 50 0.9 0.96\n", + " macaw 50 0.88 0.98\n", + "sulphur-crested cockatoo 50 0.86 0.92\n", + " lorikeet 50 0.96 1\n", + " coucal 50 0.82 0.88\n", + " bee eater 50 0.96 0.98\n", + " hornbill 50 0.9 0.96\n", + " hummingbird 50 0.88 0.96\n", + " jacamar 50 0.92 0.94\n", + " toucan 50 0.84 0.94\n", + " duck 50 0.76 0.94\n", + " red-breasted merganser 50 0.86 0.96\n", + " goose 50 0.74 0.96\n", + " black swan 50 0.94 
0.98\n", + " tusker 50 0.54 0.92\n", + " echidna 50 0.98 1\n", + " platypus 50 0.72 0.84\n", + " wallaby 50 0.78 0.88\n", + " koala 50 0.84 0.92\n", + " wombat 50 0.78 0.84\n", + " jellyfish 50 0.88 0.96\n", + " sea anemone 50 0.72 0.9\n", + " brain coral 50 0.88 0.96\n", + " flatworm 50 0.8 0.98\n", + " nematode 50 0.86 0.9\n", + " conch 50 0.74 0.88\n", + " snail 50 0.78 0.88\n", + " slug 50 0.74 0.82\n", + " sea slug 50 0.88 0.98\n", + " chiton 50 0.88 0.98\n", + " chambered nautilus 50 0.88 0.92\n", + " Dungeness crab 50 0.78 0.94\n", + " rock crab 50 0.68 0.86\n", + " fiddler crab 50 0.64 0.86\n", + " red king crab 50 0.76 0.96\n", + " American lobster 50 0.78 0.96\n", + " spiny lobster 50 0.74 0.88\n", + " crayfish 50 0.56 0.86\n", + " hermit crab 50 0.78 0.96\n", + " isopod 50 0.66 0.78\n", + " white stork 50 0.88 0.96\n", + " black stork 50 0.84 0.98\n", + " spoonbill 50 0.96 1\n", + " flamingo 50 0.94 1\n", + " little blue heron 50 0.92 0.98\n", + " great egret 50 0.9 0.96\n", + " bittern 50 0.86 0.94\n", + " crane (bird) 50 0.62 0.9\n", + " limpkin 50 0.98 1\n", + " common gallinule 50 0.92 0.96\n", + " American coot 50 0.9 0.98\n", + " bustard 50 0.92 0.96\n", + " ruddy turnstone 50 0.94 1\n", + " dunlin 50 0.86 0.94\n", + " common redshank 50 0.9 0.96\n", + " dowitcher 50 0.84 0.96\n", + " oystercatcher 50 0.86 0.94\n", + " pelican 50 0.92 0.96\n", + " king penguin 50 0.88 0.96\n", + " albatross 50 0.9 1\n", + " grey whale 50 0.84 0.92\n", + " killer whale 50 0.92 1\n", + " dugong 50 0.84 0.96\n", + " sea lion 50 0.82 0.92\n", + " Chihuahua 50 0.66 0.84\n", + " Japanese Chin 50 0.72 0.98\n", + " Maltese 50 0.76 0.94\n", + " Pekingese 50 0.84 0.94\n", + " Shih Tzu 50 0.74 0.96\n", + " King Charles Spaniel 50 0.88 0.98\n", + " Papillon 50 0.86 0.94\n", + " toy terrier 50 0.48 0.94\n", + " Rhodesian Ridgeback 50 0.76 0.98\n", + " Afghan Hound 50 0.84 1\n", + " Basset Hound 50 0.8 0.92\n", + " Beagle 50 0.82 0.96\n", + " Bloodhound 50 0.48 0.72\n", + " Bluetick Coonhound 50 0.86 0.94\n", + " Black and Tan Coonhound 50 0.54 0.8\n", + "Treeing Walker Coonhound 50 0.66 0.98\n", + " English foxhound 50 0.32 0.84\n", + " Redbone Coonhound 50 0.62 0.94\n", + " borzoi 50 0.92 1\n", + " Irish Wolfhound 50 0.48 0.88\n", + " Italian Greyhound 50 0.76 0.98\n", + " Whippet 50 0.74 0.92\n", + " Ibizan Hound 50 0.6 0.86\n", + " Norwegian Elkhound 50 0.88 0.98\n", + " Otterhound 50 0.62 0.9\n", + " Saluki 50 0.72 0.92\n", + " Scottish Deerhound 50 0.86 0.98\n", + " Weimaraner 50 0.88 0.94\n", + "Staffordshire Bull Terrier 50 0.66 0.98\n", + "American Staffordshire Terrier 50 0.64 0.92\n", + " Bedlington Terrier 50 0.9 0.92\n", + " Border Terrier 50 0.86 0.92\n", + " Kerry Blue Terrier 50 0.78 0.98\n", + " Irish Terrier 50 0.7 0.96\n", + " Norfolk Terrier 50 0.68 0.9\n", + " Norwich Terrier 50 0.72 1\n", + " Yorkshire Terrier 50 0.66 0.9\n", + " Wire Fox Terrier 50 0.64 0.98\n", + " Lakeland Terrier 50 0.74 0.92\n", + " Sealyham Terrier 50 0.76 0.9\n", + " Airedale Terrier 50 0.82 0.92\n", + " Cairn Terrier 50 0.76 0.9\n", + " Australian Terrier 50 0.48 0.84\n", + " Dandie Dinmont Terrier 50 0.82 0.92\n", + " Boston Terrier 50 0.92 1\n", + " Miniature Schnauzer 50 0.68 0.9\n", + " Giant Schnauzer 50 0.72 0.98\n", + " Standard Schnauzer 50 0.74 1\n", + " Scottish Terrier 50 0.76 0.96\n", + " Tibetan Terrier 50 0.48 1\n", + "Australian Silky Terrier 50 0.66 0.96\n", + "Soft-coated Wheaten Terrier 50 0.74 0.96\n", + "West Highland White Terrier 50 0.88 0.96\n", + " Lhasa Apso 50 0.68 0.96\n", + " 
Flat-Coated Retriever 50 0.72 0.94\n", + " Curly-coated Retriever 50 0.82 0.94\n", + " Golden Retriever 50 0.86 0.94\n", + " Labrador Retriever 50 0.82 0.94\n", + "Chesapeake Bay Retriever 50 0.76 0.96\n", + "German Shorthaired Pointer 50 0.8 0.96\n", + " Vizsla 50 0.68 0.96\n", + " English Setter 50 0.7 1\n", + " Irish Setter 50 0.8 0.9\n", + " Gordon Setter 50 0.84 0.92\n", + " Brittany 50 0.84 0.96\n", + " Clumber Spaniel 50 0.92 0.96\n", + "English Springer Spaniel 50 0.88 1\n", + " Welsh Springer Spaniel 50 0.92 1\n", + " Cocker Spaniels 50 0.7 0.94\n", + " Sussex Spaniel 50 0.72 0.92\n", + " Irish Water Spaniel 50 0.88 0.98\n", + " Kuvasz 50 0.66 0.9\n", + " Schipperke 50 0.9 0.98\n", + " Groenendael 50 0.8 0.94\n", + " Malinois 50 0.86 0.98\n", + " Briard 50 0.52 0.8\n", + " Australian Kelpie 50 0.6 0.88\n", + " Komondor 50 0.88 0.94\n", + " Old English Sheepdog 50 0.94 0.98\n", + " Shetland Sheepdog 50 0.74 0.9\n", + " collie 50 0.6 0.96\n", + " Border Collie 50 0.74 0.96\n", + " Bouvier des Flandres 50 0.78 0.94\n", + " Rottweiler 50 0.88 0.96\n", + " German Shepherd Dog 50 0.8 0.98\n", + " Dobermann 50 0.68 0.96\n", + " Miniature Pinscher 50 0.76 0.88\n", + "Greater Swiss Mountain Dog 50 0.68 0.94\n", + " Bernese Mountain Dog 50 0.96 1\n", + " Appenzeller Sennenhund 50 0.22 1\n", + " Entlebucher Sennenhund 50 0.64 0.98\n", + " Boxer 50 0.7 0.92\n", + " Bullmastiff 50 0.78 0.98\n", + " Tibetan Mastiff 50 0.88 0.96\n", + " French Bulldog 50 0.84 0.94\n", + " Great Dane 50 0.54 0.9\n", + " St. Bernard 50 0.92 1\n", + " husky 50 0.46 0.98\n", + " Alaskan Malamute 50 0.76 0.96\n", + " Siberian Husky 50 0.46 0.98\n", + " Dalmatian 50 0.94 0.98\n", + " Affenpinscher 50 0.78 0.9\n", + " Basenji 50 0.92 0.94\n", + " pug 50 0.94 0.98\n", + " Leonberger 50 1 1\n", + " Newfoundland 50 0.78 0.96\n", + " Pyrenean Mountain Dog 50 0.78 0.96\n", + " Samoyed 50 0.96 1\n", + " Pomeranian 50 0.98 1\n", + " Chow Chow 50 0.9 0.96\n", + " Keeshond 50 0.88 0.94\n", + " Griffon Bruxellois 50 0.84 0.98\n", + " Pembroke Welsh Corgi 50 0.82 0.94\n", + " Cardigan Welsh Corgi 50 0.66 0.98\n", + " Toy Poodle 50 0.52 0.88\n", + " Miniature Poodle 50 0.52 0.92\n", + " Standard Poodle 50 0.8 1\n", + " Mexican hairless dog 50 0.88 0.98\n", + " grey wolf 50 0.82 0.92\n", + " Alaskan tundra wolf 50 0.78 0.98\n", + " red wolf 50 0.48 0.9\n", + " coyote 50 0.64 0.86\n", + " dingo 50 0.76 0.88\n", + " dhole 50 0.9 0.98\n", + " African wild dog 50 0.98 1\n", + " hyena 50 0.88 0.96\n", + " red fox 50 0.54 0.92\n", + " kit fox 50 0.72 0.98\n", + " Arctic fox 50 0.94 1\n", + " grey fox 50 0.7 0.94\n", + " tabby cat 50 0.54 0.92\n", + " tiger cat 50 0.22 0.94\n", + " Persian cat 50 0.9 0.98\n", + " Siamese cat 50 0.96 1\n", + " Egyptian Mau 50 0.54 0.8\n", + " cougar 50 0.9 1\n", + " lynx 50 0.72 0.88\n", + " leopard 50 0.78 0.98\n", + " snow leopard 50 0.9 0.98\n", + " jaguar 50 0.7 0.94\n", + " lion 50 0.9 0.98\n", + " tiger 50 0.92 0.98\n", + " cheetah 50 0.94 0.98\n", + " brown bear 50 0.94 0.98\n", + " American black bear 50 0.8 1\n", + " polar bear 50 0.84 0.96\n", + " sloth bear 50 0.72 0.92\n", + " mongoose 50 0.7 0.92\n", + " meerkat 50 0.82 0.92\n", + " tiger beetle 50 0.92 0.94\n", + " ladybug 50 0.86 0.94\n", + " ground beetle 50 0.64 0.94\n", + " longhorn beetle 50 0.62 0.88\n", + " leaf beetle 50 0.64 0.98\n", + " dung beetle 50 0.86 0.98\n", + " rhinoceros beetle 50 0.86 0.94\n", + " weevil 50 0.9 1\n", + " fly 50 0.78 0.94\n", + " bee 50 0.68 0.94\n", + " ant 50 0.68 0.78\n", + " grasshopper 50 0.5 0.92\n", 
+ " cricket 50 0.64 0.92\n", + " stick insect 50 0.64 0.92\n", + " cockroach 50 0.72 0.8\n", + " mantis 50 0.64 0.86\n", + " cicada 50 0.9 0.96\n", + " leafhopper 50 0.88 0.94\n", + " lacewing 50 0.78 0.92\n", + " dragonfly 50 0.82 0.98\n", + " damselfly 50 0.82 1\n", + " red admiral 50 0.94 0.96\n", + " ringlet 50 0.86 0.98\n", + " monarch butterfly 50 0.9 0.92\n", + " small white 50 0.9 1\n", + " sulphur butterfly 50 0.92 1\n", + "gossamer-winged butterfly 50 0.88 1\n", + " starfish 50 0.88 0.92\n", + " sea urchin 50 0.84 0.94\n", + " sea cucumber 50 0.66 0.84\n", + " cottontail rabbit 50 0.72 0.94\n", + " hare 50 0.84 0.96\n", + " Angora rabbit 50 0.94 0.98\n", + " hamster 50 0.96 1\n", + " porcupine 50 0.88 0.98\n", + " fox squirrel 50 0.76 0.94\n", + " marmot 50 0.92 0.96\n", + " beaver 50 0.78 0.94\n", + " guinea pig 50 0.78 0.94\n", + " common sorrel 50 0.96 0.98\n", + " zebra 50 0.94 0.96\n", + " pig 50 0.5 0.76\n", + " wild boar 50 0.84 0.96\n", + " warthog 50 0.84 0.96\n", + " hippopotamus 50 0.88 0.96\n", + " ox 50 0.48 0.94\n", + " water buffalo 50 0.78 0.94\n", + " bison 50 0.88 0.96\n", + " ram 50 0.58 0.92\n", + " bighorn sheep 50 0.66 1\n", + " Alpine ibex 50 0.92 0.98\n", + " hartebeest 50 0.94 1\n", + " impala 50 0.82 0.96\n", + " gazelle 50 0.7 0.96\n", + " dromedary 50 0.9 1\n", + " llama 50 0.82 0.94\n", + " weasel 50 0.44 0.92\n", + " mink 50 0.78 0.96\n", + " European polecat 50 0.46 0.9\n", + " black-footed ferret 50 0.68 0.96\n", + " otter 50 0.66 0.88\n", + " skunk 50 0.96 0.96\n", + " badger 50 0.86 0.92\n", + " armadillo 50 0.88 0.9\n", + " three-toed sloth 50 0.96 1\n", + " orangutan 50 0.78 0.92\n", + " gorilla 50 0.82 0.94\n", + " chimpanzee 50 0.84 0.94\n", + " gibbon 50 0.76 0.86\n", + " siamang 50 0.68 0.94\n", + " guenon 50 0.8 0.94\n", + " patas monkey 50 0.62 0.82\n", + " baboon 50 0.9 0.98\n", + " macaque 50 0.8 0.86\n", + " langur 50 0.6 0.82\n", + " black-and-white colobus 50 0.86 0.9\n", + " proboscis monkey 50 1 1\n", + " marmoset 50 0.74 0.98\n", + " white-headed capuchin 50 0.72 0.9\n", + " howler monkey 50 0.86 0.94\n", + " titi 50 0.5 0.9\n", + "Geoffroy's spider monkey 50 0.42 0.8\n", + " common squirrel monkey 50 0.76 0.92\n", + " ring-tailed lemur 50 0.72 0.94\n", + " indri 50 0.9 0.96\n", + " Asian elephant 50 0.58 0.92\n", + " African bush elephant 50 0.7 0.98\n", + " red panda 50 0.94 0.94\n", + " giant panda 50 0.94 0.98\n", + " snoek 50 0.74 0.9\n", + " eel 50 0.6 0.84\n", + " coho salmon 50 0.84 0.96\n", + " rock beauty 50 0.88 0.98\n", + " clownfish 50 0.78 0.98\n", + " sturgeon 50 0.68 0.94\n", + " garfish 50 0.62 0.8\n", + " lionfish 50 0.96 0.96\n", + " pufferfish 50 0.88 0.96\n", + " abacus 50 0.74 0.88\n", + " abaya 50 0.84 0.92\n", + " academic gown 50 0.42 0.86\n", + " accordion 50 0.8 0.9\n", + " acoustic guitar 50 0.5 0.76\n", + " aircraft carrier 50 0.8 0.96\n", + " airliner 50 0.92 1\n", + " airship 50 0.76 0.82\n", + " altar 50 0.64 0.98\n", + " ambulance 50 0.88 0.98\n", + " amphibious vehicle 50 0.64 0.94\n", + " analog clock 50 0.52 0.92\n", + " apiary 50 0.82 0.96\n", + " apron 50 0.7 0.84\n", + " waste container 50 0.4 0.8\n", + " assault rifle 50 0.42 0.84\n", + " backpack 50 0.34 0.64\n", + " bakery 50 0.4 0.68\n", + " balance beam 50 0.8 0.98\n", + " balloon 50 0.86 0.96\n", + " ballpoint pen 50 0.52 0.96\n", + " Band-Aid 50 0.7 0.9\n", + " banjo 50 0.84 1\n", + " baluster 50 0.68 0.94\n", + " barbell 50 0.56 0.9\n", + " barber chair 50 0.7 0.92\n", + " barbershop 50 0.54 0.86\n", + " barn 50 0.96 0.96\n", + " 
barometer 50 0.84 0.98\n", + " barrel 50 0.56 0.88\n", + " wheelbarrow 50 0.66 0.88\n", + " baseball 50 0.74 0.98\n", + " basketball 50 0.88 0.98\n", + " bassinet 50 0.66 0.92\n", + " bassoon 50 0.74 0.98\n", + " swimming cap 50 0.62 0.88\n", + " bath towel 50 0.54 0.78\n", + " bathtub 50 0.4 0.88\n", + " station wagon 50 0.66 0.84\n", + " lighthouse 50 0.78 0.94\n", + " beaker 50 0.52 0.68\n", + " military cap 50 0.84 0.96\n", + " beer bottle 50 0.66 0.88\n", + " beer glass 50 0.6 0.84\n", + " bell-cot 50 0.56 0.96\n", + " bib 50 0.58 0.82\n", + " tandem bicycle 50 0.86 0.96\n", + " bikini 50 0.56 0.88\n", + " ring binder 50 0.64 0.84\n", + " binoculars 50 0.54 0.78\n", + " birdhouse 50 0.86 0.94\n", + " boathouse 50 0.74 0.92\n", + " bobsleigh 50 0.92 0.96\n", + " bolo tie 50 0.8 0.94\n", + " poke bonnet 50 0.64 0.86\n", + " bookcase 50 0.66 0.92\n", + " bookstore 50 0.62 0.88\n", + " bottle cap 50 0.58 0.7\n", + " bow 50 0.72 0.86\n", + " bow tie 50 0.7 0.9\n", + " brass 50 0.92 0.96\n", + " bra 50 0.5 0.7\n", + " breakwater 50 0.62 0.86\n", + " breastplate 50 0.4 0.9\n", + " broom 50 0.6 0.86\n", + " bucket 50 0.66 0.8\n", + " buckle 50 0.5 0.68\n", + " bulletproof vest 50 0.5 0.78\n", + " high-speed train 50 0.94 0.96\n", + " butcher shop 50 0.74 0.94\n", + " taxicab 50 0.64 0.86\n", + " cauldron 50 0.44 0.66\n", + " candle 50 0.48 0.74\n", + " cannon 50 0.88 0.94\n", + " canoe 50 0.94 1\n", + " can opener 50 0.66 0.86\n", + " cardigan 50 0.68 0.8\n", + " car mirror 50 0.94 0.96\n", + " carousel 50 0.94 0.98\n", + " tool kit 50 0.56 0.78\n", + " carton 50 0.42 0.7\n", + " car wheel 50 0.38 0.74\n", + "automated teller machine 50 0.76 0.94\n", + " cassette 50 0.52 0.8\n", + " cassette player 50 0.28 0.9\n", + " castle 50 0.78 0.88\n", + " catamaran 50 0.78 1\n", + " CD player 50 0.52 0.82\n", + " cello 50 0.82 1\n", + " mobile phone 50 0.68 0.86\n", + " chain 50 0.38 0.66\n", + " chain-link fence 50 0.7 0.84\n", + " chain mail 50 0.64 0.9\n", + " chainsaw 50 0.84 0.92\n", + " chest 50 0.68 0.92\n", + " chiffonier 50 0.26 0.64\n", + " chime 50 0.62 0.84\n", + " china cabinet 50 0.82 0.96\n", + " Christmas stocking 50 0.92 0.94\n", + " church 50 0.62 0.9\n", + " movie theater 50 0.58 0.88\n", + " cleaver 50 0.32 0.62\n", + " cliff dwelling 50 0.88 1\n", + " cloak 50 0.32 0.64\n", + " clogs 50 0.58 0.88\n", + " cocktail shaker 50 0.62 0.7\n", + " coffee mug 50 0.44 0.72\n", + " coffeemaker 50 0.64 0.92\n", + " coil 50 0.66 0.84\n", + " combination lock 50 0.64 0.84\n", + " computer keyboard 50 0.7 0.82\n", + " confectionery store 50 0.54 0.86\n", + " container ship 50 0.82 0.98\n", + " convertible 50 0.78 0.98\n", + " corkscrew 50 0.82 0.92\n", + " cornet 50 0.46 0.88\n", + " cowboy boot 50 0.64 0.8\n", + " cowboy hat 50 0.64 0.82\n", + " cradle 50 0.38 0.8\n", + " crane (machine) 50 0.78 0.94\n", + " crash helmet 50 0.92 0.96\n", + " crate 50 0.52 0.82\n", + " infant bed 50 0.74 1\n", + " Crock Pot 50 0.78 0.9\n", + " croquet ball 50 0.9 0.96\n", + " crutch 50 0.46 0.7\n", + " cuirass 50 0.54 0.86\n", + " dam 50 0.74 0.92\n", + " desk 50 0.6 0.86\n", + " desktop computer 50 0.54 0.94\n", + " rotary dial telephone 50 0.88 0.94\n", + " diaper 50 0.68 0.84\n", + " digital clock 50 0.54 0.76\n", + " digital watch 50 0.58 0.86\n", + " dining table 50 0.76 0.9\n", + " dishcloth 50 0.94 1\n", + " dishwasher 50 0.44 0.78\n", + " disc brake 50 0.98 1\n", + " dock 50 0.54 0.94\n", + " dog sled 50 0.84 1\n", + " dome 50 0.72 0.92\n", + " doormat 50 0.56 0.82\n", + " drilling rig 50 0.84 0.96\n", + 
" drum 50 0.38 0.68\n", + " drumstick 50 0.56 0.72\n", + " dumbbell 50 0.62 0.9\n", + " Dutch oven 50 0.7 0.84\n", + " electric fan 50 0.82 0.86\n", + " electric guitar 50 0.62 0.84\n", + " electric locomotive 50 0.92 0.98\n", + " entertainment center 50 0.9 0.98\n", + " envelope 50 0.44 0.86\n", + " espresso machine 50 0.72 0.94\n", + " face powder 50 0.7 0.92\n", + " feather boa 50 0.7 0.84\n", + " filing cabinet 50 0.88 0.98\n", + " fireboat 50 0.94 0.98\n", + " fire engine 50 0.84 0.9\n", + " fire screen sheet 50 0.62 0.76\n", + " flagpole 50 0.74 0.88\n", + " flute 50 0.36 0.72\n", + " folding chair 50 0.62 0.84\n", + " football helmet 50 0.86 0.94\n", + " forklift 50 0.8 0.92\n", + " fountain 50 0.84 0.94\n", + " fountain pen 50 0.76 0.92\n", + " four-poster bed 50 0.78 0.94\n", + " freight car 50 0.96 1\n", + " French horn 50 0.76 0.92\n", + " frying pan 50 0.36 0.78\n", + " fur coat 50 0.84 0.96\n", + " garbage truck 50 0.9 0.98\n", + " gas mask 50 0.84 0.92\n", + " gas pump 50 0.9 0.98\n", + " goblet 50 0.68 0.82\n", + " go-kart 50 0.9 1\n", + " golf ball 50 0.84 0.9\n", + " golf cart 50 0.78 0.86\n", + " gondola 50 0.98 0.98\n", + " gong 50 0.74 0.92\n", + " gown 50 0.62 0.96\n", + " grand piano 50 0.7 0.96\n", + " greenhouse 50 0.8 0.98\n", + " grille 50 0.72 0.9\n", + " grocery store 50 0.66 0.94\n", + " guillotine 50 0.86 0.92\n", + " barrette 50 0.52 0.66\n", + " hair spray 50 0.5 0.74\n", + " half-track 50 0.78 0.9\n", + " hammer 50 0.56 0.76\n", + " hamper 50 0.64 0.84\n", + " hair dryer 50 0.56 0.74\n", + " hand-held computer 50 0.42 0.86\n", + " handkerchief 50 0.78 0.94\n", + " hard disk drive 50 0.76 0.84\n", + " harmonica 50 0.7 0.88\n", + " harp 50 0.88 0.96\n", + " harvester 50 0.78 1\n", + " hatchet 50 0.54 0.74\n", + " holster 50 0.66 0.84\n", + " home theater 50 0.64 0.94\n", + " honeycomb 50 0.56 0.88\n", + " hook 50 0.3 0.6\n", + " hoop skirt 50 0.64 0.86\n", + " horizontal bar 50 0.68 0.98\n", + " horse-drawn vehicle 50 0.88 0.94\n", + " hourglass 50 0.88 0.96\n", + " iPod 50 0.76 0.94\n", + " clothes iron 50 0.82 0.88\n", + " jack-o'-lantern 50 0.98 0.98\n", + " jeans 50 0.68 0.84\n", + " jeep 50 0.72 0.9\n", + " T-shirt 50 0.72 0.96\n", + " jigsaw puzzle 50 0.84 0.94\n", + " pulled rickshaw 50 0.86 0.94\n", + " joystick 50 0.8 0.9\n", + " kimono 50 0.84 0.96\n", + " knee pad 50 0.62 0.88\n", + " knot 50 0.66 0.8\n", + " lab coat 50 0.8 0.96\n", + " ladle 50 0.36 0.64\n", + " lampshade 50 0.48 0.84\n", + " laptop computer 50 0.26 0.88\n", + " lawn mower 50 0.78 0.96\n", + " lens cap 50 0.46 0.72\n", + " paper knife 50 0.26 0.5\n", + " library 50 0.54 0.9\n", + " lifeboat 50 0.92 0.98\n", + " lighter 50 0.56 0.78\n", + " limousine 50 0.76 0.92\n", + " ocean liner 50 0.88 0.94\n", + " lipstick 50 0.74 0.9\n", + " slip-on shoe 50 0.74 0.92\n", + " lotion 50 0.5 0.86\n", + " speaker 50 0.52 0.68\n", + " loupe 50 0.32 0.52\n", + " sawmill 50 0.72 0.9\n", + " magnetic compass 50 0.52 0.82\n", + " mail bag 50 0.68 0.92\n", + " mailbox 50 0.82 0.92\n", + " tights 50 0.22 0.94\n", + " tank suit 50 0.24 0.9\n", + " manhole cover 50 0.96 0.98\n", + " maraca 50 0.74 0.9\n", + " marimba 50 0.84 0.94\n", + " mask 50 0.44 0.82\n", + " match 50 0.66 0.9\n", + " maypole 50 0.96 1\n", + " maze 50 0.8 0.96\n", + " measuring cup 50 0.54 0.76\n", + " medicine chest 50 0.6 0.84\n", + " megalith 50 0.8 0.92\n", + " microphone 50 0.52 0.7\n", + " microwave oven 50 0.48 0.72\n", + " military uniform 50 0.62 0.84\n", + " milk can 50 0.68 0.82\n", + " minibus 50 0.7 1\n", + " miniskirt 
50 0.46 0.76\n", + " minivan 50 0.38 0.8\n", + " missile 50 0.4 0.84\n", + " mitten 50 0.76 0.88\n", + " mixing bowl 50 0.8 0.92\n", + " mobile home 50 0.54 0.78\n", + " Model T 50 0.92 0.96\n", + " modem 50 0.58 0.86\n", + " monastery 50 0.44 0.9\n", + " monitor 50 0.4 0.86\n", + " moped 50 0.56 0.94\n", + " mortar 50 0.68 0.94\n", + " square academic cap 50 0.5 0.84\n", + " mosque 50 0.9 1\n", + " mosquito net 50 0.9 0.98\n", + " scooter 50 0.9 0.98\n", + " mountain bike 50 0.78 0.96\n", + " tent 50 0.88 0.96\n", + " computer mouse 50 0.42 0.82\n", + " mousetrap 50 0.76 0.88\n", + " moving van 50 0.4 0.72\n", + " muzzle 50 0.5 0.72\n", + " nail 50 0.68 0.74\n", + " neck brace 50 0.56 0.68\n", + " necklace 50 0.86 1\n", + " nipple 50 0.7 0.88\n", + " notebook computer 50 0.34 0.84\n", + " obelisk 50 0.8 0.92\n", + " oboe 50 0.6 0.84\n", + " ocarina 50 0.8 0.86\n", + " odometer 50 0.96 1\n", + " oil filter 50 0.58 0.82\n", + " organ 50 0.82 0.9\n", + " oscilloscope 50 0.9 0.96\n", + " overskirt 50 0.2 0.7\n", + " bullock cart 50 0.7 0.94\n", + " oxygen mask 50 0.46 0.84\n", + " packet 50 0.5 0.78\n", + " paddle 50 0.56 0.94\n", + " paddle wheel 50 0.86 0.96\n", + " padlock 50 0.74 0.78\n", + " paintbrush 50 0.62 0.8\n", + " pajamas 50 0.56 0.92\n", + " palace 50 0.64 0.96\n", + " pan flute 50 0.84 0.86\n", + " paper towel 50 0.66 0.84\n", + " parachute 50 0.92 0.94\n", + " parallel bars 50 0.62 0.96\n", + " park bench 50 0.74 0.9\n", + " parking meter 50 0.84 0.92\n", + " passenger car 50 0.5 0.82\n", + " patio 50 0.58 0.84\n", + " payphone 50 0.74 0.92\n", + " pedestal 50 0.52 0.9\n", + " pencil case 50 0.64 0.92\n", + " pencil sharpener 50 0.52 0.78\n", + " perfume 50 0.7 0.9\n", + " Petri dish 50 0.6 0.8\n", + " photocopier 50 0.88 0.98\n", + " plectrum 50 0.7 0.84\n", + " Pickelhaube 50 0.72 0.86\n", + " picket fence 50 0.84 0.94\n", + " pickup truck 50 0.64 0.92\n", + " pier 50 0.52 0.82\n", + " piggy bank 50 0.82 0.94\n", + " pill bottle 50 0.76 0.86\n", + " pillow 50 0.76 0.9\n", + " ping-pong ball 50 0.84 0.88\n", + " pinwheel 50 0.76 0.88\n", + " pirate ship 50 0.76 0.94\n", + " pitcher 50 0.46 0.84\n", + " hand plane 50 0.84 0.94\n", + " planetarium 50 0.88 0.98\n", + " plastic bag 50 0.36 0.62\n", + " plate rack 50 0.52 0.78\n", + " plow 50 0.78 0.88\n", + " plunger 50 0.42 0.7\n", + " Polaroid camera 50 0.84 0.92\n", + " pole 50 0.38 0.74\n", + " police van 50 0.76 0.94\n", + " poncho 50 0.58 0.86\n", + " billiard table 50 0.8 0.88\n", + " soda bottle 50 0.56 0.94\n", + " pot 50 0.78 0.92\n", + " potter's wheel 50 0.9 0.94\n", + " power drill 50 0.42 0.72\n", + " prayer rug 50 0.7 0.86\n", + " printer 50 0.54 0.86\n", + " prison 50 0.7 0.9\n", + " projectile 50 0.28 0.9\n", + " projector 50 0.62 0.84\n", + " hockey puck 50 0.92 0.96\n", + " punching bag 50 0.6 0.68\n", + " purse 50 0.42 0.78\n", + " quill 50 0.68 0.84\n", + " quilt 50 0.64 0.9\n", + " race car 50 0.72 0.92\n", + " racket 50 0.72 0.9\n", + " radiator 50 0.66 0.76\n", + " radio 50 0.64 0.92\n", + " radio telescope 50 0.9 0.96\n", + " rain barrel 50 0.8 0.98\n", + " recreational vehicle 50 0.84 0.94\n", + " reel 50 0.72 0.82\n", + " reflex camera 50 0.72 0.92\n", + " refrigerator 50 0.7 0.9\n", + " remote control 50 0.7 0.88\n", + " restaurant 50 0.5 0.66\n", + " revolver 50 0.82 1\n", + " rifle 50 0.38 0.7\n", + " rocking chair 50 0.62 0.84\n", + " rotisserie 50 0.88 0.92\n", + " eraser 50 0.54 0.76\n", + " rugby ball 50 0.86 0.94\n", + " ruler 50 0.68 0.86\n", + " running shoe 50 0.78 0.94\n", + " safe 50 0.82 
0.92\n", + " safety pin 50 0.4 0.62\n", + " salt shaker 50 0.66 0.9\n", + " sandal 50 0.66 0.86\n", + " sarong 50 0.64 0.86\n", + " saxophone 50 0.66 0.88\n", + " scabbard 50 0.76 0.92\n", + " weighing scale 50 0.58 0.78\n", + " school bus 50 0.92 1\n", + " schooner 50 0.84 1\n", + " scoreboard 50 0.9 0.96\n", + " CRT screen 50 0.14 0.7\n", + " screw 50 0.9 0.98\n", + " screwdriver 50 0.3 0.58\n", + " seat belt 50 0.88 0.94\n", + " sewing machine 50 0.76 0.9\n", + " shield 50 0.56 0.82\n", + " shoe store 50 0.78 0.96\n", + " shoji 50 0.8 0.92\n", + " shopping basket 50 0.52 0.88\n", + " shopping cart 50 0.76 0.92\n", + " shovel 50 0.62 0.84\n", + " shower cap 50 0.7 0.84\n", + " shower curtain 50 0.64 0.82\n", + " ski 50 0.74 0.92\n", + " ski mask 50 0.72 0.88\n", + " sleeping bag 50 0.68 0.8\n", + " slide rule 50 0.72 0.88\n", + " sliding door 50 0.44 0.78\n", + " slot machine 50 0.94 0.98\n", + " snorkel 50 0.86 0.98\n", + " snowmobile 50 0.88 1\n", + " snowplow 50 0.84 0.98\n", + " soap dispenser 50 0.56 0.86\n", + " soccer ball 50 0.86 0.96\n", + " sock 50 0.62 0.76\n", + " solar thermal collector 50 0.72 0.96\n", + " sombrero 50 0.6 0.84\n", + " soup bowl 50 0.56 0.94\n", + " space bar 50 0.34 0.88\n", + " space heater 50 0.52 0.74\n", + " space shuttle 50 0.82 0.96\n", + " spatula 50 0.3 0.6\n", + " motorboat 50 0.86 1\n", + " spider web 50 0.7 0.9\n", + " spindle 50 0.86 0.98\n", + " sports car 50 0.6 0.94\n", + " spotlight 50 0.26 0.6\n", + " stage 50 0.68 0.86\n", + " steam locomotive 50 0.94 1\n", + " through arch bridge 50 0.84 0.96\n", + " steel drum 50 0.82 0.9\n", + " stethoscope 50 0.6 0.82\n", + " scarf 50 0.5 0.92\n", + " stone wall 50 0.76 0.9\n", + " stopwatch 50 0.58 0.9\n", + " stove 50 0.46 0.74\n", + " strainer 50 0.64 0.84\n", + " tram 50 0.88 0.96\n", + " stretcher 50 0.6 0.8\n", + " couch 50 0.8 0.96\n", + " stupa 50 0.88 0.88\n", + " submarine 50 0.72 0.92\n", + " suit 50 0.4 0.78\n", + " sundial 50 0.58 0.74\n", + " sunglass 50 0.14 0.58\n", + " sunglasses 50 0.28 0.58\n", + " sunscreen 50 0.32 0.7\n", + " suspension bridge 50 0.6 0.94\n", + " mop 50 0.74 0.92\n", + " sweatshirt 50 0.28 0.66\n", + " swimsuit 50 0.52 0.82\n", + " swing 50 0.76 0.84\n", + " switch 50 0.56 0.76\n", + " syringe 50 0.62 0.82\n", + " table lamp 50 0.6 0.88\n", + " tank 50 0.8 0.96\n", + " tape player 50 0.46 0.76\n", + " teapot 50 0.84 1\n", + " teddy bear 50 0.82 0.94\n", + " television 50 0.6 0.9\n", + " tennis ball 50 0.7 0.94\n", + " thatched roof 50 0.88 0.9\n", + " front curtain 50 0.8 0.92\n", + " thimble 50 0.6 0.8\n", + " threshing machine 50 0.56 0.88\n", + " throne 50 0.72 0.82\n", + " tile roof 50 0.72 0.94\n", + " toaster 50 0.66 0.84\n", + " tobacco shop 50 0.42 0.7\n", + " toilet seat 50 0.62 0.88\n", + " torch 50 0.64 0.84\n", + " totem pole 50 0.92 0.98\n", + " tow truck 50 0.62 0.88\n", + " toy store 50 0.6 0.94\n", + " tractor 50 0.76 0.98\n", + " semi-trailer truck 50 0.78 0.92\n", + " tray 50 0.46 0.64\n", + " trench coat 50 0.54 0.72\n", + " tricycle 50 0.72 0.94\n", + " trimaran 50 0.7 0.98\n", + " tripod 50 0.58 0.86\n", + " triumphal arch 50 0.92 0.98\n", + " trolleybus 50 0.9 1\n", + " trombone 50 0.54 0.88\n", + " tub 50 0.24 0.82\n", + " turnstile 50 0.84 0.94\n", + " typewriter keyboard 50 0.68 0.98\n", + " umbrella 50 0.52 0.7\n", + " unicycle 50 0.74 0.96\n", + " upright piano 50 0.76 0.9\n", + " vacuum cleaner 50 0.62 0.9\n", + " vase 50 0.5 0.78\n", + " vault 50 0.76 0.92\n", + " velvet 50 0.2 0.42\n", + " vending machine 50 0.9 1\n", + " vestment 50 
0.54 0.82\n", + " viaduct 50 0.78 0.86\n", + " violin 50 0.68 0.78\n", + " volleyball 50 0.86 1\n", + " waffle iron 50 0.72 0.88\n", + " wall clock 50 0.54 0.88\n", + " wallet 50 0.52 0.9\n", + " wardrobe 50 0.68 0.88\n", + " military aircraft 50 0.9 0.98\n", + " sink 50 0.72 0.96\n", + " washing machine 50 0.78 0.94\n", + " water bottle 50 0.54 0.74\n", + " water jug 50 0.22 0.74\n", + " water tower 50 0.9 0.96\n", + " whiskey jug 50 0.64 0.74\n", + " whistle 50 0.72 0.84\n", + " wig 50 0.84 0.9\n", + " window screen 50 0.68 0.8\n", + " window shade 50 0.52 0.76\n", + " Windsor tie 50 0.22 0.66\n", + " wine bottle 50 0.42 0.82\n", + " wing 50 0.54 0.96\n", + " wok 50 0.46 0.82\n", + " wooden spoon 50 0.58 0.8\n", + " wool 50 0.32 0.82\n", + " split-rail fence 50 0.74 0.9\n", + " shipwreck 50 0.84 0.96\n", + " yawl 50 0.78 0.96\n", + " yurt 50 0.84 1\n", + " website 50 0.98 1\n", + " comic book 50 0.62 0.9\n", + " crossword 50 0.84 0.88\n", + " traffic sign 50 0.78 0.9\n", + " traffic light 50 0.8 0.94\n", + " dust jacket 50 0.72 0.94\n", + " menu 50 0.82 0.96\n", + " plate 50 0.44 0.88\n", + " guacamole 50 0.8 0.92\n", + " consomme 50 0.54 0.88\n", + " hot pot 50 0.86 0.98\n", + " trifle 50 0.92 0.98\n", + " ice cream 50 0.68 0.94\n", + " ice pop 50 0.62 0.84\n", + " baguette 50 0.62 0.88\n", + " bagel 50 0.64 0.92\n", + " pretzel 50 0.72 0.88\n", + " cheeseburger 50 0.9 1\n", + " hot dog 50 0.74 0.94\n", + " mashed potato 50 0.74 0.9\n", + " cabbage 50 0.84 0.96\n", + " broccoli 50 0.9 0.96\n", + " cauliflower 50 0.82 1\n", + " zucchini 50 0.74 0.9\n", + " spaghetti squash 50 0.8 0.96\n", + " acorn squash 50 0.82 0.96\n", + " butternut squash 50 0.7 0.94\n", + " cucumber 50 0.6 0.96\n", + " artichoke 50 0.84 0.94\n", + " bell pepper 50 0.84 0.98\n", + " cardoon 50 0.88 0.94\n", + " mushroom 50 0.38 0.92\n", + " Granny Smith 50 0.9 0.96\n", + " strawberry 50 0.6 0.88\n", + " orange 50 0.7 0.92\n", + " lemon 50 0.78 0.98\n", + " fig 50 0.82 0.96\n", + " pineapple 50 0.86 0.96\n", + " banana 50 0.84 0.96\n", + " jackfruit 50 0.9 0.98\n", + " custard apple 50 0.86 0.96\n", + " pomegranate 50 0.82 0.98\n", + " hay 50 0.8 0.92\n", + " carbonara 50 0.88 0.94\n", + " chocolate syrup 50 0.46 0.84\n", + " dough 50 0.4 0.6\n", + " meatloaf 50 0.58 0.84\n", + " pizza 50 0.84 0.96\n", + " pot pie 50 0.68 0.9\n", + " burrito 50 0.8 0.98\n", + " red wine 50 0.54 0.82\n", + " espresso 50 0.64 0.88\n", + " cup 50 0.38 0.7\n", + " eggnog 50 0.38 0.7\n", + " alp 50 0.54 0.88\n", + " bubble 50 0.8 0.96\n", + " cliff 50 0.64 1\n", + " coral reef 50 0.72 0.96\n", + " geyser 50 0.94 1\n", + " lakeshore 50 0.54 0.88\n", + " promontory 50 0.58 0.94\n", + " shoal 50 0.6 0.96\n", + " seashore 50 0.44 0.78\n", + " valley 50 0.72 0.94\n", + " volcano 50 0.78 0.96\n", + " baseball player 50 0.72 0.94\n", + " bridegroom 50 0.72 0.88\n", + " scuba diver 50 0.8 1\n", + " rapeseed 50 0.94 0.98\n", + " daisy 50 0.96 0.98\n", + " yellow lady's slipper 50 1 1\n", + " corn 50 0.4 0.88\n", + " acorn 50 0.92 0.98\n", + " rose hip 50 0.92 0.98\n", + " horse chestnut seed 50 0.94 0.98\n", + " coral fungus 50 0.96 0.96\n", + " agaric 50 0.82 0.94\n", + " gyromitra 50 0.98 1\n", + " stinkhorn mushroom 50 0.8 0.94\n", + " earth star 50 0.98 1\n", + " hen-of-the-woods 50 0.8 0.96\n", + " bolete 50 0.74 0.94\n", + " ear 50 0.48 0.94\n", + " toilet paper 50 0.36 0.68\n", + "Speed: 0.1ms pre-process, 0.3ms inference, 0.0ms post-process per image at shape (1, 3, 224, 224)\n", + "Results saved to \u001b[1mruns/val-cls/exp\u001b[0m\n" + ] 
+ } + ], + "source": [ + "# Validate YOLOv5s on Imagenet val\n", + "!python classify/val.py --weights yolov5s-cls.pt --data ../datasets/imagenet --img 224 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s Classification model on the [Imagenette](https://image-net.org/) dataset with `--data imagenet`, starting from pretrained `--pretrained yolov5s-cls.pt`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **Training Results** are saved to `runs/train-cls/` with incrementing run directories, i.e. `runs/train-cls/exp2`, `runs/train-cls/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-classification-custom-data/](https://blog.roboflow.com/train-yolov5-classification-custom-data/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1KZiKUAjtARHAfZCXbJRv14-pOnIsBLPV?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "77c8d487-16db-4073-b3ea-06cabf2e7766" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1mclassify/train: \u001b[0mmodel=yolov5s-cls.pt, data=imagenette160, epochs=5, batch_size=64, imgsz=224, nosave=False, cache=ram, device=, workers=8, project=runs/train-cls, name=exp, exist_ok=False, pretrained=True, optimizer=Adam, lr0=0.001, decay=5e-05, label_smoothing=0.1, cutoff=None, dropout=None, verbose=False, seed=0, local_rank=-1\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v7.0-3-g61ebf5e Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-cls', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing path /content/datasets/imagenette160, attempting download...\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/imagenette160.zip to /content/datasets/imagenette160.zip...\n", + "100% 103M/103M [00:00<00:00, 347MB/s] \n", + "Unzipping /content/datasets/imagenette160.zip...\n", + "Dataset download success ✅ (3.3s), saved to \u001b[1m/content/datasets/imagenette160\u001b[0m\n", + "\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mRandomResizedCrop(p=1.0, height=224, width=224, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=1), HorizontalFlip(p=0.5), ColorJitter(p=0.5, brightness=[0.6, 1.4], contrast=[0.6, 1.4], saturation=[0.6, 1.4], hue=[0, 0]), Normalize(p=1.0, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), max_pixel_value=255.0), ToTensorV2(always_apply=True, p=1.0, transpose_mask=False)\n", + "Model summary: 149 layers, 4185290 parameters, 4185290 gradients, 10.5 GFLOPs\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m Adam(lr=0.001) with parameter groups 32 weight(decay=0.0), 33 weight(decay=5e-05), 33 bias\n", + "Image sizes 224 train, 224 test\n", + "Using 1 dataloader workers\n", + "Logging results to \u001b[1mruns/train-cls/exp\u001b[0m\n", + "Starting yolov5s-cls.pt training on imagenette160 dataset with 10 classes for 5 epochs...\n", + "\n", + " Epoch GPU_mem train_loss val_loss top1_acc top5_acc\n", + " 1/5 1.47G 1.05 0.974 0.828 0.975: 100% 148/148 [00:38<00:00, 3.82it/s]\n", + " 2/5 1.73G 0.895 0.766 0.911 0.994: 100% 148/148 [00:36<00:00, 4.03it/s]\n", + " 3/5 1.73G 0.82 0.704 0.934 0.996: 100% 148/148 [00:35<00:00, 4.20it/s]\n", + " 4/5 1.73G 0.766 0.664 0.951 0.998: 100% 148/148 [00:36<00:00, 4.05it/s]\n", + " 5/5 1.73G 0.724 0.634 0.959 0.997: 100% 148/148 [00:37<00:00, 3.94it/s]\n", + "\n", + "Training complete (0.052 hours)\n", + "Results saved to 
\u001b[1mruns/train-cls/exp\u001b[0m\n", + "Predict: python classify/predict.py --weights runs/train-cls/exp/weights/best.pt --source im.jpg\n", + "Validate: python classify/val.py --weights runs/train-cls/exp/weights/best.pt --data /content/datasets/imagenette160\n", + "Export: python export.py --weights runs/train-cls/exp/weights/best.pt --include onnx\n", + "PyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', 'runs/train-cls/exp/weights/best.pt')\n", + "Visualize: https://netron.app\n", + "\n" + ] + } + ], + "source": [ + "# Train YOLOv5s Classification on Imagenette160 for 3 epochs\n", + "!python classify/train.py --model yolov5s-cls.pt --data imagenette160 --epochs 5 --img 224 --cache" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "15glLzbQx5u0" + }, + "source": [ + "# 4. Visualize" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nWOsI5wJR1o3" + }, + "source": [ + "## Comet Logging and Visualization 🌟 NEW\n", + "\n", + "[Comet](https://www.comet.com/site/lp/yolov5-with-comet/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab) is now fully integrated with YOLOv5. Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab)! Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes!\n", + "\n", + "Getting started is easy:\n", + "```shell\n", + "pip install comet_ml # 1. install\n", + "export COMET_API_KEY= # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\n", + "\"Comet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! 
With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmentated mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc." 
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Classification Tutorial", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/src/train_utils/train_models/models/yolov5/classify/val.py b/src/train_utils/train_models/models/yolov5/classify/val.py new file mode 100644 index 0000000..4edd5a1 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/classify/val.py @@ -0,0 +1,170 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 classification model on a classification dataset + +Usage: + $ bash data/scripts/get_imagenet.sh --val # download ImageNet val split (6.3G, 50000 images) + $ python classify/val.py --weights yolov5m-cls.pt --data ../datasets/imagenet --img 224 # validate ImageNet + +Usage - formats: + $ python classify/val.py --weights yolov5s-cls.pt # PyTorch + yolov5s-cls.torchscript # TorchScript + yolov5s-cls.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-cls_openvino_model # OpenVINO + yolov5s-cls.engine # TensorRT + yolov5s-cls.mlmodel # CoreML (macOS-only) + yolov5s-cls_saved_model # TensorFlow SavedModel + yolov5s-cls.pb # TensorFlow GraphDef + yolov5s-cls.tflite # TensorFlow Lite + yolov5s-cls_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-cls_paddle_model # PaddlePaddle +""" + +import argparse +import os +import sys +from pathlib import Path + +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import create_classification_dataloader +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_img_size, check_requirements, colorstr, + increment_path, print_args) +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + data=ROOT / '../datasets/mnist', # dataset dir + weights=ROOT / 'yolov5s-cls.pt', # model.pt path(s) + batch_size=128, # batch size + imgsz=224, # inference size (pixels) + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + verbose=False, # verbose output + project=ROOT / 'runs/val-cls', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + criterion=None, + pbar=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + save_dir.mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Dataloader + data = Path(data) + test_dir = data / 'test' if (data / 'test').exists() else data / 'val' # data/test or data/val + dataloader = create_classification_dataloader(path=test_dir, + imgsz=imgsz, + batch_size=batch_size, + augment=False, + rank=-1, + workers=workers) + + model.eval() + pred, targets, loss, dt = [], [], 0, (Profile(), Profile(), Profile()) + n = len(dataloader) # number of batches + action = 'validating' if dataloader.dataset.root.stem == 'val' else 'testing' + desc = f'{pbar.desc[:-36]}{action:>36}' if pbar else f'{action}' + bar = tqdm(dataloader, desc, n, not training, bar_format=TQDM_BAR_FORMAT, position=0) + with torch.cuda.amp.autocast(enabled=device.type != 'cpu'): + for images, labels in bar: + with dt[0]: + images, labels = images.to(device, non_blocking=True), labels.to(device) + + with dt[1]: + y = model(images) + + with dt[2]: + pred.append(y.argsort(1, descending=True)[:, :5]) + targets.append(labels) + if criterion: + loss += criterion(y, labels) + + loss /= n + pred, targets = torch.cat(pred), torch.cat(targets) + correct = (targets[:, None] == pred).float() + acc = torch.stack((correct[:, 0], correct.max(1).values), dim=1) # (top1, top5) accuracy + top1, top5 = acc.mean(0).tolist() + + if pbar: + pbar.desc = f'{pbar.desc[:-36]}{loss:>12.3g}{top1:>12.3g}{top5:>12.3g}' + if verbose: # all classes + LOGGER.info(f"{'Class':>24}{'Images':>12}{'top1_acc':>12}{'top5_acc':>12}") + LOGGER.info(f"{'all':>24}{targets.shape[0]:>12}{top1:>12.3g}{top5:>12.3g}") + for i, c in model.names.items(): + acc_i = acc[targets == i] + top1i, top5i = acc_i.mean(0).tolist() + LOGGER.info(f'{c:>24}{acc_i.shape[0]:>12}{top1i:>12.3g}{top5i:>12.3g}') + + # Print results + t = tuple(x.t / len(dataloader.dataset.samples) * 1E3 for x in dt) # speeds per image + shape = (1, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms post-process per image at shape {shape}' % t) + LOGGER.info(f"Results saved to 
{colorstr('bold', save_dir)}") + + return top1, top5, loss + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / '../datasets/mnist', help='dataset path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-cls.pt', help='model.pt path(s)') + parser.add_argument('--batch-size', type=int, default=128, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=224, help='inference size (pixels)') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--verbose', nargs='?', const=True, default=True, help='verbose output') + parser.add_argument('--project', default=ROOT / 'runs/val-cls', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/data/Argoverse.yaml b/src/train_utils/train_models/models/yolov5/data/Argoverse.yaml new file mode 100644 index 0000000..558151d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/Argoverse.yaml @@ -0,0 +1,74 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ by Argo AI +# Example usage: python train.py --data Argoverse.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Argoverse ← downloads here (31.3 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/Argoverse # dataset root dir +train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images +val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images +test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: bus + 5: truck + 6: traffic_light + 7: stop_sign + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import json + + from tqdm import tqdm + from utils.general import download, Path + + + def argoverse2yolo(set): + labels = {} + a = json.load(open(set, "rb")) + for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."): + img_id = annot['image_id'] + img_name = a['images'][img_id]['name'] + img_label_name = f'{img_name[:-3]}txt' + + cls = annot['category_id'] # instance class id + x_center, y_center, width, height = annot['bbox'] + x_center = (x_center + width / 2) / 1920.0 # offset and scale + y_center = (y_center + height / 2) / 1200.0 # offset and scale + width /= 1920.0 # scale + height /= 1200.0 # scale + + img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']] + if not img_dir.exists(): + img_dir.mkdir(parents=True, exist_ok=True) + + k = str(img_dir / img_label_name) + if k not in labels: + labels[k] = [] + labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n") + + for k in labels: + with open(k, "w") as f: + f.writelines(labels[k]) + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip'] + download(urls, dir=dir, delete=False) + + # Convert + annotations_dir = 'Argoverse-HD/annotations/' + (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images' + for d in "train.json", "val.json": + argoverse2yolo(dir / annotations_dir / d) # convert VisDrone annotations to YOLO labels diff --git a/src/train_utils/train_models/models/yolov5/data/GlobalWheat2020.yaml b/src/train_utils/train_models/models/yolov5/data/GlobalWheat2020.yaml new file mode 100644 index 0000000..01812d0 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/GlobalWheat2020.yaml @@ -0,0 +1,54 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Global Wheat 2020 dataset http://www.global-wheat.com/ by University of Saskatchewan +# Example usage: python train.py --data GlobalWheat2020.yaml +# parent +# ├── yolov5 +# └── datasets +# └── GlobalWheat2020 ← downloads here (7.0 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/GlobalWheat2020 # dataset root dir +train: # train images (relative to 'path') 3422 images + - images/arvalis_1 + - images/arvalis_2 + - images/arvalis_3 + - images/ethz_1 + - images/rres_1 + - images/inrae_1 + - images/usask_1 +val: # val images (relative to 'path') 748 images (WARNING: train set contains ethz_1) + - images/ethz_1 +test: # test images (optional) 1276 images + - images/utokyo_1 + - images/utokyo_2 + - images/nau_1 + - images/uq_1 + +# Classes +names: + 0: wheat_head + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from utils.general import download, Path + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip'] + download(urls, dir=dir) + + # Make Directories + for p in 'annotations', 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + + # Move + for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \ + 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1': + (dir / p).rename(dir / 'images' / p) # move to /images + f = (dir / p).with_suffix('.json') # json file + if f.exists(): + f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations diff --git a/src/train_utils/train_models/models/yolov5/data/ImageNet.yaml b/src/train_utils/train_models/models/yolov5/data/ImageNet.yaml new file mode 100644 index 0000000..14f1295 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/ImageNet.yaml @@ -0,0 +1,1022 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# ImageNet-1k dataset https://www.image-net.org/index.php by Stanford University +# Simplified class names from https://github.com/anishathalye/imagenet-simple-labels +# Example usage: python classify/train.py --data imagenet +# parent +# ├── yolov5 +# └── datasets +# └── imagenet ← downloads here (144 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
+path: ../datasets/imagenet # dataset root dir +train: train # train images (relative to 'path') 1281167 images +val: val # val images (relative to 'path') 50000 images +test: # test images (optional) + +# Classes +names: + 0: tench + 1: goldfish + 2: great white shark + 3: tiger shark + 4: hammerhead shark + 5: electric ray + 6: stingray + 7: cock + 8: hen + 9: ostrich + 10: brambling + 11: goldfinch + 12: house finch + 13: junco + 14: indigo bunting + 15: American robin + 16: bulbul + 17: jay + 18: magpie + 19: chickadee + 20: American dipper + 21: kite + 22: bald eagle + 23: vulture + 24: great grey owl + 25: fire salamander + 26: smooth newt + 27: newt + 28: spotted salamander + 29: axolotl + 30: American bullfrog + 31: tree frog + 32: tailed frog + 33: loggerhead sea turtle + 34: leatherback sea turtle + 35: mud turtle + 36: terrapin + 37: box turtle + 38: banded gecko + 39: green iguana + 40: Carolina anole + 41: desert grassland whiptail lizard + 42: agama + 43: frilled-necked lizard + 44: alligator lizard + 45: Gila monster + 46: European green lizard + 47: chameleon + 48: Komodo dragon + 49: Nile crocodile + 50: American alligator + 51: triceratops + 52: worm snake + 53: ring-necked snake + 54: eastern hog-nosed snake + 55: smooth green snake + 56: kingsnake + 57: garter snake + 58: water snake + 59: vine snake + 60: night snake + 61: boa constrictor + 62: African rock python + 63: Indian cobra + 64: green mamba + 65: sea snake + 66: Saharan horned viper + 67: eastern diamondback rattlesnake + 68: sidewinder + 69: trilobite + 70: harvestman + 71: scorpion + 72: yellow garden spider + 73: barn spider + 74: European garden spider + 75: southern black widow + 76: tarantula + 77: wolf spider + 78: tick + 79: centipede + 80: black grouse + 81: ptarmigan + 82: ruffed grouse + 83: prairie grouse + 84: peacock + 85: quail + 86: partridge + 87: grey parrot + 88: macaw + 89: sulphur-crested cockatoo + 90: lorikeet + 91: coucal + 92: bee eater + 93: hornbill + 94: hummingbird + 95: jacamar + 96: toucan + 97: duck + 98: red-breasted merganser + 99: goose + 100: black swan + 101: tusker + 102: echidna + 103: platypus + 104: wallaby + 105: koala + 106: wombat + 107: jellyfish + 108: sea anemone + 109: brain coral + 110: flatworm + 111: nematode + 112: conch + 113: snail + 114: slug + 115: sea slug + 116: chiton + 117: chambered nautilus + 118: Dungeness crab + 119: rock crab + 120: fiddler crab + 121: red king crab + 122: American lobster + 123: spiny lobster + 124: crayfish + 125: hermit crab + 126: isopod + 127: white stork + 128: black stork + 129: spoonbill + 130: flamingo + 131: little blue heron + 132: great egret + 133: bittern + 134: crane (bird) + 135: limpkin + 136: common gallinule + 137: American coot + 138: bustard + 139: ruddy turnstone + 140: dunlin + 141: common redshank + 142: dowitcher + 143: oystercatcher + 144: pelican + 145: king penguin + 146: albatross + 147: grey whale + 148: killer whale + 149: dugong + 150: sea lion + 151: Chihuahua + 152: Japanese Chin + 153: Maltese + 154: Pekingese + 155: Shih Tzu + 156: King Charles Spaniel + 157: Papillon + 158: toy terrier + 159: Rhodesian Ridgeback + 160: Afghan Hound + 161: Basset Hound + 162: Beagle + 163: Bloodhound + 164: Bluetick Coonhound + 165: Black and Tan Coonhound + 166: Treeing Walker Coonhound + 167: English foxhound + 168: Redbone Coonhound + 169: borzoi + 170: Irish Wolfhound + 171: Italian Greyhound + 172: Whippet + 173: Ibizan Hound + 174: Norwegian Elkhound + 175: Otterhound + 176: Saluki + 177: Scottish 
Deerhound + 178: Weimaraner + 179: Staffordshire Bull Terrier + 180: American Staffordshire Terrier + 181: Bedlington Terrier + 182: Border Terrier + 183: Kerry Blue Terrier + 184: Irish Terrier + 185: Norfolk Terrier + 186: Norwich Terrier + 187: Yorkshire Terrier + 188: Wire Fox Terrier + 189: Lakeland Terrier + 190: Sealyham Terrier + 191: Airedale Terrier + 192: Cairn Terrier + 193: Australian Terrier + 194: Dandie Dinmont Terrier + 195: Boston Terrier + 196: Miniature Schnauzer + 197: Giant Schnauzer + 198: Standard Schnauzer + 199: Scottish Terrier + 200: Tibetan Terrier + 201: Australian Silky Terrier + 202: Soft-coated Wheaten Terrier + 203: West Highland White Terrier + 204: Lhasa Apso + 205: Flat-Coated Retriever + 206: Curly-coated Retriever + 207: Golden Retriever + 208: Labrador Retriever + 209: Chesapeake Bay Retriever + 210: German Shorthaired Pointer + 211: Vizsla + 212: English Setter + 213: Irish Setter + 214: Gordon Setter + 215: Brittany + 216: Clumber Spaniel + 217: English Springer Spaniel + 218: Welsh Springer Spaniel + 219: Cocker Spaniels + 220: Sussex Spaniel + 221: Irish Water Spaniel + 222: Kuvasz + 223: Schipperke + 224: Groenendael + 225: Malinois + 226: Briard + 227: Australian Kelpie + 228: Komondor + 229: Old English Sheepdog + 230: Shetland Sheepdog + 231: collie + 232: Border Collie + 233: Bouvier des Flandres + 234: Rottweiler + 235: German Shepherd Dog + 236: Dobermann + 237: Miniature Pinscher + 238: Greater Swiss Mountain Dog + 239: Bernese Mountain Dog + 240: Appenzeller Sennenhund + 241: Entlebucher Sennenhund + 242: Boxer + 243: Bullmastiff + 244: Tibetan Mastiff + 245: French Bulldog + 246: Great Dane + 247: St. Bernard + 248: husky + 249: Alaskan Malamute + 250: Siberian Husky + 251: Dalmatian + 252: Affenpinscher + 253: Basenji + 254: pug + 255: Leonberger + 256: Newfoundland + 257: Pyrenean Mountain Dog + 258: Samoyed + 259: Pomeranian + 260: Chow Chow + 261: Keeshond + 262: Griffon Bruxellois + 263: Pembroke Welsh Corgi + 264: Cardigan Welsh Corgi + 265: Toy Poodle + 266: Miniature Poodle + 267: Standard Poodle + 268: Mexican hairless dog + 269: grey wolf + 270: Alaskan tundra wolf + 271: red wolf + 272: coyote + 273: dingo + 274: dhole + 275: African wild dog + 276: hyena + 277: red fox + 278: kit fox + 279: Arctic fox + 280: grey fox + 281: tabby cat + 282: tiger cat + 283: Persian cat + 284: Siamese cat + 285: Egyptian Mau + 286: cougar + 287: lynx + 288: leopard + 289: snow leopard + 290: jaguar + 291: lion + 292: tiger + 293: cheetah + 294: brown bear + 295: American black bear + 296: polar bear + 297: sloth bear + 298: mongoose + 299: meerkat + 300: tiger beetle + 301: ladybug + 302: ground beetle + 303: longhorn beetle + 304: leaf beetle + 305: dung beetle + 306: rhinoceros beetle + 307: weevil + 308: fly + 309: bee + 310: ant + 311: grasshopper + 312: cricket + 313: stick insect + 314: cockroach + 315: mantis + 316: cicada + 317: leafhopper + 318: lacewing + 319: dragonfly + 320: damselfly + 321: red admiral + 322: ringlet + 323: monarch butterfly + 324: small white + 325: sulphur butterfly + 326: gossamer-winged butterfly + 327: starfish + 328: sea urchin + 329: sea cucumber + 330: cottontail rabbit + 331: hare + 332: Angora rabbit + 333: hamster + 334: porcupine + 335: fox squirrel + 336: marmot + 337: beaver + 338: guinea pig + 339: common sorrel + 340: zebra + 341: pig + 342: wild boar + 343: warthog + 344: hippopotamus + 345: ox + 346: water buffalo + 347: bison + 348: ram + 349: bighorn sheep + 350: Alpine ibex + 351: hartebeest 
+ 352: impala + 353: gazelle + 354: dromedary + 355: llama + 356: weasel + 357: mink + 358: European polecat + 359: black-footed ferret + 360: otter + 361: skunk + 362: badger + 363: armadillo + 364: three-toed sloth + 365: orangutan + 366: gorilla + 367: chimpanzee + 368: gibbon + 369: siamang + 370: guenon + 371: patas monkey + 372: baboon + 373: macaque + 374: langur + 375: black-and-white colobus + 376: proboscis monkey + 377: marmoset + 378: white-headed capuchin + 379: howler monkey + 380: titi + 381: Geoffroy's spider monkey + 382: common squirrel monkey + 383: ring-tailed lemur + 384: indri + 385: Asian elephant + 386: African bush elephant + 387: red panda + 388: giant panda + 389: snoek + 390: eel + 391: coho salmon + 392: rock beauty + 393: clownfish + 394: sturgeon + 395: garfish + 396: lionfish + 397: pufferfish + 398: abacus + 399: abaya + 400: academic gown + 401: accordion + 402: acoustic guitar + 403: aircraft carrier + 404: airliner + 405: airship + 406: altar + 407: ambulance + 408: amphibious vehicle + 409: analog clock + 410: apiary + 411: apron + 412: waste container + 413: assault rifle + 414: backpack + 415: bakery + 416: balance beam + 417: balloon + 418: ballpoint pen + 419: Band-Aid + 420: banjo + 421: baluster + 422: barbell + 423: barber chair + 424: barbershop + 425: barn + 426: barometer + 427: barrel + 428: wheelbarrow + 429: baseball + 430: basketball + 431: bassinet + 432: bassoon + 433: swimming cap + 434: bath towel + 435: bathtub + 436: station wagon + 437: lighthouse + 438: beaker + 439: military cap + 440: beer bottle + 441: beer glass + 442: bell-cot + 443: bib + 444: tandem bicycle + 445: bikini + 446: ring binder + 447: binoculars + 448: birdhouse + 449: boathouse + 450: bobsleigh + 451: bolo tie + 452: poke bonnet + 453: bookcase + 454: bookstore + 455: bottle cap + 456: bow + 457: bow tie + 458: brass + 459: bra + 460: breakwater + 461: breastplate + 462: broom + 463: bucket + 464: buckle + 465: bulletproof vest + 466: high-speed train + 467: butcher shop + 468: taxicab + 469: cauldron + 470: candle + 471: cannon + 472: canoe + 473: can opener + 474: cardigan + 475: car mirror + 476: carousel + 477: tool kit + 478: carton + 479: car wheel + 480: automated teller machine + 481: cassette + 482: cassette player + 483: castle + 484: catamaran + 485: CD player + 486: cello + 487: mobile phone + 488: chain + 489: chain-link fence + 490: chain mail + 491: chainsaw + 492: chest + 493: chiffonier + 494: chime + 495: china cabinet + 496: Christmas stocking + 497: church + 498: movie theater + 499: cleaver + 500: cliff dwelling + 501: cloak + 502: clogs + 503: cocktail shaker + 504: coffee mug + 505: coffeemaker + 506: coil + 507: combination lock + 508: computer keyboard + 509: confectionery store + 510: container ship + 511: convertible + 512: corkscrew + 513: cornet + 514: cowboy boot + 515: cowboy hat + 516: cradle + 517: crane (machine) + 518: crash helmet + 519: crate + 520: infant bed + 521: Crock Pot + 522: croquet ball + 523: crutch + 524: cuirass + 525: dam + 526: desk + 527: desktop computer + 528: rotary dial telephone + 529: diaper + 530: digital clock + 531: digital watch + 532: dining table + 533: dishcloth + 534: dishwasher + 535: disc brake + 536: dock + 537: dog sled + 538: dome + 539: doormat + 540: drilling rig + 541: drum + 542: drumstick + 543: dumbbell + 544: Dutch oven + 545: electric fan + 546: electric guitar + 547: electric locomotive + 548: entertainment center + 549: envelope + 550: espresso machine + 551: face powder + 552: 
feather boa + 553: filing cabinet + 554: fireboat + 555: fire engine + 556: fire screen sheet + 557: flagpole + 558: flute + 559: folding chair + 560: football helmet + 561: forklift + 562: fountain + 563: fountain pen + 564: four-poster bed + 565: freight car + 566: French horn + 567: frying pan + 568: fur coat + 569: garbage truck + 570: gas mask + 571: gas pump + 572: goblet + 573: go-kart + 574: golf ball + 575: golf cart + 576: gondola + 577: gong + 578: gown + 579: grand piano + 580: greenhouse + 581: grille + 582: grocery store + 583: guillotine + 584: barrette + 585: hair spray + 586: half-track + 587: hammer + 588: hamper + 589: hair dryer + 590: hand-held computer + 591: handkerchief + 592: hard disk drive + 593: harmonica + 594: harp + 595: harvester + 596: hatchet + 597: holster + 598: home theater + 599: honeycomb + 600: hook + 601: hoop skirt + 602: horizontal bar + 603: horse-drawn vehicle + 604: hourglass + 605: iPod + 606: clothes iron + 607: jack-o'-lantern + 608: jeans + 609: jeep + 610: T-shirt + 611: jigsaw puzzle + 612: pulled rickshaw + 613: joystick + 614: kimono + 615: knee pad + 616: knot + 617: lab coat + 618: ladle + 619: lampshade + 620: laptop computer + 621: lawn mower + 622: lens cap + 623: paper knife + 624: library + 625: lifeboat + 626: lighter + 627: limousine + 628: ocean liner + 629: lipstick + 630: slip-on shoe + 631: lotion + 632: speaker + 633: loupe + 634: sawmill + 635: magnetic compass + 636: mail bag + 637: mailbox + 638: tights + 639: tank suit + 640: manhole cover + 641: maraca + 642: marimba + 643: mask + 644: match + 645: maypole + 646: maze + 647: measuring cup + 648: medicine chest + 649: megalith + 650: microphone + 651: microwave oven + 652: military uniform + 653: milk can + 654: minibus + 655: miniskirt + 656: minivan + 657: missile + 658: mitten + 659: mixing bowl + 660: mobile home + 661: Model T + 662: modem + 663: monastery + 664: monitor + 665: moped + 666: mortar + 667: square academic cap + 668: mosque + 669: mosquito net + 670: scooter + 671: mountain bike + 672: tent + 673: computer mouse + 674: mousetrap + 675: moving van + 676: muzzle + 677: nail + 678: neck brace + 679: necklace + 680: nipple + 681: notebook computer + 682: obelisk + 683: oboe + 684: ocarina + 685: odometer + 686: oil filter + 687: organ + 688: oscilloscope + 689: overskirt + 690: bullock cart + 691: oxygen mask + 692: packet + 693: paddle + 694: paddle wheel + 695: padlock + 696: paintbrush + 697: pajamas + 698: palace + 699: pan flute + 700: paper towel + 701: parachute + 702: parallel bars + 703: park bench + 704: parking meter + 705: passenger car + 706: patio + 707: payphone + 708: pedestal + 709: pencil case + 710: pencil sharpener + 711: perfume + 712: Petri dish + 713: photocopier + 714: plectrum + 715: Pickelhaube + 716: picket fence + 717: pickup truck + 718: pier + 719: piggy bank + 720: pill bottle + 721: pillow + 722: ping-pong ball + 723: pinwheel + 724: pirate ship + 725: pitcher + 726: hand plane + 727: planetarium + 728: plastic bag + 729: plate rack + 730: plow + 731: plunger + 732: Polaroid camera + 733: pole + 734: police van + 735: poncho + 736: billiard table + 737: soda bottle + 738: pot + 739: potter's wheel + 740: power drill + 741: prayer rug + 742: printer + 743: prison + 744: projectile + 745: projector + 746: hockey puck + 747: punching bag + 748: purse + 749: quill + 750: quilt + 751: race car + 752: racket + 753: radiator + 754: radio + 755: radio telescope + 756: rain barrel + 757: recreational vehicle + 758: reel + 759: 
reflex camera + 760: refrigerator + 761: remote control + 762: restaurant + 763: revolver + 764: rifle + 765: rocking chair + 766: rotisserie + 767: eraser + 768: rugby ball + 769: ruler + 770: running shoe + 771: safe + 772: safety pin + 773: salt shaker + 774: sandal + 775: sarong + 776: saxophone + 777: scabbard + 778: weighing scale + 779: school bus + 780: schooner + 781: scoreboard + 782: CRT screen + 783: screw + 784: screwdriver + 785: seat belt + 786: sewing machine + 787: shield + 788: shoe store + 789: shoji + 790: shopping basket + 791: shopping cart + 792: shovel + 793: shower cap + 794: shower curtain + 795: ski + 796: ski mask + 797: sleeping bag + 798: slide rule + 799: sliding door + 800: slot machine + 801: snorkel + 802: snowmobile + 803: snowplow + 804: soap dispenser + 805: soccer ball + 806: sock + 807: solar thermal collector + 808: sombrero + 809: soup bowl + 810: space bar + 811: space heater + 812: space shuttle + 813: spatula + 814: motorboat + 815: spider web + 816: spindle + 817: sports car + 818: spotlight + 819: stage + 820: steam locomotive + 821: through arch bridge + 822: steel drum + 823: stethoscope + 824: scarf + 825: stone wall + 826: stopwatch + 827: stove + 828: strainer + 829: tram + 830: stretcher + 831: couch + 832: stupa + 833: submarine + 834: suit + 835: sundial + 836: sunglass + 837: sunglasses + 838: sunscreen + 839: suspension bridge + 840: mop + 841: sweatshirt + 842: swimsuit + 843: swing + 844: switch + 845: syringe + 846: table lamp + 847: tank + 848: tape player + 849: teapot + 850: teddy bear + 851: television + 852: tennis ball + 853: thatched roof + 854: front curtain + 855: thimble + 856: threshing machine + 857: throne + 858: tile roof + 859: toaster + 860: tobacco shop + 861: toilet seat + 862: torch + 863: totem pole + 864: tow truck + 865: toy store + 866: tractor + 867: semi-trailer truck + 868: tray + 869: trench coat + 870: tricycle + 871: trimaran + 872: tripod + 873: triumphal arch + 874: trolleybus + 875: trombone + 876: tub + 877: turnstile + 878: typewriter keyboard + 879: umbrella + 880: unicycle + 881: upright piano + 882: vacuum cleaner + 883: vase + 884: vault + 885: velvet + 886: vending machine + 887: vestment + 888: viaduct + 889: violin + 890: volleyball + 891: waffle iron + 892: wall clock + 893: wallet + 894: wardrobe + 895: military aircraft + 896: sink + 897: washing machine + 898: water bottle + 899: water jug + 900: water tower + 901: whiskey jug + 902: whistle + 903: wig + 904: window screen + 905: window shade + 906: Windsor tie + 907: wine bottle + 908: wing + 909: wok + 910: wooden spoon + 911: wool + 912: split-rail fence + 913: shipwreck + 914: yawl + 915: yurt + 916: website + 917: comic book + 918: crossword + 919: traffic sign + 920: traffic light + 921: dust jacket + 922: menu + 923: plate + 924: guacamole + 925: consomme + 926: hot pot + 927: trifle + 928: ice cream + 929: ice pop + 930: baguette + 931: bagel + 932: pretzel + 933: cheeseburger + 934: hot dog + 935: mashed potato + 936: cabbage + 937: broccoli + 938: cauliflower + 939: zucchini + 940: spaghetti squash + 941: acorn squash + 942: butternut squash + 943: cucumber + 944: artichoke + 945: bell pepper + 946: cardoon + 947: mushroom + 948: Granny Smith + 949: strawberry + 950: orange + 951: lemon + 952: fig + 953: pineapple + 954: banana + 955: jackfruit + 956: custard apple + 957: pomegranate + 958: hay + 959: carbonara + 960: chocolate syrup + 961: dough + 962: meatloaf + 963: pizza + 964: pot pie + 965: burrito + 966: red wine + 967: 
espresso + 968: cup + 969: eggnog + 970: alp + 971: bubble + 972: cliff + 973: coral reef + 974: geyser + 975: lakeshore + 976: promontory + 977: shoal + 978: seashore + 979: valley + 980: volcano + 981: baseball player + 982: bridegroom + 983: scuba diver + 984: rapeseed + 985: daisy + 986: yellow lady's slipper + 987: corn + 988: acorn + 989: rose hip + 990: horse chestnut seed + 991: coral fungus + 992: agaric + 993: gyromitra + 994: stinkhorn mushroom + 995: earth star + 996: hen-of-the-woods + 997: bolete + 998: ear + 999: toilet paper + + +# Download script/URL (optional) +download: data/scripts/get_imagenet.sh diff --git a/src/train_utils/train_models/models/yolov5/data/Objects365.yaml b/src/train_utils/train_models/models/yolov5/data/Objects365.yaml new file mode 100644 index 0000000..05b26a1 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/Objects365.yaml @@ -0,0 +1,438 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Objects365 dataset https://www.objects365.org/ by Megvii +# Example usage: python train.py --data Objects365.yaml +# parent +# ├── yolov5 +# └── datasets +# └── Objects365 ← downloads here (712 GB = 367G data + 345G zips) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/Objects365 # dataset root dir +train: images/train # train images (relative to 'path') 1742289 images +val: images/val # val images (relative to 'path') 80000 images +test: # test images (optional) + +# Classes +names: + 0: Person + 1: Sneakers + 2: Chair + 3: Other Shoes + 4: Hat + 5: Car + 6: Lamp + 7: Glasses + 8: Bottle + 9: Desk + 10: Cup + 11: Street Lights + 12: Cabinet/shelf + 13: Handbag/Satchel + 14: Bracelet + 15: Plate + 16: Picture/Frame + 17: Helmet + 18: Book + 19: Gloves + 20: Storage box + 21: Boat + 22: Leather Shoes + 23: Flower + 24: Bench + 25: Potted Plant + 26: Bowl/Basin + 27: Flag + 28: Pillow + 29: Boots + 30: Vase + 31: Microphone + 32: Necklace + 33: Ring + 34: SUV + 35: Wine Glass + 36: Belt + 37: Monitor/TV + 38: Backpack + 39: Umbrella + 40: Traffic Light + 41: Speaker + 42: Watch + 43: Tie + 44: Trash bin Can + 45: Slippers + 46: Bicycle + 47: Stool + 48: Barrel/bucket + 49: Van + 50: Couch + 51: Sandals + 52: Basket + 53: Drum + 54: Pen/Pencil + 55: Bus + 56: Wild Bird + 57: High Heels + 58: Motorcycle + 59: Guitar + 60: Carpet + 61: Cell Phone + 62: Bread + 63: Camera + 64: Canned + 65: Truck + 66: Traffic cone + 67: Cymbal + 68: Lifesaver + 69: Towel + 70: Stuffed Toy + 71: Candle + 72: Sailboat + 73: Laptop + 74: Awning + 75: Bed + 76: Faucet + 77: Tent + 78: Horse + 79: Mirror + 80: Power outlet + 81: Sink + 82: Apple + 83: Air Conditioner + 84: Knife + 85: Hockey Stick + 86: Paddle + 87: Pickup Truck + 88: Fork + 89: Traffic Sign + 90: Balloon + 91: Tripod + 92: Dog + 93: Spoon + 94: Clock + 95: Pot + 96: Cow + 97: Cake + 98: Dinning Table + 99: Sheep + 100: Hanger + 101: Blackboard/Whiteboard + 102: Napkin + 103: Other Fish + 104: Orange/Tangerine + 105: Toiletry + 106: Keyboard + 107: Tomato + 108: Lantern + 109: Machinery Vehicle + 110: Fan + 111: Green Vegetables + 112: Banana + 113: Baseball Glove + 114: Airplane + 115: Mouse + 116: Train + 117: Pumpkin + 118: Soccer + 119: Skiboard + 120: Luggage + 121: Nightstand + 122: Tea pot + 123: Telephone + 124: Trolley + 125: Head Phone + 126: Sports Car + 127: Stop Sign + 128: Dessert + 129: Scooter + 130: Stroller + 131: Crane + 132: Remote + 133: Refrigerator + 134: Oven + 135: Lemon + 136: Duck + 
137: Baseball Bat + 138: Surveillance Camera + 139: Cat + 140: Jug + 141: Broccoli + 142: Piano + 143: Pizza + 144: Elephant + 145: Skateboard + 146: Surfboard + 147: Gun + 148: Skating and Skiing shoes + 149: Gas stove + 150: Donut + 151: Bow Tie + 152: Carrot + 153: Toilet + 154: Kite + 155: Strawberry + 156: Other Balls + 157: Shovel + 158: Pepper + 159: Computer Box + 160: Toilet Paper + 161: Cleaning Products + 162: Chopsticks + 163: Microwave + 164: Pigeon + 165: Baseball + 166: Cutting/chopping Board + 167: Coffee Table + 168: Side Table + 169: Scissors + 170: Marker + 171: Pie + 172: Ladder + 173: Snowboard + 174: Cookies + 175: Radiator + 176: Fire Hydrant + 177: Basketball + 178: Zebra + 179: Grape + 180: Giraffe + 181: Potato + 182: Sausage + 183: Tricycle + 184: Violin + 185: Egg + 186: Fire Extinguisher + 187: Candy + 188: Fire Truck + 189: Billiards + 190: Converter + 191: Bathtub + 192: Wheelchair + 193: Golf Club + 194: Briefcase + 195: Cucumber + 196: Cigar/Cigarette + 197: Paint Brush + 198: Pear + 199: Heavy Truck + 200: Hamburger + 201: Extractor + 202: Extension Cord + 203: Tong + 204: Tennis Racket + 205: Folder + 206: American Football + 207: earphone + 208: Mask + 209: Kettle + 210: Tennis + 211: Ship + 212: Swing + 213: Coffee Machine + 214: Slide + 215: Carriage + 216: Onion + 217: Green beans + 218: Projector + 219: Frisbee + 220: Washing Machine/Drying Machine + 221: Chicken + 222: Printer + 223: Watermelon + 224: Saxophone + 225: Tissue + 226: Toothbrush + 227: Ice cream + 228: Hot-air balloon + 229: Cello + 230: French Fries + 231: Scale + 232: Trophy + 233: Cabbage + 234: Hot dog + 235: Blender + 236: Peach + 237: Rice + 238: Wallet/Purse + 239: Volleyball + 240: Deer + 241: Goose + 242: Tape + 243: Tablet + 244: Cosmetics + 245: Trumpet + 246: Pineapple + 247: Golf Ball + 248: Ambulance + 249: Parking meter + 250: Mango + 251: Key + 252: Hurdle + 253: Fishing Rod + 254: Medal + 255: Flute + 256: Brush + 257: Penguin + 258: Megaphone + 259: Corn + 260: Lettuce + 261: Garlic + 262: Swan + 263: Helicopter + 264: Green Onion + 265: Sandwich + 266: Nuts + 267: Speed Limit Sign + 268: Induction Cooker + 269: Broom + 270: Trombone + 271: Plum + 272: Rickshaw + 273: Goldfish + 274: Kiwi fruit + 275: Router/modem + 276: Poker Card + 277: Toaster + 278: Shrimp + 279: Sushi + 280: Cheese + 281: Notepaper + 282: Cherry + 283: Pliers + 284: CD + 285: Pasta + 286: Hammer + 287: Cue + 288: Avocado + 289: Hamimelon + 290: Flask + 291: Mushroom + 292: Screwdriver + 293: Soap + 294: Recorder + 295: Bear + 296: Eggplant + 297: Board Eraser + 298: Coconut + 299: Tape Measure/Ruler + 300: Pig + 301: Showerhead + 302: Globe + 303: Chips + 304: Steak + 305: Crosswalk Sign + 306: Stapler + 307: Camel + 308: Formula 1 + 309: Pomegranate + 310: Dishwasher + 311: Crab + 312: Hoverboard + 313: Meat ball + 314: Rice Cooker + 315: Tuba + 316: Calculator + 317: Papaya + 318: Antelope + 319: Parrot + 320: Seal + 321: Butterfly + 322: Dumbbell + 323: Donkey + 324: Lion + 325: Urinal + 326: Dolphin + 327: Electric Drill + 328: Hair Dryer + 329: Egg tart + 330: Jellyfish + 331: Treadmill + 332: Lighter + 333: Grapefruit + 334: Game board + 335: Mop + 336: Radish + 337: Baozi + 338: Target + 339: French + 340: Spring Rolls + 341: Monkey + 342: Rabbit + 343: Pencil Case + 344: Yak + 345: Red Cabbage + 346: Binoculars + 347: Asparagus + 348: Barbell + 349: Scallop + 350: Noddles + 351: Comb + 352: Dumpling + 353: Oyster + 354: Table Tennis paddle + 355: Cosmetics Brush/Eyeliner Pencil + 356: 
Chainsaw + 357: Eraser + 358: Lobster + 359: Durian + 360: Okra + 361: Lipstick + 362: Cosmetics Mirror + 363: Curling + 364: Table Tennis + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from tqdm import tqdm + + from utils.general import Path, check_requirements, download, np, xyxy2xywhn + + check_requirements(('pycocotools>=2.0',)) + from pycocotools.coco import COCO + + # Make Directories + dir = Path(yaml['path']) # dataset root dir + for p in 'images', 'labels': + (dir / p).mkdir(parents=True, exist_ok=True) + for q in 'train', 'val': + (dir / p / q).mkdir(parents=True, exist_ok=True) + + # Train, Val Splits + for split, patches in [('train', 50 + 1), ('val', 43 + 1)]: + print(f"Processing {split} in {patches} patches ...") + images, labels = dir / 'images' / split, dir / 'labels' / split + + # Download + url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/" + if split == 'train': + download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir, delete=False) # annotations json + download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, delete=False, threads=8) + elif split == 'val': + download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir, delete=False) # annotations json + download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, delete=False, threads=8) + download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, delete=False, threads=8) + + # Move + for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'): + f.rename(images / f.name) # move to /images/{split} + + # Labels + coco = COCO(dir / f'zhiyuan_objv2_{split}.json') + names = [x["name"] for x in coco.loadCats(coco.getCatIds())] + for cid, cat in enumerate(names): + catIds = coco.getCatIds(catNms=[cat]) + imgIds = coco.getImgIds(catIds=catIds) + for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'): + width, height = im["width"], im["height"] + path = Path(im["file_name"]) # image filename + try: + with open(labels / path.with_suffix('.txt').name, 'a') as file: + annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None) + for a in coco.loadAnns(annIds): + x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner) + xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4) + x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped + file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n") + except Exception as e: + print(e) diff --git a/src/train_utils/train_models/models/yolov5/data/SKU-110K.yaml b/src/train_utils/train_models/models/yolov5/data/SKU-110K.yaml new file mode 100644 index 0000000..edae717 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/SKU-110K.yaml @@ -0,0 +1,53 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19 by Trax Retail +# Example usage: python train.py --data SKU-110K.yaml +# parent +# ├── yolov5 +# └── datasets +# └── SKU-110K ← downloads here (13.6 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
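# Note on the multiline `download:` scripts used in this and the surrounding dataset YAMLs
# (Objects365 above, SKU-110K/VOC/VisDrone below): they reference a bare `yaml` variable
# because YOLOv5's dataset check executes the script with the parsed file injected as `yaml`.
# Illustrative sketch only (paraphrased, not the exact upstream implementation):
#
#   import yaml as pyyaml                                # hypothetical standalone runner
#   data = pyyaml.safe_load(open('SKU-110K.yaml'))       # parse this file into a dict
#   script = data.get('download')
#   if script and '\n' in script:                        # multiline value => treat as Python
#       exec(script, {'yaml': data})                     # the script sees this dict as `yaml`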
+path: ../datasets/SKU-110K # dataset root dir +train: train.txt # train images (relative to 'path') 8219 images +val: val.txt # val images (relative to 'path') 588 images +test: test.txt # test images (optional) 2936 images + +# Classes +names: + 0: object + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import shutil + from tqdm import tqdm + from utils.general import np, pd, Path, download, xyxy2xywh + + + # Download + dir = Path(yaml['path']) # dataset root dir + parent = Path(dir.parent) # download dir + urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz'] + download(urls, dir=parent, delete=False) + + # Rename directories + if dir.exists(): + shutil.rmtree(dir) + (parent / 'SKU110K_fixed').rename(dir) # rename dir + (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir + + # Convert labels + names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names + for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv': + x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations + images, unique_images = x[:, 0], np.unique(x[:, 0]) + with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f: + f.writelines(f'./images/{s}\n' for s in unique_images) + for im in tqdm(unique_images, desc=f'Converting {dir / d}'): + cls = 0 # single-class dataset + with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f: + for r in x[images == im]: + w, h = r[6], r[7] # image width, height + xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance + f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label diff --git a/src/train_utils/train_models/models/yolov5/data/VOC.yaml b/src/train_utils/train_models/models/yolov5/data/VOC.yaml new file mode 100644 index 0000000..27d3810 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/VOC.yaml @@ -0,0 +1,100 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC by University of Oxford +# Example usage: python train.py --data VOC.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VOC ← downloads here (2.8 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VOC +train: # train images (relative to 'path') 16551 images + - images/train2012 + - images/train2007 + - images/val2012 + - images/val2007 +val: # val images (relative to 'path') 4952 images + - images/test2007 +test: # test images (optional) + - images/test2007 + +# Classes +names: + 0: aeroplane + 1: bicycle + 2: bird + 3: boat + 4: bottle + 5: bus + 6: car + 7: cat + 8: chair + 9: cow + 10: diningtable + 11: dog + 12: horse + 13: motorbike + 14: person + 15: pottedplant + 16: sheep + 17: sofa + 18: train + 19: tvmonitor + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + import xml.etree.ElementTree as ET + + from tqdm import tqdm + from utils.general import download, Path + + + def convert_label(path, lb_path, year, image_id): + def convert_box(size, box): + dw, dh = 1. / size[0], 1. 
/ size[1] + x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2] + return x * dw, y * dh, w * dw, h * dh + + in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml') + out_file = open(lb_path, 'w') + tree = ET.parse(in_file) + root = tree.getroot() + size = root.find('size') + w = int(size.find('width').text) + h = int(size.find('height').text) + + names = list(yaml['names'].values()) # names list + for obj in root.iter('object'): + cls = obj.find('name').text + if cls in names and int(obj.find('difficult').text) != 1: + xmlbox = obj.find('bndbox') + bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')]) + cls_id = names.index(cls) # class id + out_file.write(" ".join([str(a) for a in (cls_id, *bb)]) + '\n') + + + # Download + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images + f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images + f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images + download(urls, dir=dir / 'images', delete=False, curl=True, threads=3) + + # Convert + path = dir / 'images/VOCdevkit' + for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'): + imgs_path = dir / 'images' / f'{image_set}{year}' + lbs_path = dir / 'labels' / f'{image_set}{year}' + imgs_path.mkdir(exist_ok=True, parents=True) + lbs_path.mkdir(exist_ok=True, parents=True) + + with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f: + image_ids = f.read().strip().split() + for id in tqdm(image_ids, desc=f'{image_set}{year}'): + f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path + lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path + f.rename(imgs_path / f.name) # move image + convert_label(path, lb_path, year, id) # convert labels to YOLO format diff --git a/src/train_utils/train_models/models/yolov5/data/VisDrone.yaml b/src/train_utils/train_models/models/yolov5/data/VisDrone.yaml new file mode 100644 index 0000000..a8bcf8e --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/VisDrone.yaml @@ -0,0 +1,70 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset by Tianjin University +# Example usage: python train.py --data VisDrone.yaml +# parent +# ├── yolov5 +# └── datasets +# └── VisDrone ← downloads here (2.3 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] +path: ../datasets/VisDrone # dataset root dir +train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images +val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images +test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images + +# Classes +names: + 0: pedestrian + 1: people + 2: bicycle + 3: car + 4: van + 5: truck + 6: tricycle + 7: awning-tricycle + 8: bus + 9: motor + + +# Download script/URL (optional) --------------------------------------------------------------------------------------- +download: | + from utils.general import download, os, Path + + def visdrone2yolo(dir): + from PIL import Image + from tqdm import tqdm + + def convert_box(size, box): + # Convert VisDrone box to YOLO xywh box + dw = 1. / size[0] + dh = 1. 
/ size[1] + return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh + + (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory + pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}') + for f in pbar: + img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size + lines = [] + with open(f, 'r') as file: # read annotation.txt + for row in [x.split(',') for x in file.read().strip().splitlines()]: + if row[4] == '0': # VisDrone 'ignored regions' class 0 + continue + cls = int(row[5]) - 1 + box = convert_box(img_size, tuple(map(int, row[:4]))) + lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n") + with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl: + fl.writelines(lines) # write label.txt + + + # Download + dir = Path(yaml['path']) # dataset root dir + urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip', + 'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip'] + download(urls, dir=dir, curl=True, threads=4) + + # Convert + for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev': + visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels diff --git a/src/train_utils/train_models/models/yolov5/data/coco.yaml b/src/train_utils/train_models/models/yolov5/data/coco.yaml new file mode 100644 index 0000000..d64dfc7 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/coco.yaml @@ -0,0 +1,116 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO 2017 dataset http://cocodataset.org by Microsoft +# Example usage: python train.py --data coco.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco ← downloads here (20.1 GB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
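# The conversion scripts above (Objects365, SKU-110K, VOC, VisDrone) all emit labels in the
# standard YOLO format: one labels/<image-stem>.txt per image, one row per object,
# `class x_center y_center width height`, with all values normalised to 0-1 by image size.
# Worked example (illustrative values): a 100x200 px box with top-left corner (50, 40) in a
# 640x480 image becomes xyxy = (50, 40, 150, 240) and, for class 0, the label row
#   0 0.156250 0.291667 0.156250 0.416667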
+path: ../datasets/coco # dataset root dir +train: train2017.txt # train images (relative to 'path') 118287 images +val: val2017.txt # val images (relative to 'path') 5000 images +test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: | + from utils.general import download, Path + + + # Download labels + segments = False # segment or box labels + dir = Path(yaml['path']) # dataset root dir + url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/' + urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels + download(urls, dir=dir.parent) + + # Download data + urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images + 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images + 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional) + download(urls, dir=dir / 'images', threads=3) diff --git a/src/train_utils/train_models/models/yolov5/data/coco128-seg.yaml b/src/train_utils/train_models/models/yolov5/data/coco128-seg.yaml new file mode 100644 index 0000000..5e81910 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/coco128-seg.yaml @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128-seg dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128-seg ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
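# Unlike the detection datasets above, the *-seg labels store one normalised polygon per
# object rather than a box: `class x1 y1 x2 y2 ... xn yn`, with every vertex in 0-1 image
# coordinates (the same polygon labels the `segments = True` branch of the coco.yaml
# download script fetches). Illustrative row for a triangular class-0 mask:
#   0 0.10 0.20 0.50 0.20 0.30 0.60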
+path: ../datasets/coco128-seg # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128-seg.zip diff --git a/src/train_utils/train_models/models/yolov5/data/coco128.yaml b/src/train_utils/train_models/models/yolov5/data/coco128.yaml new file mode 100644 index 0000000..1255673 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/coco128.yaml @@ -0,0 +1,101 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics +# Example usage: python train.py --data coco128.yaml +# parent +# ├── yolov5 +# └── datasets +# └── coco128 ← downloads here (7 MB) + + +# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..] 
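# Note: train and val below intentionally point to the same 128 images (images/train2017),
# so a model can overfit them quickly; this dataset is meant for smoke-testing the training
# pipeline rather than for benchmarking. Typical quickstart invocation (per the YOLOv5 docs):
#   python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt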
+path: ../datasets/coco128 # dataset root dir +train: images/train2017 # train images (relative to 'path') 128 images +val: images/train2017 # val images (relative to 'path') 128 images +test: # test images (optional) + +# Classes +names: + 0: person + 1: bicycle + 2: car + 3: motorcycle + 4: airplane + 5: bus + 6: train + 7: truck + 8: boat + 9: traffic light + 10: fire hydrant + 11: stop sign + 12: parking meter + 13: bench + 14: bird + 15: cat + 16: dog + 17: horse + 18: sheep + 19: cow + 20: elephant + 21: bear + 22: zebra + 23: giraffe + 24: backpack + 25: umbrella + 26: handbag + 27: tie + 28: suitcase + 29: frisbee + 30: skis + 31: snowboard + 32: sports ball + 33: kite + 34: baseball bat + 35: baseball glove + 36: skateboard + 37: surfboard + 38: tennis racket + 39: bottle + 40: wine glass + 41: cup + 42: fork + 43: knife + 44: spoon + 45: bowl + 46: banana + 47: apple + 48: sandwich + 49: orange + 50: broccoli + 51: carrot + 52: hot dog + 53: pizza + 54: donut + 55: cake + 56: chair + 57: couch + 58: potted plant + 59: bed + 60: dining table + 61: toilet + 62: tv + 63: laptop + 64: mouse + 65: remote + 66: keyboard + 67: cell phone + 68: microwave + 69: oven + 70: toaster + 71: sink + 72: refrigerator + 73: book + 74: clock + 75: vase + 76: scissors + 77: teddy bear + 78: hair drier + 79: toothbrush + + +# Download script/URL (optional) +download: https://ultralytics.com/assets/coco128.zip diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.Objects365.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.Objects365.yaml new file mode 100644 index 0000000..7497174 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.Objects365.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for Objects365 training +# python train.py --weights yolov5m.pt --data Objects365.yaml --evolve +# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.00258 +lrf: 0.17 +momentum: 0.779 +weight_decay: 0.00058 +warmup_epochs: 1.33 +warmup_momentum: 0.86 +warmup_bias_lr: 0.0711 +box: 0.0539 +cls: 0.299 +cls_pw: 0.825 +obj: 0.632 +obj_pw: 1.0 +iou_t: 0.2 +anchor_t: 3.44 +anchors: 3.2 +fl_gamma: 0.0 +hsv_h: 0.0188 +hsv_s: 0.704 +hsv_v: 0.36 +degrees: 0.0 +translate: 0.0902 +scale: 0.491 +shear: 0.0 +perspective: 0.0 +flipud: 0.0 +fliplr: 0.5 +mosaic: 1.0 +mixup: 0.0 +copy_paste: 0.0 diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.VOC.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.VOC.yaml new file mode 100644 index 0000000..0aa4e7d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.VOC.yaml @@ -0,0 +1,40 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for VOC training +# python train.py --batch 128 --weights yolov5m6.pt --data VOC.yaml --epochs 50 --img 512 --hyp hyp.scratch-med.yaml --evolve +# See Hyperparameter Evolution tutorial for details https://github.com/ultralytics/yolov5#tutorials + +# YOLOv5 Hyperparameter Evolution Results +# Best generation: 467 +# Last generation: 996 +# metrics/precision, metrics/recall, metrics/mAP_0.5, metrics/mAP_0.5:0.95, val/box_loss, val/obj_loss, val/cls_loss +# 0.87729, 0.85125, 0.91286, 0.72664, 0.0076739, 0.0042529, 0.0013865 + +lr0: 0.00334 +lrf: 0.15135 +momentum: 0.74832 +weight_decay: 0.00025 +warmup_epochs: 3.3835 +warmup_momentum: 0.59462 +warmup_bias_lr: 0.18657 +box: 0.02 +cls: 0.21638 +cls_pw: 0.5 +obj: 0.51728 +obj_pw: 0.67198 +iou_t: 0.2 
+anchor_t: 3.3744 +fl_gamma: 0.0 +hsv_h: 0.01041 +hsv_s: 0.54703 +hsv_v: 0.27739 +degrees: 0.0 +translate: 0.04591 +scale: 0.75544 +shear: 0.0 +perspective: 0.0 +flipud: 0.0 +fliplr: 0.5 +mosaic: 0.85834 +mixup: 0.04266 +copy_paste: 0.0 +anchors: 3.412 diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.no-augmentation.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.no-augmentation.yaml new file mode 100644 index 0000000..8fbd5b2 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.no-augmentation.yaml @@ -0,0 +1,35 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters when using Albumentations frameworks +# python train.py --hyp hyp.no-augmentation.yaml +# See https://github.com/ultralytics/yolov5/pull/3882 for YOLOv5 + Albumentations Usage examples + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +# this parameters are all zero since we want to use albumentation framework +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0 # image HSV-Hue augmentation (fraction) +hsv_s: 00 # image HSV-Saturation augmentation (fraction) +hsv_v: 0 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0 # image translation (+/- fraction) +scale: 0 # image scale (+/- gain) +shear: 0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.0 # image flip left-right (probability) +mosaic: 0.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-high.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-high.yaml new file mode 100644 index 0000000..123cc84 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-high.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for high-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per 
output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.1 # segment copy-paste (probability) diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-low.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-low.yaml new file mode 100644 index 0000000..b9ef1d5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-low.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for low-augmentation COCO training from scratch +# python train.py --batch 64 --cfg yolov5n6.yaml --weights '' --data coco.yaml --img 640 --epochs 300 --linear +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-med.yaml b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-med.yaml new file mode 100644 index 0000000..d6867d7 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/hyps/hyp.scratch-med.yaml @@ -0,0 +1,34 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Hyperparameters for medium-augmentation COCO training from scratch +# python train.py --batch 32 --cfg yolov5m6.yaml --weights '' --data coco.yaml --img 1280 --epochs 300 +# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials + +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # 
optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.1 # image mixup (probability) +copy_paste: 0.0 # segment copy-paste (probability) diff --git a/src/train_utils/train_models/models/yolov5/data/images/bus.jpg b/src/train_utils/train_models/models/yolov5/data/images/bus.jpg new file mode 100644 index 0000000..b43e311 Binary files /dev/null and b/src/train_utils/train_models/models/yolov5/data/images/bus.jpg differ diff --git a/src/train_utils/train_models/models/yolov5/data/images/zidane.jpg b/src/train_utils/train_models/models/yolov5/data/images/zidane.jpg new file mode 100644 index 0000000..92d72ea Binary files /dev/null and b/src/train_utils/train_models/models/yolov5/data/images/zidane.jpg differ diff --git a/src/train_utils/train_models/models/yolov5/data/scripts/download_weights.sh b/src/train_utils/train_models/models/yolov5/data/scripts/download_weights.sh new file mode 100644 index 0000000..31e0a15 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/data/scripts/download_weights.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Download latest models from https://github.com/ultralytics/yolov5/releases +# Example usage: bash data/scripts/download_weights.sh +# parent +# └── yolov5 +# ├── yolov5s.pt ← downloads here +# ├── yolov5m.pt +# └── ... 
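# The embedded Python heredoc that follows is truncated in this patch excerpt; in spirit it
# loops over the released model variants and fetches any weights that are missing locally.
# A minimal sketch of that idea, assuming utils.downloads.attempt_download (the helper the
# YOLOv5 codebase uses to pull release assets) is importable from the repo root:
#
#   python - <<'EOF'
#   from utils.downloads import attempt_download
#   for suffix in ('n', 's', 'm', 'l', 'x'):             # P5 model sizes; extend as needed
#       attempt_download(f'weights/yolov5{suffix}.pt')    # download only if not present
#   EOF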
+ +python - <= cls >= 0, f'incorrect class index {cls}' + + # Write YOLO label + if id not in shapes: + shapes[id] = Image.open(file).size + box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True) + with open((labels / id).with_suffix('.txt'), 'a') as f: + f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt + except Exception as e: + print(f'WARNING: skipping one label for {file}: {e}') + + + # Download manually from https://challenge.xviewdataset.org + dir = Path(yaml['path']) # dataset root dir + # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels + # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images + # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels) + # download(urls, dir=dir, delete=False) + + # Convert labels + convert_labels(dir / 'xView_train.geojson') + + # Move images + images = Path(dir / 'images') + images.mkdir(parents=True, exist_ok=True) + Path(dir / 'train_images').rename(dir / 'images' / 'train') + Path(dir / 'val_images').rename(dir / 'images' / 'val') + + # Split + autosplit(dir / 'images' / 'train') diff --git a/src/train_utils/train_models/models/yolov5/detect.py b/src/train_utils/train_models/models/yolov5/detect.py new file mode 100644 index 0000000..3f32d7a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/detect.py @@ -0,0 +1,261 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 detection inference on images, videos, directories, globs, YouTube, webcam, streams, etc. + +Usage - sources: + $ python detect.py --weights yolov5s.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s_openvino_model # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s.pt', # model path or triton URL + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + 
iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/detect', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + bs = 1 # batch_size + if webcam: + view_img = check_imshow(warn=True) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt or model.triton else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred = model(im, augment=augment, visualize=visualize) + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / 
p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path or triton URL') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', 
nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/export.py b/src/train_utils/train_models/models/yolov5/export.py new file mode 100644 index 0000000..e167b20 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/export.py @@ -0,0 +1,672 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Export a YOLOv5 PyTorch model to other formats. 
TensorFlow exports authored by https://github.com/zldrobit + +Format | `export.py --include` | Model +--- | --- | --- +PyTorch | - | yolov5s.pt +TorchScript | `torchscript` | yolov5s.torchscript +ONNX | `onnx` | yolov5s.onnx +OpenVINO | `openvino` | yolov5s_openvino_model/ +TensorRT | `engine` | yolov5s.engine +CoreML | `coreml` | yolov5s.mlmodel +TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/ +TensorFlow GraphDef | `pb` | yolov5s.pb +TensorFlow Lite | `tflite` | yolov5s.tflite +TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite +TensorFlow.js | `tfjs` | yolov5s_web_model/ +PaddlePaddle | `paddle` | yolov5s_paddle_model/ + +Requirements: + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime openvino-dev tensorflow-cpu # CPU + $ pip install -r requirements.txt coremltools onnx onnx-simplifier onnxruntime-gpu openvino-dev tensorflow # GPU + +Usage: + $ python export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ... + +Inference: + $ python detect.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s_openvino_model # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle + +TensorFlow.js: + $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example + $ npm install + $ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model + $ npm start +""" + +import argparse +import contextlib +import json +import os +import platform +import re +import subprocess +import sys +import time +import warnings +from pathlib import Path + +import pandas as pd +import torch +from torch.utils.mobile_optimizer import optimize_for_mobile + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.experimental import attempt_load +from models.yolo import ClassificationModel, Detect, DetectionModel, SegmentationModel +from utils.dataloaders import LoadImages +from utils.general import (LOGGER, Profile, check_dataset, check_img_size, check_requirements, check_version, + check_yaml, colorstr, file_size, get_default_args, print_args, url2file, yaml_save) +from utils.torch_utils import select_device, smart_inference_mode + +MACOS = platform.system() == 'Darwin' # macOS environment + + +def export_formats(): + # YOLOv5 export formats + x = [ + ['PyTorch', '-', '.pt', True, True], + ['TorchScript', 'torchscript', '.torchscript', True, True], + ['ONNX', 'onnx', '.onnx', True, True], + ['OpenVINO', 'openvino', '_openvino_model', True, False], + ['TensorRT', 'engine', '.engine', False, True], + ['CoreML', 'coreml', '.mlmodel', True, False], + ['TensorFlow SavedModel', 'saved_model', '_saved_model', True, True], + ['TensorFlow GraphDef', 'pb', '.pb', True, True], + ['TensorFlow Lite', 'tflite', '.tflite', True, False], + ['TensorFlow Edge TPU', 'edgetpu', '_edgetpu.tflite', False, False], + ['TensorFlow.js', 'tfjs', '_web_model', False, False], + ['PaddlePaddle', 'paddle', '_paddle_model', True, True],] + return pd.DataFrame(x, columns=['Format', 'Argument', 'Suffix', 'CPU', 'GPU']) + + +def 
try_export(inner_func): + # YOLOv5 export decorator, i.e. @try_export + inner_args = get_default_args(inner_func) + + def outer_func(*args, **kwargs): + prefix = inner_args['prefix'] + try: + with Profile() as dt: + f, model = inner_func(*args, **kwargs) + LOGGER.info(f'{prefix} export success ✅ {dt.t:.1f}s, saved as {f} ({file_size(f):.1f} MB)') + return f, model + except Exception as e: + LOGGER.info(f'{prefix} export failure ❌ {dt.t:.1f}s: {e}') + return None, None + + return outer_func + + +@try_export +def export_torchscript(model, im, file, optimize, prefix=colorstr('TorchScript:')): + # YOLOv5 TorchScript model export + LOGGER.info(f'\n{prefix} starting export with torch {torch.__version__}...') + f = file.with_suffix('.torchscript') + + ts = torch.jit.trace(model, im, strict=False) + d = {'shape': im.shape, 'stride': int(max(model.stride)), 'names': model.names} + extra_files = {'config.txt': json.dumps(d)} # torch._C.ExtraFilesMap() + if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html + optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files) + else: + ts.save(str(f), _extra_files=extra_files) + return f, None + + +@try_export +def export_onnx(model, im, file, opset, dynamic, simplify, prefix=colorstr('ONNX:')): + # YOLOv5 ONNX export + check_requirements('onnx>=1.12.0') + import onnx + + LOGGER.info(f'\n{prefix} starting export with onnx {onnx.__version__}...') + f = file.with_suffix('.onnx') + + output_names = ['output0', 'output1'] if isinstance(model, SegmentationModel) else ['output0'] + if dynamic: + dynamic = {'images': {0: 'batch', 2: 'height', 3: 'width'}} # shape(1,3,640,640) + if isinstance(model, SegmentationModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + dynamic['output1'] = {0: 'batch', 2: 'mask_height', 3: 'mask_width'} # shape(1,32,160,160) + elif isinstance(model, DetectionModel): + dynamic['output0'] = {0: 'batch', 1: 'anchors'} # shape(1,25200,85) + + torch.onnx.export( + model.cpu() if dynamic else model, # --dynamic only compatible with cpu + im.cpu() if dynamic else im, + f, + verbose=False, + opset_version=opset, + do_constant_folding=True, # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False + input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic or None) + + # Checks + model_onnx = onnx.load(f) # load onnx model + onnx.checker.check_model(model_onnx) # check onnx model + + # Metadata + d = {'stride': int(max(model.stride)), 'names': model.names} + for k, v in d.items(): + meta = model_onnx.metadata_props.add() + meta.key, meta.value = k, str(v) + onnx.save(model_onnx, f) + + # Simplify + if simplify: + try: + cuda = torch.cuda.is_available() + check_requirements(('onnxruntime-gpu' if cuda else 'onnxruntime', 'onnx-simplifier>=0.4.1')) + import onnxsim + + LOGGER.info(f'{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...') + model_onnx, check = onnxsim.simplify(model_onnx) + assert check, 'assert check failed' + onnx.save(model_onnx, f) + except Exception as e: + LOGGER.info(f'{prefix} simplifier failure: {e}') + return f, model_onnx + + +@try_export +def export_openvino(file, metadata, half, prefix=colorstr('OpenVINO:')): + # YOLOv5 OpenVINO export + check_requirements('openvino-dev') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + import openvino.inference_engine as ie + + LOGGER.info(f'\n{prefix} starting export with openvino {ie.__version__}...') + f = str(file).replace('.pt', 
f'_openvino_model{os.sep}') + + args = [ + 'mo', + '--input_model', + str(file.with_suffix('.onnx')), + '--output_dir', + f, + '--data_type', + ('FP16' if half else 'FP32'),] + subprocess.run(args, check=True, env=os.environ) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_paddle(model, im, file, metadata, prefix=colorstr('PaddlePaddle:')): + # YOLOv5 Paddle export + check_requirements(('paddlepaddle', 'x2paddle')) + import x2paddle + from x2paddle.convert import pytorch2paddle + + LOGGER.info(f'\n{prefix} starting export with X2Paddle {x2paddle.__version__}...') + f = str(file).replace('.pt', f'_paddle_model{os.sep}') + + pytorch2paddle(module=model, save_dir=f, jit_type='trace', input_examples=[im]) # export + yaml_save(Path(f) / file.with_suffix('.yaml').name, metadata) # add metadata.yaml + return f, None + + +@try_export +def export_coreml(model, im, file, int8, half, prefix=colorstr('CoreML:')): + # YOLOv5 CoreML export + check_requirements('coremltools') + import coremltools as ct + + LOGGER.info(f'\n{prefix} starting export with coremltools {ct.__version__}...') + f = file.with_suffix('.mlmodel') + + ts = torch.jit.trace(model, im, strict=False) # TorchScript model + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]) + bits, mode = (8, 'kmeans_lut') if int8 else (16, 'linear') if half else (32, None) + if bits < 32: + if MACOS: # quantization only supported on macOS + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=DeprecationWarning) # suppress numpy==1.20 float warning + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + else: + print(f'{prefix} quantization only supported on macOS, skipping...') + ct_model.save(f) + return f, ct_model + + +@try_export +def export_engine(model, im, file, half, dynamic, simplify, workspace=4, verbose=False, prefix=colorstr('TensorRT:')): + # YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt + assert im.device.type != 'cpu', 'export running on CPU but must be on GPU, i.e. 
`python export.py --device 0`' + try: + import tensorrt as trt + except Exception: + if platform.system() == 'Linux': + check_requirements('nvidia-tensorrt', cmds='-U --index-url https://pypi.ngc.nvidia.com') + import tensorrt as trt + + if trt.__version__[0] == '7': # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012 + grid = model.model[-1].anchor_grid + model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid] + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + model.model[-1].anchor_grid = grid + else: # TensorRT >= 8 + check_version(trt.__version__, '8.0.0', hard=True) # require tensorrt>=8.0.0 + export_onnx(model, im, file, 12, dynamic, simplify) # opset 12 + onnx = file.with_suffix('.onnx') + + LOGGER.info(f'\n{prefix} starting export with TensorRT {trt.__version__}...') + assert onnx.exists(), f'failed to export ONNX file: {onnx}' + f = file.with_suffix('.engine') # TensorRT engine file + logger = trt.Logger(trt.Logger.INFO) + if verbose: + logger.min_severity = trt.Logger.Severity.VERBOSE + + builder = trt.Builder(logger) + config = builder.create_builder_config() + config.max_workspace_size = workspace * 1 << 30 + # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace << 30) # fix TRT 8.4 deprecation notice + + flag = (1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)) + network = builder.create_network(flag) + parser = trt.OnnxParser(network, logger) + if not parser.parse_from_file(str(onnx)): + raise RuntimeError(f'failed to load ONNX file: {onnx}') + + inputs = [network.get_input(i) for i in range(network.num_inputs)] + outputs = [network.get_output(i) for i in range(network.num_outputs)] + for inp in inputs: + LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}') + for out in outputs: + LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}') + + if dynamic: + if im.shape[0] <= 1: + LOGGER.warning(f'{prefix} WARNING ⚠️ --dynamic model requires maximum --batch-size argument') + profile = builder.create_optimization_profile() + for inp in inputs: + profile.set_shape(inp.name, (1, *im.shape[1:]), (max(1, im.shape[0] // 2), *im.shape[1:]), im.shape) + config.add_optimization_profile(profile) + + LOGGER.info(f'{prefix} building FP{16 if builder.platform_has_fast_fp16 and half else 32} engine as {f}') + if builder.platform_has_fast_fp16 and half: + config.set_flag(trt.BuilderFlag.FP16) + with builder.build_engine(network, config) as engine, open(f, 'wb') as t: + t.write(engine.serialize()) + return f, None + + +@try_export +def export_saved_model(model, + im, + file, + dynamic, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25, + keras=False, + prefix=colorstr('TensorFlow SavedModel:')): + # YOLOv5 TensorFlow SavedModel export + try: + import tensorflow as tf + except Exception: + check_requirements(f"tensorflow{'' if torch.cuda.is_available() else '-macos' if MACOS else '-cpu'}") + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + from models.tf import TFModel + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = str(file).replace('.pt', '_saved_model') + batch_size, ch, *imgsz = list(im.shape) # BCHW + + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + im = tf.zeros((batch_size, *imgsz, ch)) # BHWC order for TensorFlow + _ = tf_model.predict(im, tf_nms, agnostic_nms, 
topk_per_class, topk_all, iou_thres, conf_thres) + inputs = tf.keras.Input(shape=(*imgsz, ch), batch_size=None if dynamic else batch_size) + outputs = tf_model.predict(inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres) + keras_model = tf.keras.Model(inputs=inputs, outputs=outputs) + keras_model.trainable = False + keras_model.summary() + if keras: + keras_model.save(f, save_format='tf') + else: + spec = tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype) + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(spec) + frozen_func = convert_variables_to_constants_v2(m) + tfm = tf.Module() + tfm.__call__ = tf.function(lambda x: frozen_func(x)[:4] if tf_nms else frozen_func(x), [spec]) + tfm.__call__(im) + tf.saved_model.save(tfm, + f, + options=tf.saved_model.SaveOptions(experimental_custom_gradients=False) if check_version( + tf.__version__, '2.6') else tf.saved_model.SaveOptions()) + return f, keras_model + + +@try_export +def export_pb(keras_model, file, prefix=colorstr('TensorFlow GraphDef:')): + # YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow + import tensorflow as tf + from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2 + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + f = file.with_suffix('.pb') + + m = tf.function(lambda x: keras_model(x)) # full model + m = m.get_concrete_function(tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)) + frozen_func = convert_variables_to_constants_v2(m) + frozen_func.graph.as_graph_def() + tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False) + return f, None + + +@try_export +def export_tflite(keras_model, im, file, int8, data, nms, agnostic_nms, prefix=colorstr('TensorFlow Lite:')): + # YOLOv5 TensorFlow Lite export + import tensorflow as tf + + LOGGER.info(f'\n{prefix} starting export with tensorflow {tf.__version__}...') + batch_size, ch, *imgsz = list(im.shape) # BCHW + f = str(file).replace('.pt', '-fp16.tflite') + + converter = tf.lite.TFLiteConverter.from_keras_model(keras_model) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS] + converter.target_spec.supported_types = [tf.float16] + converter.optimizations = [tf.lite.Optimize.DEFAULT] + if int8: + from models.tf import representative_dataset_gen + dataset = LoadImages(check_dataset(check_yaml(data))['train'], img_size=imgsz, auto=False) + converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib=100) + converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8] + converter.target_spec.supported_types = [] + converter.inference_input_type = tf.uint8 # or tf.int8 + converter.inference_output_type = tf.uint8 # or tf.int8 + converter.experimental_new_quantizer = True + f = str(file).replace('.pt', '-int8.tflite') + if nms or agnostic_nms: + converter.target_spec.supported_ops.append(tf.lite.OpsSet.SELECT_TF_OPS) + + tflite_model = converter.convert() + open(f, 'wb').write(tflite_model) + return f, None + + +@try_export +def export_edgetpu(file, prefix=colorstr('Edge TPU:')): + # YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/ + cmd = 'edgetpu_compiler --version' + help_url = 'https://coral.ai/docs/edgetpu/compiler/' + assert platform.system() == 'Linux', f'export only supported on Linux. 
See {help_url}' + if subprocess.run(f'{cmd} > /dev/null 2>&1', shell=True).returncode != 0: + LOGGER.info(f'\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}') + sudo = subprocess.run('sudo --version >/dev/null', shell=True).returncode == 0 # sudo installed on system + for c in ( + 'curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -', + 'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list', + 'sudo apt-get update', 'sudo apt-get install edgetpu-compiler'): + subprocess.run(c if sudo else c.replace('sudo ', ''), shell=True, check=True) + ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1] + + LOGGER.info(f'\n{prefix} starting export with Edge TPU compiler {ver}...') + f = str(file).replace('.pt', '-int8_edgetpu.tflite') # Edge TPU model + f_tfl = str(file).replace('.pt', '-int8.tflite') # TFLite model + + subprocess.run([ + 'edgetpu_compiler', + '-s', + '-d', + '-k', + '10', + '--out_dir', + str(file.parent), + f_tfl,], check=True) + return f, None + + +@try_export +def export_tfjs(file, int8, prefix=colorstr('TensorFlow.js:')): + # YOLOv5 TensorFlow.js export + check_requirements('tensorflowjs') + import tensorflowjs as tfjs + + LOGGER.info(f'\n{prefix} starting export with tensorflowjs {tfjs.__version__}...') + f = str(file).replace('.pt', '_web_model') # js dir + f_pb = file.with_suffix('.pb') # *.pb path + f_json = f'{f}/model.json' # *.json path + + args = [ + 'tensorflowjs_converter', + '--input_format=tf_frozen_model', + '--quantize_uint8' if int8 else '', + '--output_node_names=Identity,Identity_1,Identity_2,Identity_3', + str(f_pb), + str(f),] + subprocess.run([arg for arg in args if arg], check=True) + + json = Path(f_json).read_text() + with open(f_json, 'w') as j: # sort JSON Identity_* in ascending order + subst = re.sub( + r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}, ' + r'"Identity.?.?": {"name": "Identity.?.?"}}}', r'{"outputs": {"Identity": {"name": "Identity"}, ' + r'"Identity_1": {"name": "Identity_1"}, ' + r'"Identity_2": {"name": "Identity_2"}, ' + r'"Identity_3": {"name": "Identity_3"}}}', json) + j.write(subst) + return f, None + + +def add_tflite_metadata(file, metadata, num_outputs): + # Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata + with contextlib.suppress(ImportError): + # check_requirements('tflite_support') + from tflite_support import flatbuffers + from tflite_support import metadata as _metadata + from tflite_support import metadata_schema_py_generated as _metadata_fb + + tmp_file = Path('/tmp/meta.txt') + with open(tmp_file, 'w') as meta_f: + meta_f.write(str(metadata)) + + model_meta = _metadata_fb.ModelMetadataT() + label_file = _metadata_fb.AssociatedFileT() + label_file.name = tmp_file.name + model_meta.associatedFiles = [label_file] + + subgraph = _metadata_fb.SubGraphMetadataT() + subgraph.inputTensorMetadata = [_metadata_fb.TensorMetadataT()] + subgraph.outputTensorMetadata = [_metadata_fb.TensorMetadataT()] * num_outputs + model_meta.subgraphMetadata = [subgraph] + + b = flatbuffers.Builder(0) + b.Finish(model_meta.Pack(b), _metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER) + metadata_buf = b.Output() + + populator = _metadata.MetadataPopulator.with_model_file(file) + populator.load_metadata_buffer(metadata_buf) + 
populator.load_associated_files([str(tmp_file)]) + populator.populate() + tmp_file.unlink() + + +@smart_inference_mode() +def run( + data=ROOT / 'data/coco128.yaml', # 'dataset.yaml path' + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # image (height, width) + batch_size=1, # batch size + device='cpu', # cuda device, i.e. 0 or 0,1,2,3 or cpu + include=('torchscript', 'onnx'), # include formats + half=False, # FP16 half-precision export + inplace=False, # set YOLOv5 Detect() inplace=True + keras=False, # use Keras + optimize=False, # TorchScript: optimize for mobile + int8=False, # CoreML/TF INT8 quantization + dynamic=False, # ONNX/TF/TensorRT: dynamic axes + simplify=False, # ONNX: simplify model + opset=12, # ONNX: opset version + verbose=False, # TensorRT: verbose log + workspace=4, # TensorRT: workspace size (GB) + nms=False, # TF: add NMS to model + agnostic_nms=False, # TF: add agnostic NMS to model + topk_per_class=100, # TF.js NMS: topk per class to keep + topk_all=100, # TF.js NMS: topk for all classes to keep + iou_thres=0.45, # TF.js NMS: IoU threshold + conf_thres=0.25, # TF.js NMS: confidence threshold +): + t = time.time() + include = [x.lower() for x in include] # to lowercase + fmts = tuple(export_formats()['Argument'][1:]) # --include arguments + flags = [x in include for x in fmts] + assert sum(flags) == len(include), f'ERROR: Invalid --include {include}, valid --include arguments are {fmts}' + jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle = flags # export booleans + file = Path(url2file(weights) if str(weights).startswith(('http:/', 'https:/')) else weights) # PyTorch weights + + # Load PyTorch model + device = select_device(device) + if half: + assert device.type != 'cpu' or coreml, '--half only compatible with GPU export, i.e. use --device 0' + assert not dynamic, '--half not compatible with --dynamic, i.e. use either --half or --dynamic but not both' + model = attempt_load(weights, device=device, inplace=True, fuse=True) # load FP32 model + + # Checks + imgsz *= 2 if len(imgsz) == 1 else 1 # expand + if optimize: + assert device.type == 'cpu', '--optimize not compatible with cuda devices, i.e. 
use --device cpu' + + # Input + gs = int(max(model.stride)) # grid size (max stride) + imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples + im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection + + # Update model + model.eval() + for k, m in model.named_modules(): + if isinstance(m, Detect): + m.inplace = inplace + m.dynamic = dynamic + m.export = True + + for _ in range(2): + y = model(im) # dry runs + if half and not coreml: + im, model = im.half(), model.half() # to FP16 + shape = tuple((y[0] if isinstance(y, tuple) else y).shape) # model output shape + metadata = {'stride': int(max(model.stride)), 'names': model.names} # model metadata + LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} with output shape {shape} ({file_size(file):.1f} MB)") + + # Exports + f = [''] * len(fmts) # exported filenames + warnings.filterwarnings(action='ignore', category=torch.jit.TracerWarning) # suppress TracerWarning + if jit: # TorchScript + f[0], _ = export_torchscript(model, im, file, optimize) + if engine: # TensorRT required before ONNX + f[1], _ = export_engine(model, im, file, half, dynamic, simplify, workspace, verbose) + if onnx or xml: # OpenVINO requires ONNX + f[2], _ = export_onnx(model, im, file, opset, dynamic, simplify) + if xml: # OpenVINO + f[3], _ = export_openvino(file, metadata, half) + if coreml: # CoreML + f[4], _ = export_coreml(model, im, file, int8, half) + if any((saved_model, pb, tflite, edgetpu, tfjs)): # TensorFlow formats + assert not tflite or not tfjs, 'TFLite and TF.js models must be exported separately, please pass only one type.' + assert not isinstance(model, ClassificationModel), 'ClassificationModel export to TF formats not yet supported.' + f[5], s_model = export_saved_model(model.cpu(), + im, + file, + dynamic, + tf_nms=nms or agnostic_nms or tfjs, + agnostic_nms=agnostic_nms or tfjs, + topk_per_class=topk_per_class, + topk_all=topk_all, + iou_thres=iou_thres, + conf_thres=conf_thres, + keras=keras) + if pb or tfjs: # pb prerequisite to tfjs + f[6], _ = export_pb(s_model, file) + if tflite or edgetpu: + f[7], _ = export_tflite(s_model, im, file, int8 or edgetpu, data=data, nms=nms, agnostic_nms=agnostic_nms) + if edgetpu: + f[8], _ = export_edgetpu(file) + add_tflite_metadata(f[8] or f[7], metadata, num_outputs=len(s_model.outputs)) + if tfjs: + f[9], _ = export_tfjs(file, int8) + if paddle: # PaddlePaddle + f[10], _ = export_paddle(model, im, file, metadata) + + # Finish + f = [str(x) for x in f if x] # filter out '' and None + if any(f): + cls, det, seg = (isinstance(model, x) for x in (ClassificationModel, DetectionModel, SegmentationModel)) # type + det &= not seg # segmentation models inherit from SegmentationModel(DetectionModel) + dir = Path('segment' if seg else 'classify' if cls else '') + h = '--half' if half else '' # --half FP16 inference arg + s = '# WARNING ⚠️ ClassificationModel not yet supported for PyTorch Hub AutoShape inference' if cls else \ + '# WARNING ⚠️ SegmentationModel not yet supported for PyTorch Hub AutoShape inference' if seg else '' + LOGGER.info(f'\nExport complete ({time.time() - t:.1f}s)' + f"\nResults saved to {colorstr('bold', file.parent.resolve())}" + f"\nDetect: python {dir / ('detect.py' if det else 'predict.py')} --weights {f[-1]} {h}" + f"\nValidate: python {dir / 'val.py'} --weights {f[-1]} {h}" + f"\nPyTorch Hub: model = torch.hub.load('ultralytics/yolov5', 'custom', '{f[-1]}') {s}" + f'\nVisualize: https://netron.app') + return f # return 
list of exported files/dirs + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640, 640], help='image (h, w)') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--half', action='store_true', help='FP16 half-precision export') + parser.add_argument('--inplace', action='store_true', help='set YOLOv5 Detect() inplace=True') + parser.add_argument('--keras', action='store_true', help='TF: use Keras') + parser.add_argument('--optimize', action='store_true', help='TorchScript: optimize for mobile') + parser.add_argument('--int8', action='store_true', help='CoreML/TF INT8 quantization') + parser.add_argument('--dynamic', action='store_true', help='ONNX/TF/TensorRT: dynamic axes') + parser.add_argument('--simplify', action='store_true', help='ONNX: simplify model') + parser.add_argument('--opset', type=int, default=17, help='ONNX: opset version') + parser.add_argument('--verbose', action='store_true', help='TensorRT: verbose log') + parser.add_argument('--workspace', type=int, default=4, help='TensorRT: workspace size (GB)') + parser.add_argument('--nms', action='store_true', help='TF: add NMS to model') + parser.add_argument('--agnostic-nms', action='store_true', help='TF: add agnostic NMS to model') + parser.add_argument('--topk-per-class', type=int, default=100, help='TF.js NMS: topk per class to keep') + parser.add_argument('--topk-all', type=int, default=100, help='TF.js NMS: topk for all classes to keep') + parser.add_argument('--iou-thres', type=float, default=0.45, help='TF.js NMS: IoU threshold') + parser.add_argument('--conf-thres', type=float, default=0.25, help='TF.js NMS: confidence threshold') + parser.add_argument( + '--include', + nargs='+', + default=['torchscript'], + help='torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle') + opt = parser.parse_known_args()[0] if known else parser.parse_args() + print_args(vars(opt)) + return opt + + +def main(opt): + for opt.weights in (opt.weights if isinstance(opt.weights, list) else [opt.weights]): + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/hubconf.py b/src/train_utils/train_models/models/yolov5/hubconf.py new file mode 100644 index 0000000..41af8e3 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/hubconf.py @@ -0,0 +1,169 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch Hub models https://pytorch.org/hub/ultralytics_yolov5 + +Usage: + import torch + model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # official model + model = torch.hub.load('ultralytics/yolov5:master', 'yolov5s') # from branch + model = torch.hub.load('ultralytics/yolov5', 'custom', 'yolov5s.pt') # custom/local model + model = torch.hub.load('.', 'custom', 'yolov5s.pt', source='local') # local repo +""" + +import torch + + +def _create(name, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True, device=None): + """Creates or loads a YOLOv5 model + + Arguments: + name (str): model name 'yolov5s' or path 'path/to/best.pt' + pretrained 
(bool): load pretrained weights into the model + channels (int): number of input channels + classes (int): number of model classes + autoshape (bool): apply YOLOv5 .autoshape() wrapper to model + verbose (bool): print all information to screen + device (str, torch.device, None): device to use for model parameters + + Returns: + YOLOv5 model + """ + from pathlib import Path + + from models.common import AutoShape, DetectMultiBackend + from models.experimental import attempt_load + from models.yolo import ClassificationModel, DetectionModel, SegmentationModel + from utils.downloads import attempt_download + from utils.general import LOGGER, check_requirements, intersect_dicts, logging + from utils.torch_utils import select_device + + if not verbose: + LOGGER.setLevel(logging.WARNING) + check_requirements(exclude=('opencv-python', 'tensorboard', 'thop')) + name = Path(name) + path = name.with_suffix('.pt') if name.suffix == '' and not name.is_dir() else name # checkpoint path + try: + device = select_device(device) + if pretrained and channels == 3 and classes == 80: + try: + model = DetectMultiBackend(path, device=device, fuse=autoshape) # detection model + if autoshape: + if model.pt and isinstance(model.model, ClassificationModel): + LOGGER.warning('WARNING ⚠️ YOLOv5 ClassificationModel is not yet AutoShape compatible. ' + 'You must pass torch tensors in BCHW to this model, i.e. shape(1,3,224,224).') + elif model.pt and isinstance(model.model, SegmentationModel): + LOGGER.warning('WARNING ⚠️ YOLOv5 SegmentationModel is not yet AutoShape compatible. ' + 'You will not be able to run inference with this model.') + else: + model = AutoShape(model) # for file/URI/PIL/cv2/np inputs and NMS + except Exception: + model = attempt_load(path, device=device, fuse=False) # arbitrary model + else: + cfg = list((Path(__file__).parent / 'models').rglob(f'{path.stem}.yaml'))[0] # model.yaml path + model = DetectionModel(cfg, channels, classes) # create model + if pretrained: + ckpt = torch.load(attempt_download(path), map_location=device) # load + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=['anchors']) # intersect + model.load_state_dict(csd, strict=False) # load + if len(ckpt['model'].names) == classes: + model.names = ckpt['model'].names # set class names attribute + if not verbose: + LOGGER.setLevel(logging.INFO) # reset to default + return model.to(device) + + except Exception as e: + help_url = 'https://github.com/ultralytics/yolov5/issues/36' + s = f'{e}. Cache may be out of date, try `force_reload=True` or see {help_url} for help.' 
+ raise Exception(s) from e + + +def custom(path='path/to/model.pt', autoshape=True, _verbose=True, device=None): + # YOLOv5 custom or local model + return _create(path, autoshape=autoshape, verbose=_verbose, device=device) + + +def yolov5n(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-nano model https://github.com/ultralytics/yolov5 + return _create('yolov5n', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5s(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-small model https://github.com/ultralytics/yolov5 + return _create('yolov5s', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5m(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-medium model https://github.com/ultralytics/yolov5 + return _create('yolov5m', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5l(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-large model https://github.com/ultralytics/yolov5 + return _create('yolov5l', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5x(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-xlarge model https://github.com/ultralytics/yolov5 + return _create('yolov5x', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5n6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-nano-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5n6', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5s6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-small-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5s6', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5m6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-medium-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5m6', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5l6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-large-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5l6', pretrained, channels, classes, autoshape, _verbose, device) + + +def yolov5x6(pretrained=True, channels=3, classes=80, autoshape=True, _verbose=True, device=None): + # YOLOv5-xlarge-P6 model https://github.com/ultralytics/yolov5 + return _create('yolov5x6', pretrained, channels, classes, autoshape, _verbose, device) + + +if __name__ == '__main__': + import argparse + from pathlib import Path + + import numpy as np + from PIL import Image + + from utils.general import cv2, print_args + + # Argparser + parser = argparse.ArgumentParser() + parser.add_argument('--model', type=str, default='yolov5s', help='model name') + opt = parser.parse_args() + print_args(vars(opt)) + + # Model + model = _create(name=opt.model, pretrained=True, channels=3, classes=80, autoshape=True, verbose=True) + # model = custom(path='path/to/model.pt') # custom + + # Images + imgs = [ + 'data/images/zidane.jpg', # filename + Path('data/images/zidane.jpg'), # Path + 'https://ultralytics.com/images/zidane.jpg', # URI + cv2.imread('data/images/bus.jpg')[:, :, ::-1], # OpenCV + Image.open('data/images/bus.jpg'), # PIL 
+ np.zeros((320, 640, 3))] # numpy + + # Inference + results = model(imgs, size=320) # batched inference + + # Results + results.print() + results.save() diff --git a/src/train_utils/train_models/models/yolov5/models/__init__.py b/src/train_utils/train_models/models/yolov5/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/yolov5/models/common.py b/src/train_utils/train_models/models/yolov5/models/common.py new file mode 100644 index 0000000..aa8ae67 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/common.py @@ -0,0 +1,870 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Common modules +""" + +import ast +import contextlib +import json +import math +import platform +import warnings +import zipfile +from collections import OrderedDict, namedtuple +from copy import copy +from pathlib import Path +from urllib.parse import urlparse + +import cv2 +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +from PIL import Image +from torch.cuda import amp + +from utils import TryExcept +from utils.dataloaders import exif_transpose, letterbox +from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suffix, check_version, colorstr, + increment_path, is_jupyter, make_divisible, non_max_suppression, scale_boxes, xywh2xyxy, + xyxy2xywh, yaml_load) +from utils.plots import Annotator, colors, save_one_box +from utils.torch_utils import copy_attr, smart_inference_mode + + +def autopad(k, p=None, d=1): # kernel, padding, dilation + # Pad to 'same' shape outputs + if d > 1: + k = d * (k - 1) + 1 if isinstance(k, int) else [d * (x - 1) + 1 for x in k] # actual kernel-size + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +class Conv(nn.Module): + # Standard convolution with args(ch_in, ch_out, kernel, stride, padding, groups, dilation, activation) + default_act = nn.SiLU() # default activation + + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, d=1, act=True): + super().__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p, d), groups=g, dilation=d, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = self.default_act if act is True else act if isinstance(act, nn.Module) else nn.Identity() + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def forward_fuse(self, x): + return self.act(self.conv(x)) + + +class DWConv(Conv): + # Depth-wise convolution + def __init__(self, c1, c2, k=1, s=1, d=1, act=True): # ch_in, ch_out, kernel, stride, dilation, activation + super().__init__(c1, c2, k, s, g=math.gcd(c1, c2), d=d, act=act) + + +class DWConvTranspose2d(nn.ConvTranspose2d): + # Depth-wise transpose convolution + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0): # ch_in, ch_out, kernel, stride, padding, padding_out + super().__init__(c1, c2, k, s, p1, p2, groups=math.gcd(c1, c2)) + + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class 
TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers))) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2).permute(2, 0, 1) + return self.tr(p + self.linear(p)).permute(1, 2, 0).reshape(b, self.c2, w, h) + + +class Bottleneck(nn.Module): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class BottleneckCSP(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False) + self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.bn = nn.BatchNorm2d(2 * c_) # applied to cat(cv2, cv3) + self.act = nn.SiLU() + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(self.act(self.bn(torch.cat((y1, y2), 1)))) + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class C3(nn.Module): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1) # optional act=FReLU(c2) + self.m = nn.Sequential(*(Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n))) + + def forward(self, x): + return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), 1)) + + +class C3x(C3): + # C3 module with cross-convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = nn.Sequential(*(CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n))) + + +class C3TR(C3): + # C3 module with TransformerBlock() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = TransformerBlock(c_, c_, 4, n) + + +class C3SPP(C3): + # C3 module with SPP() + def __init__(self, c1, c2, k=(5, 9, 13), n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) + self.m = SPP(c_, c_, k) + + +class C3Ghost(C3): + # C3 
module with GhostBottleneck() + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*(GhostBottleneck(c_, c_) for _ in range(n))) + + +class SPP(nn.Module): + # Spatial Pyramid Pooling (SPP) layer https://arxiv.org/abs/1406.4729 + def __init__(self, c1, c2, k=(5, 9, 13)): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress torch 1.9.0 max_pool2d() warning + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat((x, y1, y2, self.m(y2)), 1)) + + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act=act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]), 1)) + # return self.conv(self.contract(x)) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super().__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act=act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act=act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat((y, self.cv2(y)), 1) + + +class GhostBottleneck(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super().__init__() + c_ = c2 // 2 + self.conv = nn.Sequential( + GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), Conv(c1, c2, 1, 1, + act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + + +class Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert (h / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(b, c, h // s, s, w // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(b, c * s * s, h // s, w // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. 
x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + b, c, h, w = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(b, s, s, c // s ** 2, h, w) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(b, c // s ** 2, h * s, w * s) # x(1,16,160,160) + + +class Concat(nn.Module): + # Concatenate a list of tensors along dimension + def __init__(self, dimension=1): + super().__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class DetectMultiBackend(nn.Module): + # YOLOv5 MultiBackend class for python inference on various backends + def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True): + # Usage: + # PyTorch: weights = *.pt + # TorchScript: *.torchscript + # ONNX Runtime: *.onnx + # ONNX OpenCV DNN: *.onnx --dnn + # OpenVINO: *_openvino_model + # CoreML: *.mlmodel + # TensorRT: *.engine + # TensorFlow SavedModel: *_saved_model + # TensorFlow GraphDef: *.pb + # TensorFlow Lite: *.tflite + # TensorFlow Edge TPU: *_edgetpu.tflite + # PaddlePaddle: *_paddle_model + from models.experimental import attempt_download, attempt_load # scoped to avoid circular import + + super().__init__() + w = str(weights[0] if isinstance(weights, list) else weights) + pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w) + fp16 &= pt or jit or onnx or engine # FP16 + nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH) + stride = 32 # default stride + cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA + if not (pt or triton): + w = attempt_download(w) # download if not local + + if pt: # PyTorch + model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse) + stride = max(int(model.stride.max()), 32) # model stride + names = model.module.names if hasattr(model, 'module') else model.names # get class names + model.half() if fp16 else model.float() + self.model = model # explicitly assign for to(), cpu(), cuda(), half() + elif jit: # TorchScript + LOGGER.info(f'Loading {w} for TorchScript inference...') + extra_files = {'config.txt': ''} # model metadata + model = torch.jit.load(w, _extra_files=extra_files, map_location=device) + model.half() if fp16 else model.float() + if extra_files['config.txt']: # load metadata dict + d = json.loads(extra_files['config.txt'], + object_hook=lambda d: {int(k) if k.isdigit() else k: v + for k, v in d.items()}) + stride, names = int(d['stride']), d['names'] + elif dnn: # ONNX OpenCV DNN + LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...') + check_requirements('opencv-python>=4.5.4') + net = cv2.dnn.readNetFromONNX(w) + elif onnx: # ONNX Runtime + LOGGER.info(f'Loading {w} for ONNX Runtime inference...') + check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime')) + import onnxruntime + providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider'] + session = onnxruntime.InferenceSession(w, providers=providers) + output_names = [x.name for x in session.get_outputs()] + meta = session.get_modelmeta().custom_metadata_map # metadata + if 'stride' in meta: + stride, names = int(meta['stride']), eval(meta['names']) + elif xml: # OpenVINO + LOGGER.info(f'Loading {w} for OpenVINO inference...') + 
check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/ + from openvino.runtime import Core, Layout, get_batch + ie = Core() + if not Path(w).is_file(): # if not *.xml + w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir + network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin')) + if network.get_parameters()[0].get_layout().empty: + network.get_parameters()[0].set_layout(Layout('NCHW')) + batch_dim = get_batch(network) + if batch_dim.is_static: + batch_size = batch_dim.get_length() + executable_network = ie.compile_model(network, device_name='CPU') # device_name="MYRIAD" for Intel NCS2 + stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata + elif engine: # TensorRT + LOGGER.info(f'Loading {w} for TensorRT inference...') + import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download + check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0 + if device.type == 'cpu': + device = torch.device('cuda:0') + Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr')) + logger = trt.Logger(trt.Logger.INFO) + with open(w, 'rb') as f, trt.Runtime(logger) as runtime: + model = runtime.deserialize_cuda_engine(f.read()) + context = model.create_execution_context() + bindings = OrderedDict() + output_names = [] + fp16 = False # default updated below + dynamic = False + for i in range(model.num_bindings): + name = model.get_binding_name(i) + dtype = trt.nptype(model.get_binding_dtype(i)) + if model.binding_is_input(i): + if -1 in tuple(model.get_binding_shape(i)): # dynamic + dynamic = True + context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2])) + if dtype == np.float16: + fp16 = True + else: # output + output_names.append(name) + shape = tuple(context.get_binding_shape(i)) + im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device) + bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr())) + binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items()) + batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size + elif coreml: # CoreML + LOGGER.info(f'Loading {w} for CoreML inference...') + import coremltools as ct + model = ct.models.MLModel(w) + elif saved_model: # TF SavedModel + LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...') + import tensorflow as tf + keras = False # assume TF1 saved_model + model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w) + elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt + LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...') + import tensorflow as tf + + def wrap_frozen_graph(gd, inputs, outputs): + x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped + ge = x.graph.as_graph_element + return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs)) + + def gd_outputs(gd): + name_list, input_list = [], [] + for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef + name_list.append(node.name) + input_list.extend(node.input) + return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp')) + + gd = tf.Graph().as_graph_def() # TF GraphDef + with open(w, 'rb') as f: + gd.ParseFromString(f.read()) + frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd)) + elif tflite or edgetpu: # 
https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python + try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu + from tflite_runtime.interpreter import Interpreter, load_delegate + except ImportError: + import tensorflow as tf + Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate, + if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime + LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...') + delegate = { + 'Linux': 'libedgetpu.so.1', + 'Darwin': 'libedgetpu.1.dylib', + 'Windows': 'edgetpu.dll'}[platform.system()] + interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)]) + else: # TFLite + LOGGER.info(f'Loading {w} for TensorFlow Lite inference...') + interpreter = Interpreter(model_path=w) # load TFLite model + interpreter.allocate_tensors() # allocate + input_details = interpreter.get_input_details() # inputs + output_details = interpreter.get_output_details() # outputs + # load metadata + with contextlib.suppress(zipfile.BadZipFile): + with zipfile.ZipFile(w, 'r') as model: + meta_file = model.namelist()[0] + meta = ast.literal_eval(model.read(meta_file).decode('utf-8')) + stride, names = int(meta['stride']), meta['names'] + elif tfjs: # TF.js + raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported') + elif paddle: # PaddlePaddle + LOGGER.info(f'Loading {w} for PaddlePaddle inference...') + check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle') + import paddle.inference as pdi + if not Path(w).is_file(): # if not *.pdmodel + w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir + weights = Path(w).with_suffix('.pdiparams') + config = pdi.Config(str(w), str(weights)) + if cuda: + config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0) + predictor = pdi.create_predictor(config) + input_handle = predictor.get_input_handle(predictor.get_input_names()[0]) + output_names = predictor.get_output_names() + elif triton: # NVIDIA Triton Inference Server + LOGGER.info(f'Using {w} as Triton Inference Server...') + check_requirements('tritonclient[all]') + from utils.triton import TritonRemoteModel + model = TritonRemoteModel(url=w) + nhwc = model.runtime.startswith('tensorflow') + else: + raise NotImplementedError(f'ERROR: {w} is not a supported format') + + # class names + if 'names' not in locals(): + names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)} + if names[0] == 'n01440764' and len(names) == 1000: # ImageNet + names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names + + self.__dict__.update(locals()) # assign all variables to self + + def forward(self, im, augment=False, visualize=False): + # YOLOv5 MultiBackend inference + b, ch, h, w = im.shape # batch, channel, height, width + if self.fp16 and im.dtype != torch.float16: + im = im.half() # to FP16 + if self.nhwc: + im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3) + + if self.pt: # PyTorch + y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im) + elif self.jit: # TorchScript + y = self.model(im) + elif self.dnn: # ONNX OpenCV DNN + im = im.cpu().numpy() # torch to numpy + self.net.setInput(im) + y = self.net.forward() + elif self.onnx: # ONNX Runtime + im = im.cpu().numpy() # torch to numpy + y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im}) + elif 
self.xml: # OpenVINO + im = im.cpu().numpy() # FP32 + y = list(self.executable_network([im]).values()) + elif self.engine: # TensorRT + if self.dynamic and im.shape != self.bindings['images'].shape: + i = self.model.get_binding_index('images') + self.context.set_binding_shape(i, im.shape) # reshape if dynamic + self.bindings['images'] = self.bindings['images']._replace(shape=im.shape) + for name in self.output_names: + i = self.model.get_binding_index(name) + self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i))) + s = self.bindings['images'].shape + assert im.shape == s, f"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}" + self.binding_addrs['images'] = int(im.data_ptr()) + self.context.execute_v2(list(self.binding_addrs.values())) + y = [self.bindings[x].data for x in sorted(self.output_names)] + elif self.coreml: # CoreML + im = im.cpu().numpy() + im = Image.fromarray((im[0] * 255).astype('uint8')) + # im = im.resize((192, 320), Image.ANTIALIAS) + y = self.model.predict({'image': im}) # coordinates are xywh normalized + if 'confidence' in y: + box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels + conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(float) # float, as np.float is removed in numpy>=1.24 + y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1) + else: + y = list(reversed(y.values())) # reversed for segmentation models (pred, proto) + elif self.paddle: # PaddlePaddle + im = im.cpu().numpy().astype(np.float32) + self.input_handle.copy_from_cpu(im) + self.predictor.run() + y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names] + elif self.triton: # NVIDIA Triton Inference Server + y = self.model(im) + else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU) + im = im.cpu().numpy() + if self.saved_model: # SavedModel + y = self.model(im, training=False) if self.keras else self.model(im) + elif self.pb: # GraphDef + y = self.frozen_func(x=self.tf.constant(im)) + else: # Lite or Edge TPU + input = self.input_details[0] + int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model + if int8: + scale, zero_point = input['quantization'] + im = (im / scale + zero_point).astype(np.uint8) # de-scale + self.interpreter.set_tensor(input['index'], im) + self.interpreter.invoke() + y = [] + for output in self.output_details: + x = self.interpreter.get_tensor(output['index']) + if int8: + scale, zero_point = output['quantization'] + x = (x.astype(np.float32) - zero_point) * scale # re-scale + y.append(x) + y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y] + y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels + + if isinstance(y, (list, tuple)): + return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y] + else: + return self.from_numpy(y) + + def from_numpy(self, x): + return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x + + def warmup(self, imgsz=(1, 3, 640, 640)): + # Warmup model by running inference once + warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton + if any(warmup_types) and (self.device.type != 'cpu' or self.triton): + im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input + for _ in range(2 if self.jit else 1): # + self.forward(im) # warmup + + @staticmethod + def _model_type(p='path/to/model.pt'): + # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx + # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle] + from export import export_formats + from utils.downloads import is_url + sf = list(export_formats().Suffix) # export suffixes + if not is_url(p, check=False): + check_suffix(p, sf) # checks + url = urlparse(p) # if url may be Triton inference server + types = [s in Path(p).name for s in sf] + types[8] &= not types[9] # tflite &= not edgetpu + triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc]) + return types + [triton] + + @staticmethod + def _load_metadata(f=Path('path/to/meta.yaml')): + # Load metadata from meta.yaml if it exists + if f.exists(): + d = yaml_load(f) + return d['stride'], d['names'] # assign stride, names + return None, None + + +class AutoShape(nn.Module): + # YOLOv5 input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + agnostic = False # NMS class-agnostic + multi_label = False # NMS multiple labels per box + classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for COCO persons, cats and dogs + max_det = 1000 # maximum number of detections per image + amp = False # Automatic Mixed Precision (AMP) inference + + def __init__(self, model, verbose=True): + super().__init__() + if verbose: + LOGGER.info('Adding AutoShape... ') + copy_attr(self, model, include=('yaml', 'nc', 'hyp', 'names', 'stride', 'abc'), exclude=()) # copy attributes + self.dmb = isinstance(model, DetectMultiBackend) # DetectMultiBackend() instance + self.pt = not self.dmb or model.pt # PyTorch model + self.model = model.eval() + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.inplace = False # Detect.inplace=False for safe multithread inference + m.export = True # do not output loss values + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + if self.pt: + m = self.model.model.model[-1] if self.dmb else self.model.model[-1] # Detect() + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + @smart_inference_mode() + def forward(self, ims, size=640, augment=False, profile=False): + # Inference from various sources. For size(height=640, width=1280), RGB images example inputs are: + # file: ims = 'data/images/zidane.jpg' # str or PosixPath + # URI: = 'https://ultralytics.com/images/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') or ImageGrab.grab() # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + dt = (Profile(), Profile(), Profile()) + with dt[0]: + if isinstance(size, int): # expand + size = (size, size) + p = next(self.model.parameters()) if self.pt else torch.empty(1, device=self.model.device) # param + autocast = self.amp and (p.device.type != 'cpu') # Automatic Mixed Precision (AMP) inference + if isinstance(ims, torch.Tensor): # torch + with amp.autocast(autocast): + return self.model(ims.to(p.device).type_as(p), augment=augment) # inference + + # Pre-process + n, ims = (len(ims), list(ims)) if isinstance(ims, (list, tuple)) else (1, [ims]) # number, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(ims): + f = f'image{i}' # filename + if isinstance(im, (str, Path)): # filename or uri + im, f = Image.open(requests.get(im, stream=True).raw if str(im).startswith('http') else im), im + im = np.asarray(exif_transpose(im)) + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(exif_transpose(im)), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[..., :3] if im.ndim == 3 else cv2.cvtColor(im, cv2.COLOR_GRAY2BGR) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = max(size) / max(s) # gain + shape1.append([int(y * g) for y in s]) + ims[i] = im if im.data.contiguous else np.ascontiguousarray(im) # update + shape1 = [make_divisible(x, self.stride) for x in np.array(shape1).max(0)] # inf shape + x = [letterbox(im, shape1, auto=False)[0] for im in ims] # pad + x = np.ascontiguousarray(np.array(x).transpose((0, 3, 1, 2))) # stack and BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255 # uint8 to fp16/32 + + with amp.autocast(autocast): + # Inference + with dt[1]: + y = self.model(x, augment=augment) # forward + + # Post-process + with dt[2]: + y = non_max_suppression(y if self.dmb else y[0], + self.conf, + self.iou, + self.classes, + self.agnostic, + self.multi_label, + max_det=self.max_det) # NMS + for i in range(n): + scale_boxes(shape1, y[i][:, :4], shape0[i]) + + return Detections(ims, y, files, dt, self.names, x.shape) + + +class Detections: + # YOLOv5 detections class for inference results + def __init__(self, ims, pred, files, times=(0, 0, 0), names=None, shape=None): + super().__init__() + d = pred[0].device # device + gn = [torch.tensor([*(im.shape[i] for i in [1, 0, 1, 0]), 1, 1], device=d) for im in ims] # normalizations + self.ims = ims # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.times = times # profiling times + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple(x.t / self.n * 1E3 for x in times) # timestamps (ms) + self.s = tuple(shape) # inference BCHW shape + + def _run(self, pprint=False, show=False, save=False, crop=False, render=False, labels=True, save_dir=Path('')): + s, crops = '', [] + for i, (im, pred) in enumerate(zip(self.ims, self.pred)): + s += f'\nimage {i + 1}/{len(self.pred)}: {im.shape[0]}x{im.shape[1]} ' # string + if pred.shape[0]: + for c in pred[:, -1].unique(): + n = 
(pred[:, -1] == c).sum() # detections per class + s += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + s = s.rstrip(', ') + if show or save or render or crop: + annotator = Annotator(im, example=str(self.names)) + for *box, conf, cls in reversed(pred): # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + if crop: + file = save_dir / 'crops' / self.names[int(cls)] / self.files[i] if save else None + crops.append({ + 'box': box, + 'conf': conf, + 'cls': cls, + 'label': label, + 'im': save_one_box(box, im, file=file, save=save)}) + else: # all others + annotator.box_label(box, label if labels else '', color=colors(cls)) + im = annotator.im + else: + s += '(no detections)' + + im = Image.fromarray(im.astype(np.uint8)) if isinstance(im, np.ndarray) else im # from np + if show: + if is_jupyter(): + from IPython.display import display + display(im) + else: + im.show(self.files[i]) + if save: + f = self.files[i] + im.save(save_dir / f) # save + if i == self.n - 1: + LOGGER.info(f"Saved {self.n} image{'s' * (self.n > 1)} to {colorstr('bold', save_dir)}") + if render: + self.ims[i] = np.asarray(im) + if pprint: + s = s.lstrip('\n') + return f'{s}\nSpeed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {self.s}' % self.t + if crop: + if save: + LOGGER.info(f'Saved results to {save_dir}\n') + return crops + + @TryExcept('Showing images is not supported in this environment') + def show(self, labels=True): + self._run(show=True, labels=labels) # show results + + def save(self, labels=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) # increment save_dir + self._run(save=True, labels=labels, save_dir=save_dir) # save results + + def crop(self, save=True, save_dir='runs/detect/exp', exist_ok=False): + save_dir = increment_path(save_dir, exist_ok, mkdir=True) if save else None + return self._run(crop=True, save=save, save_dir=save_dir) # crop results + + def render(self, labels=True): + self._run(render=True, labels=labels) # render results + return self.ims + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 
'for result in results.tolist():' + r = range(self.n) # iterable + x = [Detections([self.ims[i]], [self.pred[i]], [self.files[i]], self.times, self.names, self.s) for i in r] + # for d in x: + # for k in ['ims', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + # setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def print(self): + LOGGER.info(self.__str__()) + + def __len__(self): # override len(results) + return self.n + + def __str__(self): # override print(results) + return self._run(pprint=True) # print results + + def __repr__(self): + return f'YOLOv5 {self.__class__} instance\n' + self.__str__() + + +class Proto(nn.Module): + # YOLOv5 mask Proto module for segmentation models + def __init__(self, c1, c_=256, c2=32): # ch_in, number of protos, number of masks + super().__init__() + self.cv1 = Conv(c1, c_, k=3) + self.upsample = nn.Upsample(scale_factor=2, mode='nearest') + self.cv2 = Conv(c_, c_, k=3) + self.cv3 = Conv(c_, c2) + + def forward(self, x): + return self.cv3(self.cv2(self.upsample(self.cv1(x)))) + + +class Classify(nn.Module): + # YOLOv5 classification head, i.e. x(b,c1,20,20) to x(b,c2) + def __init__(self, + c1, + c2, + k=1, + s=1, + p=None, + g=1, + dropout_p=0.0): # ch_in, ch_out, kernel, stride, padding, groups, dropout probability + super().__init__() + c_ = 1280 # efficientnet_b0 size + self.conv = Conv(c1, c_, k, s, autopad(k, p), g) + self.pool = nn.AdaptiveAvgPool2d(1) # to x(b,c_,1,1) + self.drop = nn.Dropout(p=dropout_p, inplace=True) + self.linear = nn.Linear(c_, c2) # to x(b,c2) + + def forward(self, x): + if isinstance(x, list): + x = torch.cat(x, 1) + return self.linear(self.drop(self.pool(self.conv(x)).flatten(1))) diff --git a/src/train_utils/train_models/models/yolov5/models/experimental.py b/src/train_utils/train_models/models/yolov5/models/experimental.py new file mode 100644 index 0000000..02d35b9 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/experimental.py @@ -0,0 +1,111 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Experimental modules +""" +import math + +import numpy as np +import torch +import torch.nn as nn + +from utils.downloads import attempt_download + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super().__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1.0, n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class MixConv2d(nn.Module): + # Mixed Depth-wise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): # ch_in, ch_out, kernel, stride, ch_strategy + super().__init__() + n = len(k) # number of convolutions + if equal_ch: # equal c_ per group + i = torch.linspace(0, n - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(n)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * n + a = np.eye(n + 1, n, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([ + nn.Conv2d(c1, int(c_), k, s, k // 2, groups=math.gcd(c1, int(c_)), bias=False) for k, c_ in zip(k, c_)]) + 
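# The branches above pick the per-kernel channel split c_: with equal_ch=True the c2 output
# channels are spread as evenly as possible across the len(k) kernel sizes (floored linspace);
# otherwise the least-squares solve of a @ c_ = b sizes each branch so that every kernel gets
# roughly the same number of weights (channels weighted by k ** 2).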
self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() + + def forward(self, x): + return self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super().__init__() + + def forward(self, x, augment=False, profile=False, visualize=False): + y = [module(x, augment, profile, visualize)[0] for module in self] + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + +def attempt_load(weights, device=None, inplace=True, fuse=True): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + from models.yolo import Detect, Model + + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + ckpt = torch.load(attempt_download(w), map_location='cpu') # load + ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model + + # Model compatibility updates + if not hasattr(ckpt, 'stride'): + ckpt.stride = torch.tensor([32.]) + if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)): + ckpt.names = dict(enumerate(ckpt.names)) # convert to dict + + model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode + + # Module compatibility updates + for m in model.modules(): + t = type(m) + if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model): + m.inplace = inplace # torch 1.7.0 compatibility + if t is Detect and not isinstance(m.anchor_grid, list): + delattr(m, 'anchor_grid') + setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl) + elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'): + m.recompute_scale_factor = None # torch 1.11.0 compatibility + + # Return model + if len(model) == 1: + return model[-1] + + # Return detection ensemble + print(f'Ensemble created with {weights}\n') + for k in 'names', 'nc', 'yaml': + setattr(model, k, getattr(model[0], k)) + model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride + assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}' + return model diff --git a/src/train_utils/train_models/models/yolov5/models/hub/anchors.yaml b/src/train_utils/train_models/models/yolov5/models/hub/anchors.yaml new file mode 100644 index 0000000..e4d7beb --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/anchors.yaml @@ -0,0 +1,59 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Default anchors for COCO data + + +# P5 ------------------------------------------------------------------------------------------------------------------- +# P5-640: +anchors_p5_640: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + + +# P6 ------------------------------------------------------------------------------------------------------------------- +# P6-640: thr=0.25: 0.9964 BPR, 5.54 anchors past thr, n=12, img_size=640, metric_all=0.281/0.716-mean/best, past_thr=0.469-mean: 9,11, 21,19, 17,41, 43,32, 39,70, 86,64, 65,131, 134,130, 120,265, 282,180, 247,354, 512,387 +anchors_p6_640: + - [9,11, 21,19, 17,41] # P3/8 + - [43,32, 39,70, 86,64] # P4/16 + - [65,131, 134,130, 120,265] # P5/32 + - [282,180, 247,354, 512,387] # P6/64 + +# P6-1280: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1280, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 
19,27, 44,40, 38,94, 96,68, 86,152, 180,137, 140,301, 303,264, 238,542, 436,615, 739,380, 925,792 +anchors_p6_1280: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# P6-1920: thr=0.25: 0.9950 BPR, 5.55 anchors past thr, n=12, img_size=1920, metric_all=0.281/0.714-mean/best, past_thr=0.468-mean: 28,41, 67,59, 57,141, 144,103, 129,227, 270,205, 209,452, 455,396, 358,812, 653,922, 1109,570, 1387,1187 +anchors_p6_1920: + - [28,41, 67,59, 57,141] # P3/8 + - [144,103, 129,227, 270,205] # P4/16 + - [209,452, 455,396, 358,812] # P5/32 + - [653,922, 1109,570, 1387,1187] # P6/64 + + +# P7 ------------------------------------------------------------------------------------------------------------------- +# P7-640: thr=0.25: 0.9962 BPR, 6.76 anchors past thr, n=15, img_size=640, metric_all=0.275/0.733-mean/best, past_thr=0.466-mean: 11,11, 13,30, 29,20, 30,46, 61,38, 39,92, 78,80, 146,66, 79,163, 149,150, 321,143, 157,303, 257,402, 359,290, 524,372 +anchors_p7_640: + - [11,11, 13,30, 29,20] # P3/8 + - [30,46, 61,38, 39,92] # P4/16 + - [78,80, 146,66, 79,163] # P5/32 + - [149,150, 321,143, 157,303] # P6/64 + - [257,402, 359,290, 524,372] # P7/128 + +# P7-1280: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1280, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 19,22, 54,36, 32,77, 70,83, 138,71, 75,173, 165,159, 148,334, 375,151, 334,317, 251,626, 499,474, 750,326, 534,814, 1079,818 +anchors_p7_1280: + - [19,22, 54,36, 32,77] # P3/8 + - [70,83, 138,71, 75,173] # P4/16 + - [165,159, 148,334, 375,151] # P5/32 + - [334,317, 251,626, 499,474] # P6/64 + - [750,326, 534,814, 1079,818] # P7/128 + +# P7-1920: thr=0.25: 0.9968 BPR, 6.71 anchors past thr, n=15, img_size=1920, metric_all=0.273/0.732-mean/best, past_thr=0.463-mean: 29,34, 81,55, 47,115, 105,124, 207,107, 113,259, 247,238, 222,500, 563,227, 501,476, 376,939, 749,711, 1126,489, 801,1222, 1618,1227 +anchors_p7_1920: + - [29,34, 81,55, 47,115] # P3/8 + - [105,124, 207,107, 113,259] # P4/16 + - [247,238, 222,500, 563,227] # P5/32 + - [501,476, 376,939, 749,711] # P6/64 + - [1126,489, 801,1222, 1618,1227] # P7/128 diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov3-spp.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov3-spp.yaml new file mode 100644 index 0000000..c669821 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 
'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov3-tiny.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov3-tiny.yaml new file mode 100644 index 0000000..b28b443 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov3-tiny.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,14, 23,27, 37,58] # P4/16 + - [81,82, 135,169, 344,319] # P5/32 + +# YOLOv3-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [16, 3, 1]], # 0 + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 1-P1/2 + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 3-P2/4 + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 5-P3/8 + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 7-P4/16 + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, nn.MaxPool2d, [2, 2, 0]], # 9-P5/32 + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, nn.ZeroPad2d, [[0, 1, 0, 1]]], # 11 + [-1, 1, nn.MaxPool2d, [2, 1, 0]], # 12 + ] + +# YOLOv3-tiny head +head: + [[-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [256, 3, 1]], # 19 (P4/16-medium) + + [[19, 15], 1, Detect, [nc, anchors]], # Detect(P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov3.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov3.yaml new file mode 100644 index 0000000..d1ef912 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov3.yaml @@ -0,0 +1,51 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + 
[-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-bifpn.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-bifpn.yaml new file mode 100644 index 0000000..504815f --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-bifpn.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 BiFPN head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14, 6], 1, Concat, [1]], # cat P4 <--- BiFPN change + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-fpn.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-fpn.yaml new file mode 100644 index 0000000..a23e9c6 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-fpn.yaml @@ -0,0 +1,42 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 FPN head +head: + [[-1, 3, C3, [1024, False]], # 10 (P5/32-large) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Conv, [512, 1, 1]], + [-1, 3, C3, [512, False]], # 14 (P4/16-medium) + + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Conv, [256, 1, 1]], + [-1, 3, C3, [256, False]], # 18 (P3/8-small) + + [[18, 14, 10], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git 
a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p2.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p2.yaml new file mode 100644 index 0000000..554117d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p2.yaml @@ -0,0 +1,54 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head with (P2, P3, P4, P5) outputs +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 2], 1, Concat, [1]], # cat backbone P2 + [-1, 1, C3, [128, False]], # 21 (P2/4-xsmall) + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P3 + [-1, 3, C3, [256, False]], # 24 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 27 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 30 (P5/32-large) + + [[21, 24, 27, 30], 1, Detect, [nc, anchors]], # Detect(P2, P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p34.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p34.yaml new file mode 100644 index 0000000..dbf0f85 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p34.yaml @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [ [ -1, 1, Conv, [ 64, 6, 2, 2 ] ], # 0-P1/2 + [ -1, 1, Conv, [ 128, 3, 2 ] ], # 1-P2/4 + [ -1, 3, C3, [ 128 ] ], + [ -1, 1, Conv, [ 256, 3, 2 ] ], # 3-P3/8 + [ -1, 6, C3, [ 256 ] ], + [ -1, 1, Conv, [ 512, 3, 2 ] ], # 5-P4/16 + [ -1, 9, C3, [ 512 ] ], + [ -1, 1, Conv, [ 1024, 3, 2 ] ], # 7-P5/32 + [ -1, 3, C3, [ 1024 ] ], + [ -1, 1, SPPF, [ 1024, 5 ] ], # 9 + ] + +# YOLOv5 v6.0 head with (P3, P4) outputs +head: + [ [ -1, 1, Conv, [ 512, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 6 ], 1, Concat, [ 1 ] ], # cat backbone P4 + [ -1, 3, C3, [ 512, False ] ], # 13 + + [ -1, 1, Conv, [ 256, 1, 1 ] ], + [ -1, 1, nn.Upsample, [ None, 2, 'nearest' ] ], + [ [ -1, 4 ], 1, Concat, [ 1 ] ], # cat backbone P3 + [ -1, 3, C3, [ 256, False ] ], # 17 (P3/8-small) + + [ -1, 1, Conv, [ 256, 3, 2 ] ], + [ [ -1, 14 ], 1, Concat, [ 1 ] ], # cat head P4 + [ -1, 3, C3, [ 512, False ] ], # 20 (P4/16-medium) + + [ [ 17, 20 ], 1, Detect, [ nc, anchors ] ], # Detect(P3, P4) + ] 
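These hub YAMLs are not imported directly; each backbone/head row is a [from, number, module, args] spec that YOLOv5's parse_model() expands into nn.Module layers, repeating blocks by depth_multiple and scaling channel arguments by width_multiple. A minimal sketch of building and running one of these configs is below, assuming the vendored yolov5 directory is on sys.path so that models.yolo.Model is importable (as the experimental.py hunk above does); the config path is illustrative and relative to that yolov5 root:

```python
import torch
from models.yolo import Model  # renamed DetectionModel in newer upstream releases

# Build a detection model from one of the hub configs added above.
model = Model('models/hub/yolov5-p34.yaml', ch=3, nc=80).eval()

im = torch.zeros(1, 3, 640, 640)  # BCHW dummy input
with torch.no_grad():
    pred, raw = model(im)  # eval mode returns (concatenated predictions, per-scale raw outputs)

print(model.stride)  # strides of the Detect() outputs, e.g. [8, 16] for the P3/P4-only head
print(pred.shape)    # (1, total_anchors, 5 + nc)
```

Configs that declare `anchors: 3` (AutoAnchor) are handled by parse_model(), which substitutes placeholder anchors until AutoAnchor evolves real ones during training.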
diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p6.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p6.yaml new file mode 100644 index 0000000..a17202f --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p6.yaml @@ -0,0 +1,56 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head with (P3, P4, P5, P6) outputs +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p7.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p7.yaml new file mode 100644 index 0000000..edd7d13 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-p7.yaml @@ -0,0 +1,67 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: 3 # AutoAnchor evolves 3 anchors per P output layer + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, Conv, [1280, 3, 2]], # 11-P7/128 + [-1, 3, C3, [1280]], + [-1, 1, SPPF, [1280, 5]], # 13 + ] + +# YOLOv5 v6.0 head with (P3, P4, P5, P6, P7) outputs +head: + [[-1, 1, Conv, [1024, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 10], 1, Concat, [1]], # cat backbone P6 + [-1, 3, C3, [1024, False]], # 17 + + [-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 21 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, 
Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 25 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 29 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 26], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 32 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 35 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 18], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 38 (P6/64-xlarge) + + [-1, 1, Conv, [1024, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P7 + [-1, 3, C3, [1280, False]], # 41 (P7/128-xxlarge) + + [[29, 32, 35, 38, 41], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6, P7) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5-panet.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-panet.yaml new file mode 100644 index 0000000..ccfbf90 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5-panet.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 PANet head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5l6.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5l6.yaml new file mode 100644 index 0000000..632c2cb --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5l6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 
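# Note: the *6 variants add a stride-64 P6 level: the backbone widens to 768 ch at P5/32 and
# 1024 ch at P6/64 (SPPF at layer 11), and the head below returns four outputs (P3/8, P4/16,
# P5/32, P6/64), one per anchor row listed above.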
+ [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5m6.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5m6.yaml new file mode 100644 index 0000000..ecc53fd --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5m6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5n6.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5n6.yaml new file mode 100644 index 0000000..0c0c71d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5n6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 
0.25 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-LeakyReLU.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-LeakyReLU.yaml new file mode 100644 index 0000000..3a179bf --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-LeakyReLU.yaml @@ -0,0 +1,49 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +activation: nn.LeakyReLU(0.1) # <----- Conv() activation used throughout entire YOLOv5 model +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-ghost.yaml 
b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-ghost.yaml new file mode 100644 index 0000000..ff9519c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-ghost.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, GhostConv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3Ghost, [128]], + [-1, 1, GhostConv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3Ghost, [256]], + [-1, 1, GhostConv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3Ghost, [512]], + [-1, 1, GhostConv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3Ghost, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, GhostConv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3Ghost, [512, False]], # 13 + + [-1, 1, GhostConv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3Ghost, [256, False]], # 17 (P3/8-small) + + [-1, 1, GhostConv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3Ghost, [512, False]], # 20 (P4/16-medium) + + [-1, 1, GhostConv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3Ghost, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-transformer.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-transformer.yaml new file mode 100644 index 0000000..100d7c4 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s-transformer.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3TR, [1024]], # 9 <--- C3TR() Transformer module + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5s6.yaml 
b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s6.yaml new file mode 100644 index 0000000..a28fb55 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5s6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/hub/yolov5x6.yaml b/src/train_utils/train_models/models/yolov5/models/hub/yolov5x6.yaml new file mode 100644 index 0000000..ba795c4 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/hub/yolov5x6.yaml @@ -0,0 +1,60 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [19,27, 44,40, 38,94] # P3/8 + - [96,68, 86,152, 180,137] # P4/16 + - [140,301, 303,264, 238,542] # P5/32 + - [436,615, 739,380, 925,792] # P6/64 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [768, 3, 2]], # 7-P5/32 + [-1, 3, C3, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P6/64 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 11 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [768, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P5 + [-1, 3, C3, [768, False]], # 15 + + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 19 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat 
backbone P3 + [-1, 3, C3, [256, False]], # 23 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 20], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 26 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [768, False]], # 29 (P5/32-large) + + [-1, 1, Conv, [768, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat head P6 + [-1, 3, C3, [1024, False]], # 32 (P6/64-xlarge) + + [[23, 26, 29, 32], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/segment/yolov5l-seg.yaml b/src/train_utils/train_models/models/yolov5/models/segment/yolov5l-seg.yaml new file mode 100644 index 0000000..4782de1 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/segment/yolov5l-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/segment/yolov5m-seg.yaml b/src/train_utils/train_models/models/yolov5/models/segment/yolov5m-seg.yaml new file mode 100644 index 0000000..07ec25b --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/segment/yolov5m-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, 
[256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/segment/yolov5n-seg.yaml b/src/train_utils/train_models/models/yolov5/models/segment/yolov5n-seg.yaml new file mode 100644 index 0000000..c28225a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/segment/yolov5n-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/segment/yolov5s-seg.yaml b/src/train_utils/train_models/models/yolov5/models/segment/yolov5s-seg.yaml new file mode 100644 index 0000000..a827814 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/segment/yolov5s-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.5 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 
(P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/segment/yolov5x-seg.yaml b/src/train_utils/train_models/models/yolov5/models/segment/yolov5x-seg.yaml new file mode 100644 index 0000000..5d0c452 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/segment/yolov5x-seg.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Segment, [nc, anchors, 32, 256]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/tf.py b/src/train_utils/train_models/models/yolov5/models/tf.py new file mode 100644 index 0000000..8290cf2 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/tf.py @@ -0,0 +1,608 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +TensorFlow, Keras and TFLite versions of YOLOv5 +Authored by https://github.com/zldrobit in PR https://github.com/ultralytics/yolov5/pull/1127 + +Usage: + $ python models/tf.py --weights yolov5s.pt + +Export: + $ python export.py --weights yolov5s.pt --include saved_model pb tflite tfjs +""" + +import argparse +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +# ROOT = ROOT.relative_to(Path.cwd()) # relative + +import numpy as np +import tensorflow as tf +import torch +import torch.nn as nn +from tensorflow import keras + +from models.common import (C3, SPP, SPPF, Bottleneck, BottleneckCSP, C3x, Concat, Conv, CrossConv, DWConv, + DWConvTranspose2d, Focus, autopad) +from models.experimental import MixConv2d, attempt_load +from models.yolo import Detect, Segment +from utils.activations import SiLU +from utils.general import LOGGER, make_divisible, print_args + + +class TFBN(keras.layers.Layer): + # TensorFlow BatchNormalization wrapper + def __init__(self, w=None): + super().__init__() + self.bn = keras.layers.BatchNormalization( + beta_initializer=keras.initializers.Constant(w.bias.numpy()), + 
gamma_initializer=keras.initializers.Constant(w.weight.numpy()), + moving_mean_initializer=keras.initializers.Constant(w.running_mean.numpy()), + moving_variance_initializer=keras.initializers.Constant(w.running_var.numpy()), + epsilon=w.eps) + + def call(self, inputs): + return self.bn(inputs) + + +class TFPad(keras.layers.Layer): + # Pad inputs in spatial dimensions 1 and 2 + def __init__(self, pad): + super().__init__() + if isinstance(pad, int): + self.pad = tf.constant([[0, 0], [pad, pad], [pad, pad], [0, 0]]) + else: # tuple/list + self.pad = tf.constant([[0, 0], [pad[0], pad[0]], [pad[1], pad[1]], [0, 0]]) + + def call(self, inputs): + return tf.pad(inputs, self.pad, mode='constant', constant_values=0) + + +class TFConv(keras.layers.Layer): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + # TensorFlow convolution padding is inconsistent with PyTorch (e.g. k=3 s=2 'SAME' padding) + # see https://stackoverflow.com/questions/52975843/comparing-conv2d-with-padding-between-tensorflow-and-pytorch + conv = keras.layers.Conv2D( + filters=c2, + kernel_size=k, + strides=s, + padding='SAME' if s == 1 else 'VALID', + use_bias=not hasattr(w, 'bn'), + kernel_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.act = activations(w.act) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class TFDWConv(keras.layers.Layer): + # Depthwise convolution + def __init__(self, c1, c2, k=1, s=1, p=None, act=True, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert c2 % c1 == 0, f'TFDWConv() output={c2} must be a multiple of input={c1} channels' + conv = keras.layers.DepthwiseConv2D( + kernel_size=k, + depth_multiplier=c2 // c1, + strides=s, + padding='SAME' if s == 1 else 'VALID', + use_bias=not hasattr(w, 'bn'), + depthwise_initializer=keras.initializers.Constant(w.conv.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer='zeros' if hasattr(w, 'bn') else keras.initializers.Constant(w.conv.bias.numpy())) + self.conv = conv if s == 1 else keras.Sequential([TFPad(autopad(k, p)), conv]) + self.bn = TFBN(w.bn) if hasattr(w, 'bn') else tf.identity + self.act = activations(w.act) if act else tf.identity + + def call(self, inputs): + return self.act(self.bn(self.conv(inputs))) + + +class TFDWConvTranspose2d(keras.layers.Layer): + # Depthwise ConvTranspose2d + def __init__(self, c1, c2, k=1, s=1, p1=0, p2=0, w=None): + # ch_in, ch_out, weights, kernel, stride, padding, groups + super().__init__() + assert c1 == c2, f'TFDWConv() output={c2} must be equal to input={c1} channels' + assert k == 4 and p1 == 1, 'TFDWConv() only valid for k=4 and p1=1' + weight, bias = w.weight.permute(2, 3, 1, 0).numpy(), w.bias.numpy() + self.c1 = c1 + self.conv = [ + keras.layers.Conv2DTranspose(filters=1, + kernel_size=k, + strides=s, + padding='VALID', + output_padding=p2, + use_bias=True, + kernel_initializer=keras.initializers.Constant(weight[..., i:i + 1]), + bias_initializer=keras.initializers.Constant(bias[i])) for i in range(c1)] + + def call(self, inputs): 
+ return tf.concat([m(x) for m, x in zip(self.conv, tf.split(inputs, self.c1, 3))], 3)[:, 1:-1, 1:-1] + + +class TFFocus(keras.layers.Layer): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True, w=None): + # ch_in, ch_out, kernel, stride, padding, groups + super().__init__() + self.conv = TFConv(c1 * 4, c2, k, s, p, g, act, w.conv) + + def call(self, inputs): # x(b,w,h,c) -> y(b,w/2,h/2,4c) + # inputs = inputs / 255 # normalize 0-255 to 0-1 + inputs = [inputs[:, ::2, ::2, :], inputs[:, 1::2, ::2, :], inputs[:, ::2, 1::2, :], inputs[:, 1::2, 1::2, :]] + return self.conv(tf.concat(inputs, 3)) + + +class TFBottleneck(keras.layers.Layer): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5, w=None): # ch_in, ch_out, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_, c2, 3, 1, g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class TFCrossConv(keras.layers.Layer): + # Cross Convolution + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False, w=None): + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, (1, k), (1, s), w=w.cv1) + self.cv2 = TFConv(c_, c2, (k, 1), (s, 1), g=g, w=w.cv2) + self.add = shortcut and c1 == c2 + + def call(self, inputs): + return inputs + self.cv2(self.cv1(inputs)) if self.add else self.cv2(self.cv1(inputs)) + + +class TFConv2d(keras.layers.Layer): + # Substitution for PyTorch nn.Conv2D + def __init__(self, c1, c2, k, s=1, g=1, bias=True, w=None): + super().__init__() + assert g == 1, "TF v2.2 Conv2D does not support 'groups' argument" + self.conv = keras.layers.Conv2D(filters=c2, + kernel_size=k, + strides=s, + padding='VALID', + use_bias=bias, + kernel_initializer=keras.initializers.Constant( + w.weight.permute(2, 3, 1, 0).numpy()), + bias_initializer=keras.initializers.Constant(w.bias.numpy()) if bias else None) + + def call(self, inputs): + return self.conv(inputs) + + +class TFBottleneckCSP(keras.layers.Layer): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv2d(c1, c_, 1, 1, bias=False, w=w.cv2) + self.cv3 = TFConv2d(c_, c_, 1, 1, bias=False, w=w.cv3) + self.cv4 = TFConv(2 * c_, c2, 1, 1, w=w.cv4) + self.bn = TFBN(w.bn) + self.act = lambda x: keras.activations.swish(x) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + y1 = self.cv3(self.m(self.cv1(inputs))) + y2 = self.cv2(inputs) + return self.cv4(self.act(self.bn(tf.concat((y1, y2), axis=3)))) + + +class TFC3(keras.layers.Layer): + # CSP Bottleneck with 3 convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([TFBottleneck(c_, c_, shortcut, g, e=1.0, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return 
self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class TFC3x(keras.layers.Layer): + # 3 module with cross-convolutions + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5, w=None): + # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c1, c_, 1, 1, w=w.cv2) + self.cv3 = TFConv(2 * c_, c2, 1, 1, w=w.cv3) + self.m = keras.Sequential([ + TFCrossConv(c_, c_, k=3, s=1, g=g, e=1.0, shortcut=shortcut, w=w.m[j]) for j in range(n)]) + + def call(self, inputs): + return self.cv3(tf.concat((self.m(self.cv1(inputs)), self.cv2(inputs)), axis=3)) + + +class TFSPP(keras.layers.Layer): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13), w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * (len(k) + 1), c2, 1, 1, w=w.cv2) + self.m = [keras.layers.MaxPool2D(pool_size=x, strides=1, padding='SAME') for x in k] + + def call(self, inputs): + x = self.cv1(inputs) + return self.cv2(tf.concat([x] + [m(x) for m in self.m], 3)) + + +class TFSPPF(keras.layers.Layer): + # Spatial pyramid pooling-Fast layer + def __init__(self, c1, c2, k=5, w=None): + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = TFConv(c1, c_, 1, 1, w=w.cv1) + self.cv2 = TFConv(c_ * 4, c2, 1, 1, w=w.cv2) + self.m = keras.layers.MaxPool2D(pool_size=k, strides=1, padding='SAME') + + def call(self, inputs): + x = self.cv1(inputs) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(tf.concat([x, y1, y2, self.m(y2)], 3)) + + +class TFDetect(keras.layers.Layer): + # TF YOLOv5 Detect layer + def __init__(self, nc=80, anchors=(), ch=(), imgsz=(640, 640), w=None): # detection layer + super().__init__() + self.stride = tf.convert_to_tensor(w.stride.numpy(), dtype=tf.float32) + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [tf.zeros(1)] * self.nl # init grid + self.anchors = tf.convert_to_tensor(w.anchors.numpy(), dtype=tf.float32) + self.anchor_grid = tf.reshape(self.anchors * tf.reshape(self.stride, [self.nl, 1, 1]), [self.nl, 1, -1, 1, 2]) + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] + self.training = False # set to False after building model + self.imgsz = imgsz + for i in range(self.nl): + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + self.grid[i] = self._make_grid(nx, ny) + + def call(self, inputs): + z = [] # inference output + x = [] + for i in range(self.nl): + x.append(self.m[i](inputs[i])) + # x(bs,20,20,255) to x(bs,3,20,20,85) + ny, nx = self.imgsz[0] // self.stride[i], self.imgsz[1] // self.stride[i] + x[i] = tf.reshape(x[i], [-1, ny * nx, self.na, self.no]) + + if not self.training: # inference + y = x[i] + grid = tf.transpose(self.grid[i], [0, 2, 1, 3]) - 0.5 + anchor_grid = tf.transpose(self.anchor_grid[i], [0, 2, 1, 3]) * 4 + xy = (tf.sigmoid(y[..., 0:2]) * 2 + grid) * self.stride[i] # xy + wh = tf.sigmoid(y[..., 2:4]) ** 2 * anchor_grid + # Normalize xywh to 0-1 to reduce calibration error + xy /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + wh /= tf.constant([[self.imgsz[1], self.imgsz[0]]], dtype=tf.float32) + y = tf.concat([xy, wh, tf.sigmoid(y[..., 4:5 + self.nc]), y[..., 5 + self.nc:]], -1) + 
z.append(tf.reshape(y, [-1, self.na * ny * nx, self.no])) + + return tf.transpose(x, [0, 2, 1, 3]) if self.training else (tf.concat(z, 1),) + + @staticmethod + def _make_grid(nx=20, ny=20): + # yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + # return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + xv, yv = tf.meshgrid(tf.range(nx), tf.range(ny)) + return tf.cast(tf.reshape(tf.stack([xv, yv], 2), [1, 1, ny * nx, 2]), dtype=tf.float32) + + +class TFSegment(TFDetect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), imgsz=(640, 640), w=None): + super().__init__(nc, anchors, ch, imgsz, w) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = [TFConv2d(x, self.no * self.na, 1, w=w.m[i]) for i, x in enumerate(ch)] # output conv + self.proto = TFProto(ch[0], self.npr, self.nm, w=w.proto) # protos + self.detect = TFDetect.call + + def call(self, x): + p = self.proto(x[0]) + # p = TFUpsample(None, scale_factor=4, mode='nearest')(self.proto(x[0])) # (optional) full-size protos + p = tf.transpose(p, [0, 3, 1, 2]) # from shape(1,160,160,32) to shape(1,32,160,160) + x = self.detect(self, x) + return (x, p) if self.training else (x[0], p) + + +class TFProto(keras.layers.Layer): + + def __init__(self, c1, c_=256, c2=32, w=None): + super().__init__() + self.cv1 = TFConv(c1, c_, k=3, w=w.cv1) + self.upsample = TFUpsample(None, scale_factor=2, mode='nearest') + self.cv2 = TFConv(c_, c_, k=3, w=w.cv2) + self.cv3 = TFConv(c_, c2, w=w.cv3) + + def call(self, inputs): + return self.cv3(self.cv2(self.upsample(self.cv1(inputs)))) + + +class TFUpsample(keras.layers.Layer): + # TF version of torch.nn.Upsample() + def __init__(self, size, scale_factor, mode, w=None): # warning: all arguments needed including 'w' + super().__init__() + assert scale_factor % 2 == 0, 'scale_factor must be multiple of 2' + self.upsample = lambda x: tf.image.resize(x, (x.shape[1] * scale_factor, x.shape[2] * scale_factor), mode) + # self.upsample = keras.layers.UpSampling2D(size=scale_factor, interpolation=mode) + # with default arguments: align_corners=False, half_pixel_centers=False + # self.upsample = lambda x: tf.raw_ops.ResizeNearestNeighbor(images=x, + # size=(x.shape[1] * 2, x.shape[2] * 2)) + + def call(self, inputs): + return self.upsample(inputs) + + +class TFConcat(keras.layers.Layer): + # TF version of torch.concat() + def __init__(self, dimension=1, w=None): + super().__init__() + assert dimension == 1, 'convert only NCHW to NHWC concat' + self.d = 3 + + def call(self, inputs): + return tf.concat(inputs, self.d) + + +def parse_model(d, ch, model, imgsz): # model_dict, input_channels(3) + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m_str = m + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except NameError: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [ + 
nn.Conv2d, Conv, DWConv, DWConvTranspose2d, Bottleneck, SPP, SPPF, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3x]: + c1, c2 = ch[f], args[0] + c2 = make_divisible(c2 * gw, 8) if c2 != no else c2 + + args = [c1, c2, *args[1:]] + if m in [BottleneckCSP, C3, C3x]: + args.insert(2, n) + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[-1 if x == -1 else x + 1] for x in f) + elif m in [Detect, Segment]: + args.append([ch[x + 1] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) + args.append(imgsz) + else: + c2 = ch[f] + + tf_m = eval('TF' + m_str.replace('nn.', '')) + m_ = keras.Sequential([tf_m(*args, w=model.model[i][j]) for j in range(n)]) if n > 1 \ + else tf_m(*args, w=model.model[i]) # module + + torch_m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in torch_m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{str(n):>3}{np:>10} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + ch.append(c2) + return keras.Sequential(layers), sorted(save) + + +class TFModel: + # TF YOLOv5 model + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, model=None, imgsz=(640, 640)): # model, channels, classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.FullLoader) # model dict + + # Define model + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding {cfg} nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + self.model, self.savelist = parse_model(deepcopy(self.yaml), ch=[ch], model=model, imgsz=imgsz) + + def predict(self, + inputs, + tf_nms=False, + agnostic_nms=False, + topk_per_class=100, + topk_all=100, + iou_thres=0.45, + conf_thres=0.25): + y = [] # outputs + x = inputs + for m in self.model.layers: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + x = m(x) # run + y.append(x if m.i in self.savelist else None) # save output + + # Add TensorFlow NMS + if tf_nms: + boxes = self._xywh2xyxy(x[0][..., :4]) + probs = x[0][:, :, 4:5] + classes = x[0][:, :, 5:] + scores = probs * classes + if agnostic_nms: + nms = AgnosticNMS()((boxes, classes, scores), topk_all, iou_thres, conf_thres) + else: + boxes = tf.expand_dims(boxes, 2) + nms = tf.image.combined_non_max_suppression(boxes, + scores, + topk_per_class, + topk_all, + iou_thres, + conf_thres, + clip_boxes=False) + return (nms,) + return x # output [1,6300,85] = [xywh, conf, class0, class1, ...] + # x = x[0] # [x(1,6300,85), ...] 
to x(6300,85) + # xywh = x[..., :4] # x(6300,4) boxes + # conf = x[..., 4:5] # x(6300,1) confidences + # cls = tf.reshape(tf.cast(tf.argmax(x[..., 5:], axis=1), tf.float32), (-1, 1)) # x(6300,1) classes + # return tf.concat([conf, cls, xywh], 1) + + @staticmethod + def _xywh2xyxy(xywh): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + x, y, w, h = tf.split(xywh, num_or_size_splits=4, axis=-1) + return tf.concat([x - w / 2, y - h / 2, x + w / 2, y + h / 2], axis=-1) + + +class AgnosticNMS(keras.layers.Layer): + # TF Agnostic NMS + def call(self, input, topk_all, iou_thres, conf_thres): + # wrap map_fn to avoid TypeSpec related error https://stackoverflow.com/a/65809989/3036450 + return tf.map_fn(lambda x: self._nms(x, topk_all, iou_thres, conf_thres), + input, + fn_output_signature=(tf.float32, tf.float32, tf.float32, tf.int32), + name='agnostic_nms') + + @staticmethod + def _nms(x, topk_all=100, iou_thres=0.45, conf_thres=0.25): # agnostic NMS + boxes, classes, scores = x + class_inds = tf.cast(tf.argmax(classes, axis=-1), tf.float32) + scores_inp = tf.reduce_max(scores, -1) + selected_inds = tf.image.non_max_suppression(boxes, + scores_inp, + max_output_size=topk_all, + iou_threshold=iou_thres, + score_threshold=conf_thres) + selected_boxes = tf.gather(boxes, selected_inds) + padded_boxes = tf.pad(selected_boxes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]], [0, 0]], + mode='CONSTANT', + constant_values=0.0) + selected_scores = tf.gather(scores_inp, selected_inds) + padded_scores = tf.pad(selected_scores, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode='CONSTANT', + constant_values=-1.0) + selected_classes = tf.gather(class_inds, selected_inds) + padded_classes = tf.pad(selected_classes, + paddings=[[0, topk_all - tf.shape(selected_boxes)[0]]], + mode='CONSTANT', + constant_values=-1.0) + valid_detections = tf.shape(selected_inds)[0] + return padded_boxes, padded_scores, padded_classes, valid_detections + + +def activations(act=nn.SiLU): + # Returns TF activation from input PyTorch activation + if isinstance(act, nn.LeakyReLU): + return lambda x: keras.activations.relu(x, alpha=0.1) + elif isinstance(act, nn.Hardswish): + return lambda x: x * tf.nn.relu6(x + 3) * 0.166666667 + elif isinstance(act, (nn.SiLU, SiLU)): + return lambda x: keras.activations.swish(x) + else: + raise Exception(f'no matching TensorFlow activation found for PyTorch activation {act}') + + +def representative_dataset_gen(dataset, ncalib=100): + # Representative dataset generator for use with converter.representative_dataset, returns a generator of np arrays + for n, (path, img, im0s, vid_cap, string) in enumerate(dataset): + im = np.transpose(img, [1, 2, 0]) + im = np.expand_dims(im, axis=0).astype(np.float32) + im /= 255 + yield [im] + if n >= ncalib: + break + + +def run( + weights=ROOT / 'yolov5s.pt', # weights path + imgsz=(640, 640), # inference size h,w + batch_size=1, # batch size + dynamic=False, # dynamic batch size +): + # PyTorch model + im = torch.zeros((batch_size, 3, *imgsz)) # BCHW image + model = attempt_load(weights, device=torch.device('cpu'), inplace=True, fuse=False) + _ = model(im) # inference + model.info() + + # TensorFlow model + im = tf.zeros((batch_size, *imgsz, 3)) # BHWC image + tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz) + _ = tf_model.predict(im) # inference + + # Keras model + im = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size) + keras_model = 
keras.Model(inputs=im, outputs=tf_model.predict(im)) + keras_model.summary() + + LOGGER.info('PyTorch, TensorFlow and Keras models successfully verified.\nUse export.py for TF model export.') + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='weights path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic batch size') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/models/yolo.py b/src/train_utils/train_models/models/yolov5/models/yolo.py new file mode 100644 index 0000000..ed21c06 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolo.py @@ -0,0 +1,391 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +YOLO-specific modules + +Usage: + $ python models/yolo.py --cfg yolov5s.yaml +""" + +import argparse +import contextlib +import os +import platform +import sys +from copy import deepcopy +from pathlib import Path + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +if platform.system() != 'Windows': + ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args +from utils.plots import feature_visualization +from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, + time_sync) + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + + +class Detect(nn.Module): + # YOLOv5 Detect head for detection models + stride = None # strides computed during build + dynamic = False # force grid reconstruction + export = False # export mode + + def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer + super().__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid + self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid + self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.inplace = inplace # use inplace ops (e.g. 
slice assignment) + + def forward(self, x): + z = [] # inference output + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) + + if isinstance(self, Segment): # (boxes + masks) + xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) + xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy + wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) + else: # Detect (boxes only) + xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) + xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy + wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, self.na * nx * ny, self.no)) + + return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) + + def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): + d = self.anchors[i].device + t = self.anchors[i].dtype + shape = 1, self.na, ny, nx, 2 # grid shape + y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) + yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility + grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. y = 2.0 * x - 0.5 + anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) + return grid, anchor_grid + + +class Segment(Detect): + # YOLOv5 Segment head for segmentation models + def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): + super().__init__(nc, anchors, ch, inplace) + self.nm = nm # number of masks + self.npr = npr # number of protos + self.no = 5 + nc + self.nm # number of outputs per anchor + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + self.proto = Proto(ch[0], self.npr, self.nm) # protos + self.detect = Detect.forward + + def forward(self, x): + p = self.proto(x[0]) + x = self.detect(self, x) + return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) + + +class BaseModel(nn.Module): + # YOLOv5 base model + def forward(self, x, profile=False, visualize=False): + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_once(self, x, profile=False, visualize=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + if profile: + self._profile_one_layer(m, x, dt) + x = m(x) # run + y.append(x if m.i in self.save else None) # save output + if visualize: + feature_visualization(x, m.type, m.i, save_dir=visualize) + return x + + def _profile_one_layer(self, m, x, dt): + c = m == self.model[-1] # is final layer, copy input as inplace fix + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPs + t = time_sync() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_sync() - t) * 100) + if m == self.model[0]: + LOGGER.info(f"{'time (ms)':>10s} {'GFLOPs':>10s} {'params':>10s} module") + LOGGER.info(f'{dt[-1]:10.2f} {o:10.2f} {m.np:10.0f} 
{m.type}') + if c: + LOGGER.info(f"{sum(dt):10.2f} {'-':>10s} {'-':>10s} Total") + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + LOGGER.info('Fusing layers... ') + for m in self.model.modules(): + if isinstance(m, (Conv, DWConv)) and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.forward_fuse # update forward + self.info() + return self + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + def _apply(self, fn): + # Apply to(), cpu(), cuda(), half() to model tensors that are not parameters or registered buffers + self = super()._apply(fn) + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment)): + m.stride = fn(m.stride) + m.grid = list(map(fn, m.grid)) + if isinstance(m.anchor_grid, list): + m.anchor_grid = list(map(fn, m.anchor_grid)) + return self + + +class DetectionModel(BaseModel): + # YOLOv5 detection model + def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super().__init__() + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg, encoding='ascii', errors='ignore') as f: + self.yaml = yaml.safe_load(f) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + self.inplace = self.yaml.get('inplace', True) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, (Detect, Segment)): + s = 256 # 2x min stride + m.inplace = self.inplace + forward = lambda x: self.forward(x)[0] if isinstance(m, Segment) else self.forward(x) + m.stride = torch.tensor([s / x.shape[-2] for x in forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + + # Init weights, biases + initialize_weights(self) + self.info() + LOGGER.info('') + + def forward(self, x, augment=False, profile=False, visualize=False): + if augment: + return self._forward_augment(x) # augmented inference, None + return self._forward_once(x, profile, visualize) # single-scale inference, train + + def _forward_augment(self, x): + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self._forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi = self._descale_pred(yi, fi, si, img_size) + y.append(yi) + y = self._clip_augmented(y) # clip augmented tails + return torch.cat(y, 1), None # augmented inference, train + + def _descale_pred(self, p, flips, scale, img_size): + # de-scale predictions following augmented inference (inverse operation) + if self.inplace: + p[..., :4] /= scale # de-scale + if flips == 2: + 
p[..., 1] = img_size[0] - p[..., 1] # de-flip ud + elif flips == 3: + p[..., 0] = img_size[1] - p[..., 0] # de-flip lr + else: + x, y, wh = p[..., 0:1] / scale, p[..., 1:2] / scale, p[..., 2:4] / scale # de-scale + if flips == 2: + y = img_size[0] - y # de-flip ud + elif flips == 3: + x = img_size[1] - x # de-flip lr + p = torch.cat((x, y, wh, p[..., 4:]), -1) + return p + + def _clip_augmented(self, y): + # Clip YOLOv5 augmented inference tails + nl = self.model[-1].nl # number of detection layers (P3-P5) + g = sum(4 ** x for x in range(nl)) # grid points + e = 1 # exclude layer count + i = (y[0].shape[1] // g) * sum(4 ** x for x in range(e)) # indices + y[0] = y[0][:, :-i] # large + i = (y[-1].shape[1] // g) * sum(4 ** (nl - 1 - x) for x in range(e)) # indices + y[-1] = y[-1][:, i:] # small + return y + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:5 + m.nc] += math.log(0.6 / (m.nc - 0.99999)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + +Model = DetectionModel # retain YOLOv5 'Model' class for backwards compatibility + + +class SegmentationModel(DetectionModel): + # YOLOv5 segmentation model + def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None): + super().__init__(cfg, ch, nc, anchors) + + +class ClassificationModel(BaseModel): + # YOLOv5 classification model + def __init__(self, cfg=None, model=None, nc=1000, cutoff=10): # yaml, model, number of classes, cutoff index + super().__init__() + self._from_detection_model(model, nc, cutoff) if model is not None else self._from_yaml(cfg) + + def _from_detection_model(self, model, nc=1000, cutoff=10): + # Create a YOLOv5 classification model from a YOLOv5 detection model + if isinstance(model, DetectMultiBackend): + model = model.model # unwrap DetectMultiBackend + model.model = model.model[:cutoff] # backbone + m = model.model[-1] # last layer + ch = m.conv.in_channels if hasattr(m, 'conv') else m.cv1.conv.in_channels # ch into module + c = Classify(ch, nc) # Classify() + c.i, c.f, c.type = m.i, m.f, 'models.common.Classify' # index, from, type + model.model[-1] = c # replace + self.model = model.model + self.stride = model.stride + self.save = [] + self.nc = nc + + def _from_yaml(self, cfg): + # Create a YOLOv5 classification model from a *.yaml file + self.model = None + + +def parse_model(d, ch): # model_dict, input_channels(3) + # Parse a YOLOv5 model.yaml dictionary + LOGGER.info(f"\n{'':>3}{'from':>18}{'n':>3}{'params':>10} {'module':<40}{'arguments':<30}") + anchors, nc, gd, gw, act = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'], d.get('activation') + if act: + Conv.default_act = eval(act) # redefine default activation, i.e. 
Conv.default_act = nn.SiLU() + LOGGER.info(f"{colorstr('activation:')} {act}") # print + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + with contextlib.suppress(NameError): + args[j] = eval(a) if isinstance(a, str) else a # eval strings + + n = n_ = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in { + Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv, + BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x}: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in {BottleneckCSP, C3, C3TR, C3Ghost, C3x}: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum(ch[x] for x in f) + # TODO: channel, gw, gd + elif m in {Detect, Segment}: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + if m is Segment: + args[3] = make_divisible(args[3] * gw, 8) + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*(m(*args) for _ in range(n))) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum(x.numel() for x in m_.parameters()) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + LOGGER.info(f'{i:>3}{str(f):>18}{n_:>3}{np:10.0f} {t:<40}{str(args):<30}') # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolov5s.yaml', help='model.yaml') + parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') + parser.add_argument('--line-profile', action='store_true', help='profile model speed layer by layer') + parser.add_argument('--test', action='store_true', help='test all yolo*.yaml') + opt = parser.parse_args() + opt.cfg = check_yaml(opt.cfg) # check YAML + print_args(vars(opt)) + device = select_device(opt.device) + + # Create model + im = torch.rand(opt.batch_size, 3, 640, 640).to(device) + model = Model(opt.cfg).to(device) + + # Options + if opt.line_profile: # profile layer by layer + model(im, profile=True) + + elif opt.profile: # profile forward-backward + results = profile(input=im, ops=[model], n=3) + + elif opt.test: # test all models + for cfg in Path(ROOT / 'models').rglob('yolo*.yaml'): + try: + _ = Model(cfg) + except Exception as e: + print(f'Error in {cfg}: {e}') + + else: # report fused model summary + model.fuse() diff --git a/src/train_utils/train_models/models/yolov5/models/yolov5l.yaml b/src/train_utils/train_models/models/yolov5/models/yolov5l.yaml new file mode 100644 index 0000000..ce8a5de --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolov5l.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/yolov5m.yaml b/src/train_utils/train_models/models/yolov5/models/yolov5m.yaml new file mode 100644 index 0000000..ad13ab3 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolov5m.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.67 # model depth multiple +width_multiple: 0.75 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + 
[-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/yolov5n.yaml b/src/train_utils/train_models/models/yolov5/models/yolov5n.yaml new file mode 100644 index 0000000..8a28a40 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolov5n.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/yolov5s.yaml b/src/train_utils/train_models/models/yolov5/models/yolov5s.yaml new file mode 100644 index 0000000..f35beab --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolov5s.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 0.33 # model depth multiple +width_multiple: 0.50 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, 
[256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/models/yolov5x.yaml b/src/train_utils/train_models/models/yolov5/models/yolov5x.yaml new file mode 100644 index 0000000..f617a02 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/models/yolov5x.yaml @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license + +# Parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv5 v6.0 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 1-P2/4 + [-1, 3, C3, [128]], + [-1, 1, Conv, [256, 3, 2]], # 3-P3/8 + [-1, 6, C3, [256]], + [-1, 1, Conv, [512, 3, 2]], # 5-P4/16 + [-1, 9, C3, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 7-P5/32 + [-1, 3, C3, [1024]], + [-1, 1, SPPF, [1024, 5]], # 9 + ] + +# YOLOv5 v6.0 head +head: + [[-1, 1, Conv, [512, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P4 + [-1, 3, C3, [512, False]], # 13 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 4], 1, Concat, [1]], # cat backbone P3 + [-1, 3, C3, [256, False]], # 17 (P3/8-small) + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 14], 1, Concat, [1]], # cat head P4 + [-1, 3, C3, [512, False]], # 20 (P4/16-medium) + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 10], 1, Concat, [1]], # cat head P5 + [-1, 3, C3, [1024, False]], # 23 (P5/32-large) + + [[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov5/requirements.txt b/src/train_utils/train_models/models/yolov5/requirements.txt new file mode 100644 index 0000000..11cb9aa --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/requirements.txt @@ -0,0 +1,50 @@ +# YOLOv5 requirements +# Usage: pip install -r requirements.txt + +# Base ------------------------------------------------------------------------ +gitpython>=3.1.30 +matplotlib>=3.2.2 +numpy>=1.18.5 +opencv-python>=4.1.1 +Pillow>=7.1.2 +psutil # system resources +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +thop>=0.1.1 # FLOPs computation +torch>=1.7.0 # see https://pytorch.org/get-started/locally (recommended) +torchvision>=0.8.1 +tqdm>=4.64.0 +# protobuf<=3.20.1 # https://github.com/ultralytics/yolov5/issues/8012 + +# Logging --------------------------------------------------------------------- +tensorboard>=2.4.1 +# clearml>=1.2.0 +# comet + +# Plotting -------------------------------------------------------------------- +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export ---------------------------------------------------------------------- +# coremltools>=6.0 # CoreML export +# onnx>=1.12.0 # ONNX export +# onnx-simplifier>=0.4.1 # ONNX simplifier +# nvidia-pyindex # TensorRT export +# nvidia-tensorrt # TensorRT export +# scikit-learn<=1.1.2 # CoreML quantization +# tensorflow>=2.4.1 # TF exports (-cpu, -aarch64, -macos) +# 
tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Deploy ---------------------------------------------------------------------- +setuptools>=65.5.1 # Snyk vulnerability fix +# tritonclient[all]~=2.24.0 + +# Extras ---------------------------------------------------------------------- +# ipython # interactive notebook +# mss # screenshots +# albumentations>=1.0.3 +# pycocotools>=2.0.6 # COCO mAP +# roboflow +# ultralytics # HUB https://hub.ultralytics.com diff --git a/src/train_utils/train_models/models/yolov5/segment/predict.py b/src/train_utils/train_models/models/yolov5/segment/predict.py new file mode 100644 index 0000000..d82df89 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/segment/predict.py @@ -0,0 +1,284 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run YOLOv5 segmentation inference on images, videos, directories, streams, etc. + +Usage - sources: + $ python segment/predict.py --weights yolov5s-seg.pt --source 0 # webcam + img.jpg # image + vid.mp4 # video + screen # screenshot + path/ # directory + list.txt # list of images + list.streams # list of streams + 'path/*.jpg' # glob + 'https://youtu.be/Zgi9g1ksQHc' # YouTube + 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream + +Usage - formats: + $ python segment/predict.py --weights yolov5s-seg.pt # PyTorch + yolov5s-seg.torchscript # TorchScript + yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-seg_openvino_model # OpenVINO + yolov5s-seg.engine # TensorRT + yolov5s-seg.mlmodel # CoreML (macOS-only) + yolov5s-seg_saved_model # TensorFlow SavedModel + yolov5s-seg.pb # TensorFlow GraphDef + yolov5s-seg.tflite # TensorFlow Lite + yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-seg_paddle_model # PaddlePaddle +""" + +import argparse +import os +import platform +import sys +from pathlib import Path + +import torch + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams +from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2, + increment_path, non_max_suppression, print_args, scale_boxes, scale_segments, + strip_optimizer) +from utils.plots import Annotator, colors, save_one_box +from utils.segment.general import masks2segments, process_mask, process_mask_native +from utils.torch_utils import select_device, smart_inference_mode + + +@smart_inference_mode() +def run( + weights=ROOT / 'yolov5s-seg.pt', # model.pt path(s) + source=ROOT / 'data/images', # file/dir/URL/glob/screen/0(webcam) + data=ROOT / 'data/coco128.yaml', # dataset.yaml path + imgsz=(640, 640), # inference size (height, width) + conf_thres=0.25, # confidence threshold + iou_thres=0.45, # NMS IOU threshold + max_det=1000, # maximum detections per image + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + view_img=False, # show results + save_txt=False, # save results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_crop=False, # save cropped prediction boxes + nosave=False, # do not save images/videos + classes=None, # filter by class: --class 0, or --class 0 2 3 + agnostic_nms=False, # class-agnostic NMS + augment=False, # augmented inference + visualize=False, # visualize features + update=False, # update all models + project=ROOT / 'runs/predict-seg', # save results to project/name + name='exp', # save results to project/name + exist_ok=False, # existing project/name ok, do not increment + line_thickness=3, # bounding box thickness (pixels) + hide_labels=False, # hide labels + hide_conf=False, # hide confidences + half=False, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + vid_stride=1, # video frame-rate stride + retina_masks=False, +): + source = str(source) + save_img = not nosave and not source.endswith('.txt') # save inference images + is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) + is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) + webcam = source.isnumeric() or source.endswith('.streams') or (is_url and not is_file) + screenshot = source.lower().startswith('screen') + if is_url and is_file: + source = check_file(source) # download + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + device = select_device(device) + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, names, pt = model.stride, model.names, model.pt + imgsz = check_img_size(imgsz, s=stride) # check image size + + # Dataloader + bs = 1 # batch_size + if webcam: + view_img = check_imshow(warn=True) + dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + bs = len(dataset) + elif screenshot: + dataset = LoadScreenshots(source, img_size=imgsz, stride=stride, auto=pt) + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride) + vid_path, vid_writer = [None] * bs, [None] * bs + + # Run inference + model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup + seen, windows, dt = 0, [], (Profile(), Profile(), Profile()) + for path, im, im0s, vid_cap, s in dataset: + with dt[0]: + im = torch.from_numpy(im).to(model.device) + im = im.half() if model.fp16 else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + if len(im.shape) == 3: + im = im[None] # expand for batch dim + + # Inference + with dt[1]: + visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False + pred, proto = model(im, augment=augment, visualize=visualize)[:2] + + # NMS + with dt[2]: + pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det, nm=32) + + # Second-stage classifier (optional) + # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) + + # Process predictions + for i, det in enumerate(pred): # per image + seen += 1 + if webcam: # batch_size >= 1 + p, im0, frame = path[i], im0s[i].copy(), dataset.count + s += f'{i}: ' + else: + p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # im.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode 
== 'image' else f'_{frame}') # im.txt + s += '%gx%g ' % im.shape[2:] # print string + imc = im0.copy() if save_crop else im0 # for save_crop + annotator = Annotator(im0, line_width=line_thickness, example=str(names)) + if len(det): + if retina_masks: + # scale bbox first the crop masks + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + masks = process_mask_native(proto[i], det[:, 6:], det[:, :4], im0.shape[:2]) # HWC + else: + masks = process_mask(proto[i], det[:, 6:], det[:, :4], im.shape[2:], upsample=True) # HWC + det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size + + # Segments + if save_txt: + segments = [ + scale_segments(im0.shape if retina_masks else im.shape[2:], x, im0.shape, normalize=True) + for x in reversed(masks2segments(masks))] + + # Print results + for c in det[:, 5].unique(): + n = (det[:, 5] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Mask plotting + annotator.masks( + masks, + colors=[colors(x, True) for x in det[:, 5]], + im_gpu=torch.as_tensor(im0, dtype=torch.float16).to(device).permute(2, 0, 1).flip(0).contiguous() / + 255 if retina_masks else im[i]) + + # Write results + for j, (*xyxy, conf, cls) in enumerate(reversed(det[:, :6])): + if save_txt: # Write to file + seg = segments[j].reshape(-1) # (n,2) to (n*2) + line = (cls, *seg, conf) if save_conf else (cls, *seg) # label format + with open(f'{txt_path}.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or save_crop or view_img: # Add bbox to image + c = int(cls) # integer class + label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') + annotator.box_label(xyxy, label, color=colors(c, True)) + # annotator.draw.polygon(segments[j], outline=colors(c, True), width=3) + if save_crop: + save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) + + # Stream results + im0 = annotator.result() + if view_img: + if platform.system() == 'Linux' and p not in windows: + windows.append(p) + cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux) + cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0]) + cv2.imshow(str(p), im0) + if cv2.waitKey(1) == ord('q'): # 1 millisecond + exit() + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + else: # 'video' or 'stream' + if vid_path[i] != save_path: # new video + vid_path[i] = save_path + if isinstance(vid_writer[i], cv2.VideoWriter): + vid_writer[i].release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos + vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer[i].write(im0) + + # Print time (inference-only) + LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms") + + # Print results + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir 
/ 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + if update: + strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning) + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob/screen/0(webcam)') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path') + parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') + parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='show results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--visualize', action='store_true', help='visualize features') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default=ROOT / 'runs/predict-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)') + parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels') + parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride') + parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution') + opt = parser.parse_args() + opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + run(**vars(opt)) + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/segment/train.py b/src/train_utils/train_models/models/yolov5/segment/train.py new file mode 100644 index 0000000..de5f703 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/segment/train.py @@ -0,0 +1,664 @@ +# YOLOv5 🚀 by Ultralytics, 
GPL-3.0 license +""" +Train a YOLOv5 segment model on a segment dataset +Models and datasets download automatically from the latest YOLOv5 release. + +Usage - Single-GPU training: + $ python segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 # from pretrained (recommended) + $ python segment/train.py --data coco128-seg.yaml --weights '' --cfg yolov5s-seg.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 segment/train.py --data coco128-seg.yaml --weights yolov5s-seg.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +""" + +import argparse +import math +import os +import random +import subprocess +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import segment.val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import SegmentationModel +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, one_cycle, print_args, print_mutation, strip_optimizer, yaml_save) +from utils.loggers import GenericLogger +from utils.plots import plot_evolve, plot_labels +from utils.segment.dataloaders import create_dataloader +from utils.segment.loss import ComputeLoss +from utils.segment.metrics import KEYS, fitness +from utils.segment.plots import plot_images_and_masks, plot_results_with_masks +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze, mask_ratio = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze, opt.mask_ratio + # callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load 
hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers + data_dict = None + if RANK in {-1, 0}: + logger = GenericLogger(opt=opt, console_logger=LOGGER) + + # Config + plots = not evolve and not opt.noplots # create plots + overlap = not opt.no_overlap + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = SegmentationModel(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = SegmentationModel(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + logger.update_params({'batch_size': batch_size}) + # loggers.on_params_update({"batch_size": batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader( + train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + ) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + mask_downsample_ratio=mask_ratio, + overlap_mask=overlap, + prefix=colorstr('val: '))[0] + + if not resume: + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + if plots: + plot_labels(labels, names, save_dir) + # callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3 / nl # scale to layers + hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model, overlap=overlap) # init loss class + # callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + # callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 8) % + ('Epoch', 'GPU_mem', 'box_loss', 'seg_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _, masks) in pbar: # batch ------------------------------------------------------ + # callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated 
batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device), masks=masks.to(device).float()) + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 6) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + # callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths) + # if callbacks.stop_training: + # return + + # Mosaic plots + if plots: + if ni < 3: + plot_images_and_masks(imgs, targets, masks, paths, save_dir / f'train_batch{ni}.jpg') + if ni == 10: + files = sorted(save_dir.glob('train*.jpg')) + logger.log_images(files, 'Mosaics', epoch) + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + # callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + # 
callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + # Log val metrics and media + metrics_dict = dict(zip(KEYS, log_vals)) + logger.log_metrics(metrics_dict, epoch) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + logger.log_model(w / f'epoch{epoch}.pt') + del ckpt + # callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss, + mask_downsample_ratio=mask_ratio, + overlap=overlap) # val best model with plots + if is_coco: + # callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + metrics_dict = dict(zip(KEYS, list(mloss) + list(results) + lr)) + logger.log_metrics(metrics_dict, epoch) + + # callbacks.run('on_train_end', last, best, epoch, results) + # on train end callback using genericLogger + logger.log_metrics(dict(zip(KEYS[4:16], results)), epochs) + if not opt.evolve: + logger.log_model(best, epoch) + if plots: + plot_results_with_masks(file=save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(save_dir / f) for f in files if (save_dir / f).exists()] # filter + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}") + logger.log_images(files, 'Results', epoch + 1) + logger.log_images(sorted(save_dir.glob('val*.jpg')), 'Validation', epoch + 1) + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s-seg.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 
'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train-seg', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Instance Segmentation Args + parser.add_argument('--mask-ratio', type=int, default=4, help='Downsample the truth masks to save memory') + parser.add_argument('--no-overlap', action='store_true', help='Overlapping masks train faster at slightly lower mAP') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # Resume + if opt.resume and
not opt.evolve: # resume from specified or most recent last.pt + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train-seg'): # if default project name, rename to runs/evolve-seg + opt.project = str(ROOT / 'runs/evolve-seg') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 
'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mosaic (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 12] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + print_mutation(KEYS[4:16], results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished after {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python segment/train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128-seg.yaml', imgsz=320, weights='yolov5m-seg.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/segment/tutorial.ipynb b/src/train_utils/train_models/models/yolov5/segment/tutorial.ipynb new file mode 100644 index 0000000..cb52045 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/segment/tutorial.ipynb @@ -0,0 +1,594 @@ +{ + "cells": [ + {
"cell_type": "markdown", + "metadata": { + "id": "t6MPjfT5NrKQ" + }, + "source": [ + "
\n", + "\n", + " \n", + " \n", + "\n", + "\n", + "
\n", + " \"Run\n", + " \"Open\n", + " \"Open\n", + "
\n", + "\n", + "This YOLOv5 🚀 notebook by Ultralytics presents simple train, validate and predict examples to help start your AI adventure.
See GitHub for community support or contact us for professional support.\n", + "\n", + "
" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mGmQbAO5pQb" + }, + "source": [ + "# Setup\n", + "\n", + "Clone GitHub [repository](https://github.com/ultralytics/yolov5), install [dependencies](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) and check PyTorch and GPU." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "wbvMlHd_QwMG", + "outputId": "171b23f0-71b9-4cbf-b666-6fa2ecef70c8" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stderr", + "text": [ + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Setup complete ✅ (2 CPUs, 12.7 GB RAM, 22.6/78.2 GB disk)\n" + ] + } + ], + "source": [ + "!git clone https://github.com/ultralytics/yolov5 # clone\n", + "%cd yolov5\n", + "%pip install -qr requirements.txt # install\n", + "\n", + "import torch\n", + "import utils\n", + "display = utils.notebook_init() # checks" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4JnkELT0cIJg" + }, + "source": [ + "# 1. Predict\n", + "\n", + "`segment/predict.py` runs YOLOv5 instance segmentation inference on a variety of sources, downloading models automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases), and saving results to `runs/predict`. Example inference sources are:\n", + "\n", + "```shell\n", + "python segment/predict.py --source 0 # webcam\n", + " img.jpg # image \n", + " vid.mp4 # video\n", + " screen # screenshot\n", + " path/ # directory\n", + " 'path/*.jpg' # glob\n", + " 'https://youtu.be/Zgi9g1ksQHc' # YouTube\n", + " 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "zR9ZbuQCH7FX", + "outputId": "3f67f1c7-f15e-4fa5-d251-967c3b77eaad" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1msegment/predict: \u001b[0mweights=['yolov5s-seg.pt'], source=data/images, data=data/coco128.yaml, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/predict-seg, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False, vid_stride=1, retina_masks=False\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Downloading https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s-seg.pt to yolov5s-seg.pt...\n", + "100% 14.9M/14.9M [00:01<00:00, 12.0MB/s]\n", + "\n", + "Fusing layers... 
\n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 18.2ms\n", + "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, 13.4ms\n", + "Speed: 0.5ms pre-process, 15.8ms inference, 18.5ms NMS per image at shape (1, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/predict-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "!python segment/predict.py --weights yolov5s-seg.pt --img 640 --conf 0.25 --source data/images\n", + "#display.Image(filename='runs/predict-seg/exp/zidane.jpg', width=600)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hkAzDWJ7cWTr" + }, + "source": [ + "        \n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0eq1SMWl6Sfn" + }, + "source": [ + "# 2. Validate\n", + "Validate a model's accuracy on the [COCO](https://cocodataset.org/#home) dataset's `val` or `test` splits. Models are downloaded automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases). To show results by class use the `--verbose` flag." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "WQPtK1QYVaD_", + "outputId": "9d751d8c-bee8-4339-cf30-9854ca530449" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Downloading https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels-segments.zip ...\n", + "Downloading http://images.cocodataset.org/zips/val2017.zip ...\n", + "######################################################################## 100.0%\n", + "######################################################################## 100.0%\n" + ] + } + ], + "source": [ + "# Download COCO val\n", + "!bash data/scripts/get_coco.sh --val --segments # download (780M - 5000 images)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "X58w8JLpMnjH", + "outputId": "a140d67a-02da-479e-9ddb-7d54bf9e407a" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1msegment/val: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5s-seg.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, max_det=300, task=val, device=, workers=8, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=False, project=runs/val-seg, name=exp, exist_ok=False, half=True, dnn=False\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "Fusing layers... \n", + "YOLOv5s-seg summary: 224 layers, 7611485 parameters, 0 gradients, 26.4 GFLOPs\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco/val2017... 
4952 images, 48 backgrounds, 0 corrupt: 100% 5000/5000 [00:03<00:00, 1361.31it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: /content/datasets/coco/val2017.cache\n", + " Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100% 157/157 [01:54<00:00, 1.37it/s]\n", + " all 5000 36335 0.673 0.517 0.566 0.373 0.672 0.49 0.532 0.319\n", + "Speed: 0.6ms pre-process, 4.4ms inference, 2.9ms NMS per image at shape (32, 3, 640, 640)\n", + "Results saved to \u001b[1mruns/val-seg/exp\u001b[0m\n" + ] + } + ], + "source": [ + "# Validate YOLOv5s-seg on COCO val\n", + "!python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 --half" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZY2VXXXu74w5" + }, + "source": [ + "# 3. Train\n", + "\n", + "

\n", + "Close the active learning loop by sampling images from your inference conditions with the `roboflow` pip package\n", + "

\n", + "\n", + "Train a YOLOv5s-seg model on the [COCO128](https://www.kaggle.com/ultralytics/coco128) dataset with `--data coco128-seg.yaml`, starting from pretrained `--weights yolov5s-seg.pt`, or from randomly initialized `--weights '' --cfg yolov5s-seg.yaml`.\n", + "\n", + "- **Pretrained [Models](https://github.com/ultralytics/yolov5/tree/master/models)** are downloaded\n", + "automatically from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases)\n", + "- **[Datasets](https://github.com/ultralytics/yolov5/tree/master/data)** available for autodownload include: [COCO](https://github.com/ultralytics/yolov5/blob/master/data/coco.yaml), [COCO128](https://github.com/ultralytics/yolov5/blob/master/data/coco128.yaml), [VOC](https://github.com/ultralytics/yolov5/blob/master/data/VOC.yaml), [Argoverse](https://github.com/ultralytics/yolov5/blob/master/data/Argoverse.yaml), [VisDrone](https://github.com/ultralytics/yolov5/blob/master/data/VisDrone.yaml), [GlobalWheat](https://github.com/ultralytics/yolov5/blob/master/data/GlobalWheat2020.yaml), [xView](https://github.com/ultralytics/yolov5/blob/master/data/xView.yaml), [Objects365](https://github.com/ultralytics/yolov5/blob/master/data/Objects365.yaml), [SKU-110K](https://github.com/ultralytics/yolov5/blob/master/data/SKU-110K.yaml).\n", + "- **Training Results** are saved to `runs/train-seg/` with incrementing run directories, i.e. `runs/train-seg/exp2`, `runs/train-seg/exp3` etc.\n", + "

\n", + "\n", + "A **Mosaic Dataloader** is used for training which combines 4 images into 1 mosaic.\n", + "\n", + "## Train on Custom Data with Roboflow 🌟 NEW\n", + "\n", + "[Roboflow](https://roboflow.com/?ref=ultralytics) enables you to easily **organize, label, and prepare** a high quality dataset with your own custom data. Roboflow also makes it easy to establish an active learning pipeline, collaborate with your team on dataset improvement, and integrate directly into your model building workflow with the `roboflow` pip package.\n", + "\n", + "- Custom Training Example: [https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/](https://blog.roboflow.com/train-yolov5-instance-segmentation-custom-dataset/?ref=ultralytics)\n", + "- Custom Training Notebook: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1JTz7kpmHsg-5qwVz2d2IH3AaenI1tv0N?usp=sharing)\n", + "
\n", + "\n", + "

Label images lightning fast (including with model-assisted labeling)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "i3oKtE4g-aNn" + }, + "outputs": [], + "source": [ + "#@title Select YOLOv5 🚀 logger {run: 'auto'}\n", + "logger = 'TensorBoard' #@param ['TensorBoard', 'Comet', 'ClearML']\n", + "\n", + "if logger == 'TensorBoard':\n", + " %load_ext tensorboard\n", + " %tensorboard --logdir runs/train-seg\n", + "elif logger == 'Comet':\n", + " %pip install -q comet_ml\n", + " import comet_ml; comet_ml.init()\n", + "elif logger == 'ClearML':\n", + " import clearml; clearml.browser_login()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "1NcFxRcFdJ_O", + "outputId": "3a3e0cf7-e79c-47a5-c8e7-2d26eeeab988" + }, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "\u001b[34m\u001b[1msegment/train: \u001b[0mweights=yolov5s-seg.pt, cfg=, data=coco128-seg.yaml, hyp=data/hyps/hyp.scratch-low.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, noplots=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, optimizer=SGD, sync_bn=False, workers=8, project=runs/train-seg, name=exp, exist_ok=False, quad=False, cos_lr=False, label_smoothing=0.0, patience=100, freeze=[0], save_period=-1, seed=0, local_rank=-1, mask_ratio=4, no_overlap=False\n", + "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n", + "YOLOv5 🚀 v7.0-2-gc9d47ae Python-3.7.15 torch-1.12.1+cu113 CUDA:0 (Tesla T4, 15110MiB)\n", + "\n", + "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n", + "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train-seg', view at http://localhost:6006/\n", + "\n", + "Dataset not found ⚠️, missing paths ['/content/datasets/coco128-seg/images/train2017']\n", + "Downloading https://ultralytics.com/assets/coco128-seg.zip to coco128-seg.zip...\n", + "100% 6.79M/6.79M [00:01<00:00, 6.73MB/s]\n", + "Dataset download success ✅ (1.9s), saved to \u001b[1m/content/datasets\u001b[0m\n", + "\n", + " from n params module arguments \n", + " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n", + " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n", + " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n", + " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n", + " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n", + " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n", + " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n", + " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n", + " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n", + " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n", + " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n", + " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 12 [-1, 6] 1 0 models.common.Concat [1] \n", + " 13 -1 1 361984 models.common.C3 [512, 256, 1, False] \n", + " 14 -1 1 33024 models.common.Conv [256, 128, 1, 1] \n", + " 15 -1 1 0 
torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n", + " 16 [-1, 4] 1 0 models.common.Concat [1] \n", + " 17 -1 1 90880 models.common.C3 [256, 128, 1, False] \n", + " 18 -1 1 147712 models.common.Conv [128, 128, 3, 2] \n", + " 19 [-1, 14] 1 0 models.common.Concat [1] \n", + " 20 -1 1 296448 models.common.C3 [256, 256, 1, False] \n", + " 21 -1 1 590336 models.common.Conv [256, 256, 3, 2] \n", + " 22 [-1, 10] 1 0 models.common.Concat [1] \n", + " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n", + " 24 [17, 20, 23] 1 615133 models.yolo.Segment [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], 32, 128, [128, 256, 512]]\n", + "Model summary: 225 layers, 7621277 parameters, 7621277 gradients, 26.6 GFLOPs\n", + "\n", + "Transferred 367/367 items from yolov5s-seg.pt\n", + "\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed ✅\n", + "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD(lr=0.01) with parameter groups 60 weight(decay=0.0), 63 weight(decay=0.0005), 63 bias\n", + "\u001b[34m\u001b[1malbumentations: \u001b[0mBlur(p=0.01, blur_limit=(3, 7)), MedianBlur(p=0.01, blur_limit=(3, 7)), ToGray(p=0.01), CLAHE(p=0.01, clip_limit=(1, 4.0), tile_grid_size=(8, 8))\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00<00:00, 1389.59it/s]\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: /content/datasets/coco128-seg/labels/train2017.cache\n", + "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 238.86it/s]\n", + "\u001b[34m\u001b[1mval: \u001b[0mScanning /content/datasets/coco128-seg/labels/train2017.cache... 126 images, 2 backgrounds, 0 corrupt: 100% 128/128 [00:00 # 2. paste API key\n", + "python train.py --img 640 --epochs 3 --data coco128.yaml --weights yolov5s.pt # 3. train\n", + "```\n", + "To learn more about all of the supported Comet features for this integration, check out the [Comet Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/comet). If you'd like to learn more about Comet, head over to our [documentation](https://www.comet.com/docs/v2/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=yolov5_colab). Get started by trying out the Comet Colab Notebook:\n", + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing)\n", + "\n", + "\n", + "\"Comet" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lay2WsTjNJzP" + }, + "source": [ + "## ClearML Logging and Automation 🌟 NEW\n", + "\n", + "[ClearML](https://cutt.ly/yolov5-notebook-clearml) is completely integrated into YOLOv5 to track your experimentation, manage dataset versions and even remotely execute training runs. To enable ClearML (check cells above):\n", + "\n", + "- `pip install clearml`\n", + "- run `clearml-init` to connect to a ClearML server (**deploy your own [open-source server](https://github.com/allegroai/clearml-server)**, or use our [free hosted server](https://cutt.ly/yolov5-notebook-clearml))\n", + "\n", + "You'll get all the great expected features from an experiment manager: live updates, model upload, experiment comparison etc. but ClearML also tracks uncommitted changes and installed packages for example. Thanks to that ClearML Tasks (which is what we call experiments) are also reproducible on different machines! 
With only 1 extra line, we can schedule a YOLOv5 training task on a queue to be executed by any number of ClearML Agents (workers).\n", + "\n", + "You can use ClearML Data to version your dataset and then pass it to YOLOv5 simply using its unique ID. This will help you keep track of your data without adding extra hassle. Explore the [ClearML Tutorial](https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml) for details!\n", + "\n", + "\n", + "\"ClearML" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-WPvRbS5Swl6" + }, + "source": [ + "## Local Logging\n", + "\n", + "Training results are automatically logged with [Tensorboard](https://www.tensorflow.org/tensorboard) and [CSV](https://github.com/ultralytics/yolov5/pull/4148) loggers to `runs/train`, with a new experiment directory created for each new training as `runs/train/exp2`, `runs/train/exp3`, etc.\n", + "\n", + "This directory contains train and val statistics, mosaics, labels, predictions and augmented mosaics, as well as metrics and charts including precision-recall (PR) curves and confusion matrices. \n", + "\n", + "\"Local\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Zelyeqbyt3GD" + }, + "source": [ + "# Environments\n", + "\n", + "YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled):\n", + "\n", + "- **Notebooks** with free GPU: \"Run \"Open \"Open\n", + "- **Google Cloud** Deep Learning VM. See [GCP Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/GCP-Quickstart)\n", + "- **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart)\n", + "- **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) \"Docker\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6Qu7Iesl0p54" + }, + "source": [ + "# Status\n", + "\n", + "![YOLOv5 CI](https://github.com/ultralytics/yolov5/actions/workflows/ci-testing.yml/badge.svg)\n", + "\n", + "If this badge is green, all [YOLOv5 GitHub Actions](https://github.com/ultralytics/yolov5/actions) Continuous Integration (CI) tests are currently passing. CI tests verify correct operation of YOLOv5 training ([train.py](https://github.com/ultralytics/yolov5/blob/master/train.py)), testing ([val.py](https://github.com/ultralytics/yolov5/blob/master/val.py)), inference ([detect.py](https://github.com/ultralytics/yolov5/blob/master/detect.py)) and export ([export.py](https://github.com/ultralytics/yolov5/blob/master/export.py)) on macOS, Windows, and Ubuntu every 24 hours and on every commit.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IEijrePND_2I" + }, + "source": [ + "# Appendix\n", + "\n", + "Additional content below." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GMusP4OAxFu6" + }, + "outputs": [], + "source": [ + "# YOLOv5 PyTorch HUB Inference (DetectionModels only)\n", + "import torch\n", + "\n", + "model = torch.hub.load('ultralytics/yolov5', 'yolov5s-seg') # yolov5n - yolov5x6 or custom\n", + "im = 'https://ultralytics.com/images/zidane.jpg' # file, Path, PIL.Image, OpenCV, nparray, list\n", + "results = model(im) # inference\n", + "results.print() # or .show(), .save(), .crop(), .pandas(), etc."
+ ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "name": "YOLOv5 Segmentation Tutorial", + "provenance": [], + "toc_visible": true + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/src/train_utils/train_models/models/yolov5/segment/val.py b/src/train_utils/train_models/models/yolov5/segment/val.py new file mode 100644 index 0000000..a7f95fe --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/segment/val.py @@ -0,0 +1,473 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 segment model on a segment dataset + +Usage: + $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) + $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments + +Usage - formats: + $ python segment/val.py --weights yolov5s-seg.pt # PyTorch + yolov5s-seg.torchscript # TorchScript + yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s-seg_openvino_label # OpenVINO + yolov5s-seg.engine # TensorRT + yolov5s-seg.mlmodel # CoreML (macOS-only) + yolov5s-seg_saved_model # TensorFlow SavedModel + yolov5s-seg.pb # TensorFlow GraphDef + yolov5s-seg.tflite # TensorFlow Lite + yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU + yolov5s-seg_paddle_model # PaddlePaddle +""" + +import argparse +import json +import os +import subprocess +import sys +from multiprocessing.pool import ThreadPool +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import torch.nn.functional as F + +from models.common import DetectMultiBackend +from models.yolo import SegmentationModel +from utils.callbacks import Callbacks +from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, + check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, + non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, box_iou +from utils.plots import output_to_target, plot_val_study +from utils.segment.dataloaders import create_dataloader +from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image +from utils.segment.metrics import Metrics, ap_per_class_box_and_mask +from utils.segment.plots import plot_images_and_masks +from utils.torch_utils import de_parallel, select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map, pred_masks): + # Save one JSON result {"image_id": 42, 
"category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + from pycocotools.mask import encode + + def single_encode(x): + rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0] + rle['counts'] = rle['counts'].decode('utf-8') + return rle + + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + pred_masks = np.transpose(pred_masks, (2, 0, 1)) + with ThreadPool(NUM_THREADS) as pool: + rles = pool.map(single_encode, pred_masks) + for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5), + 'segmentation': rles[i]}) + + +def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + if masks: + if overlap: + nl = len(labels) + index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 + gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) + gt_masks = torch.where(gt_masks == index, 1.0, 0.0) + if gt_masks.shape[1:] != pred_masks.shape[1:]: + gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0] + gt_masks = gt_masks.gt_(0.5) + iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) + else: # boxes + iou = box_iou(labels[:, 1:], detections[:, :4]) + + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val-seg', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + overlap=False, + mask_downsample_ratio=1, + compute_loss=None, + callbacks=Callbacks(), +): + if save_json: + check_requirements('pycocotools>=2.0.6') + process = process_mask_native # more accurate + else: + process = process_mask # faster + + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + nm = de_parallel(model).model[-1].nm # number of masks + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '), + overlap_mask=overlap, + mask_downsample_ratio=mask_downsample_ratio)[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R', + 'mAP50', 'mAP50-95)') + dt = Profile(), Profile(), Profile() + metrics = Metrics() + loss = torch.zeros(4, device=device) + jdict, stats = [], [] + # callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): + # callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + masks = masks.to(device) + masks = masks.float() + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) + + # Loss + if compute_loss: + loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det, + nm=nm) + + # Metrics + plot_masks = [] # masks for plotting + for si, (pred, proto) in enumerate(zip(preds, protos)): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Masks + midx = [si] if overlap else targets[:, 0] == si + gt_masks = masks[midx] + pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct_bboxes = process_batch(predn, labelsn, iouv) + correct_masks = process_batch(predn, labelsn, iouv, 
pred_masks, gt_masks, overlap=overlap, masks=True) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) + + pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) + if plots and batch_i < 3: + plot_masks.append(pred_masks[:15]) # filter top 15 to plot + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + pred_masks = scale_image(im[si].shape[1:], + pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) + save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary + # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + if len(plot_masks): + plot_masks = torch.cat(plot_masks, dim=0) + plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) + plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, + save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + # callbacks.run('on_val_batch_end') + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) + metrics.update(results) + nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), *metrics.mean_results())) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(metrics.ap_class_index): + LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + # callbacks.run('on_val_end') + + mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f'{w}_predictions.json') # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + results = [] + for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + results.extend(eval.stats[:2]) # update results (mAP@0.5:0.95, mAP@0.5) + map_bbox, map50_bbox, map_mask, map50_mask = results + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask + return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128-seg.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s-seg.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val-seg', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + # opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.warning(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.warning('WARNING ⚠️ --save-hybrid returns high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) + plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/setup.cfg b/src/train_utils/train_models/models/yolov5/setup.cfg new file mode 100644 index 0000000..d7c4cb3 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/setup.cfg @@ -0,0 +1,54 @@ +# Project-wide configuration file, can be used for package metadata and other tool configurations +# Example usage: global configuration for PEP8 (via flake8) settings or default pytest arguments +# Local usage: pip install pre-commit, pre-commit run --all-files + +[metadata] +license_file = LICENSE +description_file = README.md + +[tool:pytest] +norecursedirs = + .git + dist + build +addopts = + --doctest-modules + --durations=25 + --color=yes + +[flake8] +max-line-length = 120 +exclude = .tox,*.egg,build,temp +select = E,W,F +doctests = True +verbose = 2 +# https://pep8.readthedocs.io/en/latest/intro.html#error-codes +format = pylint +# see: https://www.flake8rules.com/ +ignore = E731,F405,E402,F401,W504,E127,E231,E501,F403 + # E731: Do not assign a lambda expression, use a def + # F405: name may be undefined, or defined from star imports: module + # E402: module level import not at top of file + # F401: module imported but unused + # W504: line break after binary operator + # E127: continuation line over-indented for visual indent + # E231: missing whitespace after ‘,’, ‘;’, or ‘:’ + # E501: line too long + # F403: ‘from module import *’ used; unable to detect undefined names + +[isort] +# https://pycqa.github.io/isort/docs/configuration/options.html +line_length = 120 +# see: https://pycqa.github.io/isort/docs/configuration/multi_line_output_modes.html +multi_line_output = 0 + +[yapf] +based_on_style = pep8 +spaces_before_comment = 2 +COLUMN_LIMIT = 120 +COALESCE_BRACKETS = True +SPACES_AROUND_POWER_OPERATOR = True +SPACE_BETWEEN_ENDING_COMMA_AND_CLOSING_BRACKET = False +SPLIT_BEFORE_CLOSING_BRACKET = False +SPLIT_BEFORE_FIRST_ARGUMENT = False +# EACH_DICT_ENTRY_ON_SEPARATE_LINE = False diff --git a/src/train_utils/train_models/models/yolov5/train.py b/src/train_utils/train_models/models/yolov5/train.py new file mode 100644 index 0000000..960f24c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/train.py @@ -0,0 +1,640 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Train a YOLOv5 model on a custom dataset. +Models and datasets download automatically from the latest YOLOv5 release.
+ +Usage - Single-GPU training: + $ python train.py --data coco128.yaml --weights yolov5s.pt --img 640 # from pretrained (recommended) + $ python train.py --data coco128.yaml --weights '' --cfg yolov5s.yaml --img 640 # from scratch + +Usage - Multi-GPU DDP training: + $ python -m torch.distributed.run --nproc_per_node 4 --master_port 1 train.py --data coco128.yaml --weights yolov5s.pt --img 640 --device 0,1,2,3 + +Models: https://github.com/ultralytics/yolov5/tree/master/models +Datasets: https://github.com/ultralytics/yolov5/tree/master/data +Tutorial: https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data +""" + +import argparse +import math +import os +import random +import subprocess +import sys +import time +from copy import deepcopy +from datetime import datetime +from pathlib import Path + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import yaml +from torch.optim import lr_scheduler +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +import val as validate # for end-of-epoch mAP +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.autobatch import check_train_batch_size +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.downloads import attempt_download, is_url +from utils.general import (LOGGER, TQDM_BAR_FORMAT, check_amp, check_dataset, check_file, check_git_info, + check_git_status, check_img_size, check_requirements, check_suffix, check_yaml, colorstr, + get_latest_run, increment_path, init_seeds, intersect_dicts, labels_to_class_weights, + labels_to_image_weights, methods, one_cycle, print_args, print_mutation, strip_optimizer, + yaml_save) +from utils.loggers import Loggers +from utils.loggers.comet.comet_utils import check_comet_resume +from utils.loss import ComputeLoss +from utils.metrics import fitness +from utils.plots import plot_evolve +from utils.torch_utils import (EarlyStopping, ModelEMA, de_parallel, select_device, smart_DDP, smart_optimizer, + smart_resume, torch_distributed_zero_first) + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) +GIT_INFO = check_git_info() + + +def train(hyp, opt, device, callbacks): # hyp is path/to/hyp.yaml or hyp dictionary + save_dir, epochs, batch_size, weights, single_cls, evolve, data, cfg, resume, noval, nosave, workers, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.weights, opt.single_cls, opt.evolve, opt.data, opt.cfg, \ + opt.resume, opt.noval, opt.nosave, opt.workers, opt.freeze + callbacks.run('on_pretrain_routine_start') + + # Directories + w = save_dir / 'weights' # weights dir + (w.parent if evolve else w).mkdir(parents=True, exist_ok=True) # make dir + last, best = w / 'last.pt', w / 'best.pt' + + # Hyperparameters + if isinstance(hyp, str): + with open(hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + LOGGER.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + opt.hyp = hyp.copy() # for saving hyps to checkpoints + + # Save run settings + if not evolve: + yaml_save(save_dir / 'hyp.yaml', hyp) + yaml_save(save_dir / 'opt.yaml', vars(opt)) + + # Loggers 
+ data_dict = None + if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + + # Register actions + for k in methods(loggers): + callbacks.register_action(k, callback=getattr(loggers, k)) + + # Process custom dataset artifact link + data_dict = loggers.remote_dataset + if resume: # If resuming runs from remote artifact + weights, epochs, hyp, batch_size = opt.weights, opt.epochs, opt.hyp, opt.batch_size + + # Config + plots = not evolve and not opt.noplots # create plots + cuda = device.type != 'cpu' + init_seeds(opt.seed + 1 + RANK, deterministic=True) + with torch_distributed_zero_first(LOCAL_RANK): + data_dict = data_dict or check_dataset(data) # check if None + train_path, val_path = data_dict['train'], data_dict['val'] + nc = 1 if single_cls else int(data_dict['nc']) # number of classes + names = {0: 'item'} if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + is_coco = isinstance(val_path, str) and val_path.endswith('coco/val2017.txt') # COCO dataset + + # Model + check_suffix(weights, '.pt') # check weights + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(LOCAL_RANK): + weights = attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location='cpu') # load checkpoint to CPU to avoid CUDA memory leak + model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else [] # exclude keys + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = intersect_dicts(csd, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(csd, strict=False) # load + LOGGER.info(f'Transferred {len(csd)}/{len(model.state_dict())} items from {weights}') # report + else: + model = Model(cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + amp = check_amp(model) # check AMP + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # layers to freeze + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + # v.register_hook(lambda x: torch.nan_to_num(x)) # NaN to 0 (commented for erratic training results) + if any(x in k for x in freeze): + LOGGER.info(f'freezing {k}') + v.requires_grad = False + + # Image size + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(opt.imgsz, gs, floor=gs * 2) # verify imgsz is gs-multiple + + # Batch size + if RANK == -1 and batch_size == -1: # single-GPU only, estimate best batch size + batch_size = check_train_batch_size(model, imgsz, amp) + loggers.on_params_update({'batch_size': batch_size}) + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay + optimizer = smart_optimizer(model, opt.optimizer, hyp['lr0'], hyp['momentum'], hyp['weight_decay']) + + # Scheduler + if opt.cos_lr: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + else: + lf = lambda x: (1 - x / epochs) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if RANK in {-1, 0} else None + + # Resume + best_fitness, start_epoch = 0.0, 0 + if pretrained: + if resume: + best_fitness, start_epoch, epochs = smart_resume(ckpt, optimizer, ema, weights, epochs, resume) + del ckpt, csd + + # DP mode + if cuda and RANK == -1 and torch.cuda.device_count() > 1: + LOGGER.warning('WARNING ⚠️ DP not recommended, use torch.distributed.run for best DDP Multi-GPU results.\n' + 'See Multi-GPU Tutorial at https://github.com/ultralytics/yolov5/issues/475 to get started.') + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and RANK != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + LOGGER.info('Using SyncBatchNorm()') + + # Trainloader + train_loader, dataset = create_dataloader(train_path, + imgsz, + batch_size // WORLD_SIZE, + gs, + single_cls, + hyp=hyp, + augment=True, + cache=None if opt.cache == 'val' else opt.cache, + rect=opt.rect, + rank=LOCAL_RANK, + workers=workers, + image_weights=opt.image_weights, + quad=opt.quad, + prefix=colorstr('train: '), + shuffle=True, + seed=opt.seed) + labels = np.concatenate(dataset.labels, 0) + mlc = int(labels[:, 0].max()) # max label class + assert mlc < nc, f'Label class {mlc} exceeds nc={nc} in {data}. 
Possible class labels are 0-{nc - 1}' + + # Process 0 + if RANK in {-1, 0}: + val_loader = create_dataloader(val_path, + imgsz, + batch_size // WORLD_SIZE * 2, + gs, + single_cls, + hyp=hyp, + cache=None if noval else opt.cache, + rect=True, + rank=-1, + workers=workers * 2, + pad=0.5, + prefix=colorstr('val: '))[0] + + if not resume: + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # run AutoAnchor + model.half().float() # pre-reduce anchor precision + + callbacks.run('on_pretrain_routine_end', labels, names) + + # DDP mode + if cuda and RANK != -1: + model = smart_DDP(model) + + # Model attributes + nl = de_parallel(model).model[-1].nl # number of detection layers (to scale hyps) + hyp['box'] *= 3 / nl # scale to layers + hyp['cls'] *= nc / 80 * 3 / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3 / nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nb = len(train_loader) # number of batches + nw = max(round(hyp['warmup_epochs'] * nb), 100) # number of warmup iterations, max(3 epochs, 100 iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + last_opt_step = -1 + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = torch.cuda.amp.GradScaler(enabled=amp) + stopper, stop = EarlyStopping(patience=opt.patience), False + compute_loss = ComputeLoss(model) # init loss class + callbacks.run('on_train_start') + LOGGER.info(f'Image sizes {imgsz} train, {imgsz} val\n' + f'Using {train_loader.num_workers * WORLD_SIZE} dataloader workers\n' + f"Logging results to {colorstr('bold', save_dir)}\n" + f'Starting training for {epochs} epochs...') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + callbacks.run('on_train_epoch_start') + model.train() + + # Update image weights (optional, single-GPU only) + if opt.image_weights: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + + # Update mosaic border (optional) + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(3, device=device) # mean losses + if RANK != -1: + train_loader.sampler.set_epoch(epoch) + pbar = enumerate(train_loader) + LOGGER.info(('\n' + '%11s' * 7) % ('Epoch', 'GPU_mem', 'box_loss', 'obj_loss', 'cls_loss', 'Instances', 'Size')) + if RANK in {-1, 0}: + pbar = tqdm(pbar, total=nb, bar_format=TQDM_BAR_FORMAT) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + callbacks.run('on_train_batch_start') + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] 
# x interp + # compute_loss.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 0 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5) + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = nn.functional.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) + + # Forward + with torch.cuda.amp.autocast(amp): + pred = model(imgs) # forward + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if RANK != -1: + loss *= WORLD_SIZE # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html + if ni - last_opt_step >= accumulate: + scaler.unscale_(optimizer) # unscale gradients + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10.0) # clip gradients + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + last_opt_step = ni + + # Log + if RANK in {-1, 0}: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = f'{torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0:.3g}G' # (GB) + pbar.set_description(('%11s' * 2 + '%11.4g' * 5) % + (f'{epoch}/{epochs - 1}', mem, *mloss, targets.shape[0], imgs.shape[-1])) + callbacks.run('on_train_batch_end', model, ni, imgs, targets, paths, list(mloss)) + if callbacks.stop_training: + return + # end batch ------------------------------------------------------------------------------------------------ + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for loggers + scheduler.step() + + if RANK in {-1, 0}: + # mAP + callbacks.run('on_train_epoch_end', epoch=epoch) + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'names', 'stride', 'class_weights']) + final_epoch = (epoch + 1 == epochs) or stopper.possible_stop + if not noval or final_epoch: # Calculate mAP + results, maps, _ = validate.run(data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + half=amp, + model=ema.ema, + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + plots=False, + callbacks=callbacks, + compute_loss=compute_loss) + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + stop = stopper(epoch=epoch, fitness=fi) # early stop check + if fi > best_fitness: + best_fitness = fi + log_vals = list(mloss) + list(results) + lr + callbacks.run('on_fit_epoch_end', log_vals, epoch, best_fitness, fi) + + # Save model + if (not nosave) or (final_epoch and not evolve): # if save + ckpt = { + 'epoch': epoch, + 'best_fitness': best_fitness, + 'model': deepcopy(de_parallel(model)).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'opt': vars(opt), + 'git': GIT_INFO, # {remote, branch, commit} if a git repo + 'date': datetime.now().isoformat()} + + # Save 
last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if opt.save_period > 0 and epoch % opt.save_period == 0: + torch.save(ckpt, w / f'epoch{epoch}.pt') + del ckpt + callbacks.run('on_model_save', last, epoch, final_epoch, best_fitness, fi) + + # EarlyStopping + if RANK != -1: # if DDP training + broadcast_list = [stop if RANK == 0 else None] + dist.broadcast_object_list(broadcast_list, 0) # broadcast 'stop' to all ranks + if RANK != 0: + stop = broadcast_list[0] + if stop: + break # must break all DDP ranks + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training ----------------------------------------------------------------------------------------------------- + if RANK in {-1, 0}: + LOGGER.info(f'\n{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.') + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if f is best: + LOGGER.info(f'\nValidating {f}...') + results, _, _ = validate.run( + data_dict, + batch_size=batch_size // WORLD_SIZE * 2, + imgsz=imgsz, + model=attempt_load(f, device).half(), + iou_thres=0.65 if is_coco else 0.60, # best pycocotools at iou 0.65 + single_cls=single_cls, + dataloader=val_loader, + save_dir=save_dir, + save_json=is_coco, + verbose=True, + plots=plots, + callbacks=callbacks, + compute_loss=compute_loss) # val best model with plots + if is_coco: + callbacks.run('on_fit_epoch_end', list(mloss) + list(results) + lr, epoch, best_fitness, fi) + + callbacks.run('on_train_end', last, best, epoch, results) + + torch.cuda.empty_cache() + return results + + +def parse_opt(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=100, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='image --cache ram/disk') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Logger arguments + parser.add_argument('--entity', default=None, help='Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='Version of dataset artifact to use') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def main(opt, callbacks=Callbacks()): + # Checks + if RANK in {-1, 0}: + print_args(vars(opt)) + check_git_status() + check_requirements() + + # Resume (from specified or most recent last.pt) + if opt.resume and not check_comet_resume(opt) and not opt.evolve: + last = Path(check_file(opt.resume) if isinstance(opt.resume, str) else get_latest_run()) + opt_yaml = last.parent.parent / 'opt.yaml' # train options yaml + opt_data = opt.data # original dataset + if opt_yaml.is_file(): + with open(opt_yaml, errors='ignore') as f: + d = yaml.safe_load(f) + else: + d = torch.load(last, map_location='cpu')['opt'] + opt = argparse.Namespace(**d) # replace + opt.cfg, opt.weights, opt.resume = '', str(last), True # reinstate + if is_url(opt_data): + opt.data = check_file(opt_data) # avoid HUB resume auth timeout + else: + opt.data, opt.cfg, opt.hyp, opt.weights, opt.project = \ + check_file(opt.data), check_yaml(opt.cfg), check_yaml(opt.hyp), str(opt.weights), str(opt.project) # checks + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + if opt.evolve: + if opt.project == str(ROOT / 'runs/train'): # if default project name, rename to runs/evolve + opt.project = str(ROOT / 'runs/evolve') + opt.exist_ok, opt.resume = opt.resume, False # pass resume to exist_ok and disable resume + if opt.name == 'cfg': + opt.name = Path(opt.cfg).stem # use model.yaml as name + 
opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) + + # DDP mode + device = select_device(opt.device, batch_size=opt.batch_size) + if LOCAL_RANK != -1: + msg = 'is not compatible with YOLOv5 Multi-GPU DDP training' + assert not opt.image_weights, f'--image-weights {msg}' + assert not opt.evolve, f'--evolve {msg}' + assert opt.batch_size != -1, f'AutoBatch with --batch-size -1 {msg}, please pass a valid --batch-size' + assert opt.batch_size % WORLD_SIZE == 0, f'--batch-size {opt.batch_size} must be multiple of WORLD_SIZE' + assert torch.cuda.device_count() > LOCAL_RANK, 'insufficient CUDA devices for DDP command' + torch.cuda.set_device(LOCAL_RANK) + device = torch.device('cuda', LOCAL_RANK) + dist.init_process_group(backend='nccl' if dist.is_nccl_available() else 'gloo') + + # Train + if not opt.evolve: + train(opt.hyp, opt, device, callbacks) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = { + 'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mosaic (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 1.0)} # segment copy-paste (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + if opt.noautoanchor: + del hyp['anchors'], meta['anchors'] + opt.noval, opt.nosave, save_dir = True, True, Path(opt.save_dir) # only val/save final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + evolve_yaml, evolve_csv = save_dir / 'hyp_evolve.yaml', save_dir / 'evolve.csv' + if opt.bucket: + # download evolve.csv if exists + subprocess.run([ + 'gsutil', + 'cp', + f'gs://{opt.bucket}/evolve.csv', + str(evolve_csv),]) + + for _ in range(opt.evolve): # generations to evolve + if evolve_csv.exists(): # if evolve.csv exists:
select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt(evolve_csv, ndmin=2, delimiter=',', skiprows=1) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() + 1E-6 # weights (sum > 0) + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([meta[k][0] for k in hyp.keys()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device, callbacks) + callbacks = Callbacks() + # Write mutation results + keys = ('metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', + 'val/obj_loss', 'val/cls_loss') + print_mutation(keys, results, hyp.copy(), save_dir, opt.bucket) + + # Plot results + plot_evolve(evolve_csv) + LOGGER.info(f'Hyperparameter evolution finished {opt.evolve} generations\n' + f"Results saved to {colorstr('bold', save_dir)}\n" + f'Usage example: $ python train.py --hyp {evolve_yaml}') + + +def run(**kwargs): + # Usage: import train; train.run(data='coco128.yaml', imgsz=320, weights='yolov5m.pt') + opt = parse_opt(True) + for k, v in kwargs.items(): + setattr(opt, k, v) + main(opt) + return opt + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov5/utils/__init__.py b/src/train_utils/train_models/models/yolov5/utils/__init__.py new file mode 100644 index 0000000..5b9fcd5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/__init__.py @@ -0,0 +1,82 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +utils/initialization +""" + +import contextlib +import platform +import threading + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +class TryExcept(contextlib.ContextDecorator): + # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager + def __init__(self, msg=''): + self.msg = msg + + def __enter__(self): + pass + + def __exit__(self, exc_type, value, traceback): + if value: + print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}")) + return True + + +def threaded(func): + # Multi-threads a target function and returns thread. Usage: @threaded decorator + def wrapper(*args, **kwargs): + thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True) + thread.start() + return thread + + return wrapper + + +def join_threads(verbose=False): + # Join all daemon threads, i.e. 
atexit.register(lambda: join_threads()) + main_thread = threading.current_thread() + for t in threading.enumerate(): + if t is not main_thread: + if verbose: + print(f'Joining thread {t.name}') + t.join() + + +def notebook_init(verbose=True): + # Check system software and hardware + print('Checking setup...') + + import os + import shutil + + from utils.general import check_font, check_requirements, is_colab + from utils.torch_utils import select_device # imports + + check_font() + + import psutil + + if is_colab(): + shutil.rmtree('/content/sample_data', ignore_errors=True) # remove colab /sample_data directory + + # System info + display = None + if verbose: + gb = 1 << 30 # bytes to GiB (1024 ** 3) + ram = psutil.virtual_memory().total + total, used, free = shutil.disk_usage('/') + with contextlib.suppress(Exception): # clear display if ipython is installed + from IPython import display + display.clear_output() + s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)' + else: + s = '' + + select_device(newline=False) + print(emojis(f'Setup complete ✅ {s}')) + return display diff --git a/src/train_utils/train_models/models/yolov5/utils/activations.py b/src/train_utils/train_models/models/yolov5/utils/activations.py new file mode 100644 index 0000000..084ce8c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/activations.py @@ -0,0 +1,103 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Activation functions +""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SiLU(nn.Module): + # SiLU activation https://arxiv.org/pdf/1606.08415.pdf + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): + # Hard-SiLU activation + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for TorchScript and CoreML + return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX + + +class Mish(nn.Module): + # Mish activation https://github.com/digantamisra98/Mish + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + # Mish activation memory-efficient + class F(torch.autograd.Function): + + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +class FReLU(nn.Module): + # FReLU activation https://arxiv.org/abs/2007.11824 + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) + + +class AconC(nn.Module): + r""" ACON activation (activate or not) + AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter + according to "Activate or Not: Learning Customized Activation" . 
+ """ + + def __init__(self, c1): + super().__init__() + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) + + def forward(self, x): + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x + + +class MetaAconC(nn.Module): + r""" ACON activation (activate or not) + MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network + according to "Activate or Not: Learning Customized Activation" . + """ + + def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r + super().__init__() + c2 = max(r, c1 // r) + self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) + self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) + self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) + # self.bn1 = nn.BatchNorm2d(c2) + # self.bn2 = nn.BatchNorm2d(c1) + + def forward(self, x): + y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) + # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 + # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable + beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed + dpx = (self.p1 - self.p2) * x + return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/src/train_utils/train_models/models/yolov5/utils/augmentations.py b/src/train_utils/train_models/models/yolov5/utils/augmentations.py new file mode 100644 index 0000000..7ab75f1 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/augmentations.py @@ -0,0 +1,397 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as TF + +from utils.general import LOGGER, check_version, colorstr, resample_segments, segment2box, xywhn2xyxy +from utils.metrics import bbox_ioa + +IMAGENET_MEAN = 0.485, 0.456, 0.406 # RGB mean +IMAGENET_STD = 0.229, 0.224, 0.225 # RGB standard deviation + + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self, size=640): + self.transform = None + prefix = colorstr('albumentations: ') + try: + import albumentations as A + check_version(A.__version__, '1.0.3', hard=True) # version requirement + + T = [ + A.RandomResizedCrop(height=size, width=size, scale=(0.8, 1.0), ratio=(0.9, 1.11), p=0.0), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(p=0.0), + A.RandomGamma(p=0.0), + A.ImageCompression(quality_lower=75, p=0.0)] # transforms + self.transform = A.Compose(T, bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels'])) + + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + except ImportError: # package not installed, skip + pass + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def normalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD, inplace=False): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. 
= (x - mean) / std + return TF.normalize(x, mean, std, inplace=inplace) + + +def denormalize(x, mean=IMAGENET_MEAN, std=IMAGENET_STD): + # Denormalize RGB images x per ImageNet stats in BCHW format, i.e. = x * std + mean + for i in range(3): + x[:, i] = x[:, i] * std[i] + mean[i] + return x + + +def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5): + # HSV color-space augmentation + if hgain or sgain or vgain: + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV)) + dtype = im.dtype # uint8 + + x = np.arange(0, 256, dtype=r.dtype) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))) + cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed + + +def hist_equalize(im, clahe=True, bgr=False): + # Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def replicate(im, labels): + # Replicate labels + h, w = im.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return im, labels + + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = im.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better val mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return im, ratio, (dw, dh) + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + 
shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) and len(segments) == n + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return im, targets + + +def copy_paste(im, labels, segments, p=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if p and n: + h, w, c = im.shape # height, width, channels + im_new = np.zeros(im.shape, np.uint8) + for j in random.sample(range(n), k=round(p * n)): + l, s = labels[j], 
segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (1, 1, 1), cv2.FILLED) + + result = cv2.flip(im, 1) # augment segments (flip left-right) + i = cv2.flip(im_new, 1).astype(bool) + im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug + + return im, labels, segments + + +def cutout(im, labels, p=0.5): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + if random.random() < p: + h, w = im.shape[:2] + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) # create random masks + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, xywhn2xyxy(labels[:, 1:5], w, h)) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def mixup(im, labels, im2, labels2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + return im, labels + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def classify_albumentations( + augment=True, + size=224, + scale=(0.08, 1.0), + ratio=(0.75, 1.0 / 0.75), # 0.75, 1.33 + hflip=0.5, + vflip=0.0, + jitter=0.4, + mean=IMAGENET_MEAN, + std=IMAGENET_STD, + auto_aug=False): + # YOLOv5 classification Albumentations (optional, only used if package is installed) + prefix = colorstr('albumentations: ') + try: + import albumentations as A + from albumentations.pytorch import ToTensorV2 + check_version(A.__version__, '1.0.3', hard=True) # version requirement + if augment: # Resize and crop + T = [A.RandomResizedCrop(height=size, width=size, scale=scale, ratio=ratio)] + if auto_aug: + # TODO: implement AugMix, AutoAug & RandAug in albumentation + LOGGER.info(f'{prefix}auto augmentations are currently not supported') + else: + if hflip > 0: + T += [A.HorizontalFlip(p=hflip)] + if vflip > 0: + T += [A.VerticalFlip(p=vflip)] + if jitter > 0: + color_jitter = (float(jitter),) * 3 # repeat value for brightness, contrast, saturation, 0 hue + T += [A.ColorJitter(*color_jitter, 0)] + else: # Use fixed crop for eval set (reproducibility) + T = [A.SmallestMaxSize(max_size=size), A.CenterCrop(height=size, width=size)] + T += [A.Normalize(mean=mean, std=std), ToTensorV2()] # Normalize and
convert to Tensor + LOGGER.info(prefix + ', '.join(f'{x}'.replace('always_apply=False, ', '') for x in T if x.p)) + return A.Compose(T) + + except ImportError: # package not installed, skip + LOGGER.warning(f'{prefix}⚠️ not found, install with `pip install albumentations` (recommended)') + except Exception as e: + LOGGER.info(f'{prefix}{e}') + + +def classify_transforms(size=224): + # Transforms to apply if albumentations not installed + assert isinstance(size, int), f'ERROR: classify_transforms size {size} must be integer, not (list, tuple)' + # T.Compose([T.ToTensor(), T.Resize(size), T.CenterCrop(size), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + return T.Compose([CenterCrop(size), ToTensor(), T.Normalize(IMAGENET_MEAN, IMAGENET_STD)]) + + +class LetterBox: + # YOLOv5 LetterBox class for image preprocessing, i.e. T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, size=(640, 640), auto=False, stride=32): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + self.auto = auto # pass max size integer, automatically solve for short side using stride + self.stride = stride # used with auto + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + r = min(self.h / imh, self.w / imw) # ratio of new/old + h, w = round(imh * r), round(imw * r) # resized image + hs, ws = (math.ceil(x / self.stride) * self.stride for x in (h, w)) if self.auto else (self.h, self.w) # parentheses required so both values come from the conditional + top, left = round((hs - h) / 2 - 0.1), round((ws - w) / 2 - 0.1) + im_out = np.full((self.h, self.w, 3), 114, dtype=im.dtype) + im_out[top:top + h, left:left + w] = cv2.resize(im, (w, h), interpolation=cv2.INTER_LINEAR) + return im_out + + +class CenterCrop: + # YOLOv5 CenterCrop class for image preprocessing, i.e. T.Compose([CenterCrop(size), ToTensor()]) + def __init__(self, size=640): + super().__init__() + self.h, self.w = (size, size) if isinstance(size, int) else size + + def __call__(self, im): # im = np.array HWC + imh, imw = im.shape[:2] + m = min(imh, imw) # min dimension + top, left = (imh - m) // 2, (imw - m) // 2 + return cv2.resize(im[top:top + m, left:left + m], (self.w, self.h), interpolation=cv2.INTER_LINEAR) + + +class ToTensor: + # YOLOv5 ToTensor class for image preprocessing, i.e.
T.Compose([LetterBox(size), ToTensor()]) + def __init__(self, half=False): + super().__init__() + self.half = half + + def __call__(self, im): # im = np.array HWC in BGR order + im = np.ascontiguousarray(im.transpose((2, 0, 1))[::-1]) # HWC to CHW -> BGR to RGB -> contiguous + im = torch.from_numpy(im) # to torch + im = im.half() if self.half else im.float() # uint8 to fp16/32 + im /= 255.0 # 0-255 to 0.0-1.0 + return im diff --git a/src/train_utils/train_models/models/yolov5/utils/autoanchor.py b/src/train_utils/train_models/models/yolov5/utils/autoanchor.py new file mode 100644 index 0000000..bb5cf6e --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/autoanchor.py @@ -0,0 +1,169 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +AutoAnchor utils +""" + +import random + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from utils import TryExcept +from utils.general import LOGGER, TQDM_BAR_FORMAT, colorstr + +PREFIX = colorstr('AutoAnchor: ') + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary + a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da and (da.sign() != ds.sign()): # same order + LOGGER.info(f'{PREFIX}Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + + +@TryExcept(f'{PREFIX}ERROR') +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1 / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1 / thr).float().mean() # best possible recall + return bpr, aat + + stride = m.stride.to(m.anchors.device).view(-1, 1, 1) # model strides + anchors = m.anchors.clone() * stride # current anchors + bpr, aat = metric(anchors.cpu().view(-1, 2)) + s = f'\n{PREFIX}{aat:.2f} anchors/target, {bpr:.3f} Best Possible Recall (BPR). 
' + if bpr > 0.98: # threshold to recompute + LOGGER.info(f'{s}Current anchors are a good fit to dataset ✅') + else: + LOGGER.info(f'{s}Anchors are a poor fit to dataset ⚠️, attempting to improve...') + na = m.anchors.numel() // 2 # number of anchors + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchors[:] = anchors.clone().view_as(m.anchors) + check_anchor_order(m) # must be in pixel-space (not grid-space) + m.anchors /= stride + s = f'{PREFIX}Done ✅ (optional: update model *.yaml to use these anchors in the future)' + else: + s = f'{PREFIX}Done ⚠️ (original anchors better than new anchors, proceeding with original anchors)' + LOGGER.info(s) + + +def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + dataset: path to data.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + from scipy.cluster.vq import kmeans + + npr = np.random + thr = 1 / thr + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1 / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k, verbose=True): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + s = f'{PREFIX}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr\n' \ + f'{PREFIX}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' \ + f'past_thr={x[x > thr].mean():.3f}-mean: ' + for x in k: + s += '%i,%i, ' % (round(x[0]), round(x[1])) + if verbose: + LOGGER.info(s[:-2]) + return k + + if isinstance(dataset, str): # *.yaml file + with open(dataset, errors='ignore') as f: + data_dict = yaml.safe_load(f) # model dict + from utils.dataloaders import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + LOGGER.info(f'{PREFIX}WARNING ⚠️ Extremely small objects found: {i} of {len(wh0)} labels are <3 pixels in size') + wh = wh0[(wh0 >= 2.0).any(1)].astype(np.float32) # filter > 2 pixels + # wh = wh * (npr.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans init + try: + LOGGER.info(f'{PREFIX}Running kmeans for {n} anchors on {len(wh)} points...') + assert n <= len(wh) # apply overdetermined constraint + s = wh.std(0) # sigmas for whitening + k = kmeans(wh / s, n, iter=30)[0] * s # points + assert n == len(k) # kmeans may return fewer points than requested if wh is 
insufficient or too similar + except Exception: + LOGGER.warning(f'{PREFIX}WARNING ⚠️ switching strategies from kmeans to random init') + k = np.sort(npr.rand(n * 2)).reshape(n, 2) * img_size # random init + wh, wh0 = (torch.tensor(x, dtype=torch.float32) for x in (wh, wh0)) + k = print_results(k, verbose=False) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), bar_format=TQDM_BAR_FORMAT) # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * random.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{PREFIX}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k, verbose) + + return print_results(k).astype(np.float32) diff --git a/src/train_utils/train_models/models/yolov5/utils/autobatch.py b/src/train_utils/train_models/models/yolov5/utils/autobatch.py new file mode 100644 index 0000000..bdeb91c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/autobatch.py @@ -0,0 +1,72 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Auto-batch utils +""" + +from copy import deepcopy + +import numpy as np +import torch + +from utils.general import LOGGER, colorstr +from utils.torch_utils import profile + + +def check_train_batch_size(model, imgsz=640, amp=True): + # Check YOLOv5 training batch size + with torch.cuda.amp.autocast(amp): + return autobatch(deepcopy(model).train(), imgsz) # compute optimal batch size + + +def autobatch(model, imgsz=640, fraction=0.8, batch_size=16): + # Automatically estimate best YOLOv5 batch size to use `fraction` of available CUDA memory + # Usage: + # import torch + # from utils.autobatch import autobatch + # model = torch.hub.load('ultralytics/yolov5', 'yolov5s', autoshape=False) + # print(autobatch(model)) + + # Check device + prefix = colorstr('AutoBatch: ') + LOGGER.info(f'{prefix}Computing optimal batch size for --imgsz {imgsz}') + device = next(model.parameters()).device # get model device + if device.type == 'cpu': + LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}') + return batch_size + if torch.backends.cudnn.benchmark: + LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}') + return batch_size + + # Inspect CUDA memory + gb = 1 << 30 # bytes to GiB (1024 ** 3) + d = str(device).upper() # 'CUDA:0' + properties = torch.cuda.get_device_properties(device) # device properties + t = properties.total_memory / gb # GiB total + r = torch.cuda.memory_reserved(device) / gb # GiB reserved + a = torch.cuda.memory_allocated(device) / gb # GiB allocated + f = t - (r + a) # GiB free + LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free') + + # Profile batch sizes + batch_sizes = [1, 2, 4, 8, 16] + try: + 
img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes] + results = profile(img, model, n=3, device=device) + except Exception as e: + LOGGER.warning(f'{prefix}{e}') + + # Fit a solution + y = [x[2] for x in results if x] # memory [2] + p = np.polyfit(batch_sizes[:len(y)], y, deg=1) # first degree polynomial fit + b = int((f * fraction - p[1]) / p[0]) # y intercept (optimal batch size) + if None in results: # some sizes failed + i = results.index(None) # first fail index + if b >= batch_sizes[i]: # y intercept above failure point + b = batch_sizes[max(i - 1, 0)] # select prior safe point + if b < 1 or b > 1024: # b outside of safe range + b = batch_size + LOGGER.warning(f'{prefix}WARNING ⚠️ CUDA anomaly detected, recommend restart environment and retry command.') + + fraction = (np.polyval(p, b) + r + a) / t # actual fraction predicted + LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅') + return b diff --git a/src/train_utils/train_models/models/yolov5/utils/aws/__init__.py b/src/train_utils/train_models/models/yolov5/utils/aws/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/yolov5/utils/aws/mime.sh b/src/train_utils/train_models/models/yolov5/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/src/train_utils/train_models/models/yolov5/utils/aws/resume.py b/src/train_utils/train_models/models/yolov5/utils/aws/resume.py new file mode 100644 index 0000000..b21731c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/aws/resume.py @@ -0,0 +1,40 @@ +# Resume all interrupted trainings in yolov5/ dir including DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[2] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml', errors='ignore') as f: + opt = yaml.safe_load(f) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.run --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py 
--resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/src/train_utils/train_models/models/yolov5/utils/aws/userdata.sh b/src/train_utils/train_models/models/yolov5/utils/aws/userdata.sh new file mode 100644 index 0000000..5fc1332 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolov5 ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone https://github.com/ultralytics/yolov5 -b master && sudo chmod -R 777 yolov5 + cd yolov5 + bash data/scripts/get_coco.sh && echo "COCO done." & + sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/src/train_utils/train_models/models/yolov5/utils/callbacks.py b/src/train_utils/train_models/models/yolov5/utils/callbacks.py new file mode 100644 index 0000000..166d893 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/callbacks.py @@ -0,0 +1,76 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Callback utils +""" + +import threading + + +class Callbacks: + """" + Handles all registered callbacks for YOLOv5 Hooks + """ + + def __init__(self): + # Define the available callbacks + self._callbacks = { + 'on_pretrain_routine_start': [], + 'on_pretrain_routine_end': [], + 'on_train_start': [], + 'on_train_epoch_start': [], + 'on_train_batch_start': [], + 'optimizer_step': [], + 'on_before_zero_grad': [], + 'on_train_batch_end': [], + 'on_train_epoch_end': [], + 'on_val_start': [], + 'on_val_batch_start': [], + 'on_val_image_end': [], + 'on_val_batch_end': [], + 'on_val_end': [], + 'on_fit_epoch_end': [], # fit = train + val + 'on_model_save': [], + 'on_train_end': [], + 'on_params_update': [], + 'teardown': [],} + self.stop_training = False # set True to interrupt training + + def register_action(self, hook, name='', callback=None): + """ + Register a new action to a callback hook + + Args: + hook: The callback hook name to register the action to + name: The name of the action for later reference + callback: The callback to fire + """ + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + assert callable(callback), f"callback '{callback}' is not callable" + self._callbacks[hook].append({'name': name, 'callback': callback}) + + def get_registered_actions(self, hook=None): + """" + Returns all the registered actions by callback hook + + Args: + hook: The name of the hook to check, defaults to all + """ + return self._callbacks[hook] if hook else self._callbacks + + def run(self, 
hook, *args, thread=False, **kwargs): + """ + Loop through the registered actions and fire all callbacks on main thread + + Args: + hook: The name of the hook to check, defaults to all + args: Arguments to receive from YOLOv5 + thread: (boolean) Run callbacks in daemon thread + kwargs: Keyword Arguments to receive from YOLOv5 + """ + + assert hook in self._callbacks, f"hook '{hook}' not found in callbacks {self._callbacks}" + for logger in self._callbacks[hook]: + if thread: + threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start() + else: + logger['callback'](*args, **kwargs) diff --git a/src/train_utils/train_models/models/yolov5/utils/dataloaders.py b/src/train_utils/train_models/models/yolov5/utils/dataloaders.py new file mode 100644 index 0000000..28d5b79 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/dataloaders.py @@ -0,0 +1,1222 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders and dataset utils +""" + +import contextlib +import glob +import hashlib +import json +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import Pool, ThreadPool +from pathlib import Path +from threading import Thread +from urllib.parse import urlparse + +import numpy as np +import psutil +import torch +import torch.nn.functional as F +import torchvision +import yaml +from PIL import ExifTags, Image, ImageOps +from torch.utils.data import DataLoader, Dataset, dataloader, distributed +from tqdm import tqdm + +from utils.augmentations import (Albumentations, augment_hsv, classify_albumentations, classify_transforms, copy_paste, + letterbox, mixup, random_perspective) +from utils.general import (DATASETS_DIR, LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, check_dataset, check_requirements, + check_yaml, clean_str, cv2, is_colab, is_kaggle, segments2boxes, unzip_file, xyn2xy, + xywh2xyxy, xywhn2xyxy, xyxy2xywhn) +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +HELP_URL = 'See https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes +VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +PIN_MEMORY = str(os.getenv('PIN_MEMORY', True)).lower() == 'true' # global pin_memory for dataloaders + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(paths): + # Returns a single hash value of a list of paths (files or dirs) + size = sum(os.path.getsize(p) for p in paths if os.path.exists(p)) # sizes + h = hashlib.sha256(str(size).encode()) # hash sizes + h.update(''.join(paths).encode()) # hash paths + return h.hexdigest() # return hash + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + with contextlib.suppress(Exception): + rotation = dict(img._getexif().items())[orientation] + if rotation in [6, 8]: # rotation 270 or 90 + s = (s[1], s[0]) + return s + + +def exif_transpose(image): + """ + Transpose a PIL image accordingly if it has an EXIF Orientation tag. + Inplace version of https://github.com/python-pillow/Pillow/blob/master/src/PIL/ImageOps.py exif_transpose() + + :param image: The image to transpose. 
+ :return: An image. + """ + exif = image.getexif() + orientation = exif.get(0x0112, 1) # default 1 + if orientation > 1: + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90}.get(orientation) + if method is not None: + image = image.transpose(method) + del exif[0x0112] + image.info['exif'] = exif.tobytes() + return image + + +def seed_worker(worker_id): + # Set dataloader worker seed https://pytorch.org/docs/stable/notes/randomness.html#dataloader + worker_seed = torch.initial_seed() % 2 ** 32 + np.random.seed(worker_seed) + random.seed(worker_seed) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + seed=0): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabels( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + seed + RANK) + return loader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=PIN_MEMORY, + collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn, + worker_init_fn=seed_worker, + generator=generator), dataset + + +class InfiniteDataLoader(dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for _ in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler: + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadScreenshots: + # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source "screen 0 100 100 512 256"` + def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None): + # source = [screen_number left top width height] (pixels) + check_requirements('mss') + import mss + + source, *params = source.split() + self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0 + if len(params) == 1: + self.screen = int(params[0]) + elif len(params) == 4: + left, top, width, height = (int(x) for x in params) + elif len(params) == 5: + self.screen, left, top, width, height = (int(x) for x in params) + self.img_size = img_size + self.stride = stride + self.transforms = transforms + self.auto = auto + self.mode = 'stream' + self.frame = 0 + self.sct = mss.mss() + + # Parse monitor shape + monitor = self.sct.monitors[self.screen] + self.top = monitor['top'] if top is None else (monitor['top'] + top) + self.left = monitor['left'] if left is None else (monitor['left'] + left) + self.width = width or monitor['width'] + self.height = height or monitor['height'] + self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height} + + def __iter__(self): + return self + + def __next__(self): + # mss screen capture: get raw pixels from the screen as np array + im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR + s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + self.frame += 1 + return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s + + +class LoadImages: + # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4` + def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line + path = Path(path).read_text().rsplit() + files = [] + for p in sorted(path) if isinstance(path, (list, tuple)) else [path]: + p = str(Path(p).resolve()) + if '*' in p: + files.extend(sorted(glob.glob(p, recursive=True))) # glob + elif os.path.isdir(p): + files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir + elif os.path.isfile(p): + files.append(p) # files + else: + raise FileNotFoundError(f'{p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS] + videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + self.auto = auto + self.transforms = transforms # optional + self.vid_stride = vid_stride # video frame-rate stride + if any(videos): + self._new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. 
' \ + f'Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + for _ in range(self.vid_stride): + self.cap.grab() + ret_val, im0 = self.cap.retrieve() + while not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + path = self.files[self.count] + self._new_video(path) + ret_val, im0 = self.cap.read() + + self.frame += 1 + # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False + s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: ' + + else: + # Read image + self.count += 1 + im0 = cv2.imread(path) # BGR + assert im0 is not None, f'Image Not Found {path}' + s = f'image {self.count}/{self.nf} {path}: ' + + if self.transforms: + im = self.transforms(im0) # transforms + else: + im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize + im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + im = np.ascontiguousarray(im) # contiguous + + return path, im, im0, self.cap, s + + def _new_video(self, path): + # Create a new video capture object + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride) + self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees + # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493 + + def _cv2_rotate(self, im): + # Rotate a cv2 video manually + if self.orientation == 0: + return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE) + elif self.orientation == 180: + return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE) + elif self.orientation == 90: + return cv2.rotate(im, cv2.ROTATE_180) + return im + + def __len__(self): + return self.nf # number of files + + +class LoadStreams: + # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams` + def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1): + torch.backends.cudnn.benchmark = True # faster for fixed-size inference + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + self.vid_stride = vid_stride # video frame-rate stride + sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources] + n = len(sources) + self.sources = [clean_str(x) for x in sources] # clean source names for later + self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n + for i, s in enumerate(sources): # index, source + # Start thread to read frames from video stream + st = f'{i + 1}/{n}: {s}... ' + if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video + # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/Zgi9g1ksQHc' + check_requirements(('pafy', 'youtube_dl==2020.12.2')) + import pafy + s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL + s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam + if s == 0: + assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.' + assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. Rerun command in a local environment.' 
+ cap = cv2.VideoCapture(s) + assert cap.isOpened(), f'{st}Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan + self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback + self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback + + _, self.imgs[i] = cap.read() # guarantee first frame + self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True) + LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)') + self.threads[i].start() + LOGGER.info('') # newline + + # check for common shapes + s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs]) + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + self.auto = auto and self.rect + self.transforms = transforms # optional + if not self.rect: + LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.') + + def update(self, i, cap, stream): + # Read stream `i` frames in daemon thread + n, f = 0, self.frames[i] # frame number, frame array + while cap.isOpened() and n < f: + n += 1 + cap.grab() # .read() = .grab() followed by .retrieve() + if n % self.vid_stride == 0: + success, im = cap.retrieve() + if success: + self.imgs[i] = im + else: + LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.') + self.imgs[i] = np.zeros_like(self.imgs[i]) + cap.open(stream) # re-open stream if signal was lost + time.sleep(0.0) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + im0 = self.imgs.copy() + if self.transforms: + im = np.stack([self.transforms(x) for x in im0]) # transforms + else: + im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize + im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW + im = np.ascontiguousarray(im) # contiguous + + return self.sources, im, im0, None, '' + + def __len__(self): + return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = f'{os.sep}images{os.sep}', f'{os.sep}labels{os.sep}' # /images/, /labels/ substrings + return [sb.join(x.rsplit(sa, 1)).rsplit('.', 1)[0] + '.txt' for x in img_paths] + + +class LoadImagesAndLabels(Dataset): + # YOLOv5 train_loader/val_loader, loads images and labels for training and validation + cache_version = 0.6 # dataset labels *.cache version + rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4] + + def __init__(self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0.0, + min_items=0, + prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = 
stride + self.path = path + self.albumentations = Albumentations(size=img_size) if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('*.*')) # pathlib + elif p.is_file(): # file + with open(p) as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent, 1) if x.startswith('./') else x for x in t] # to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # to global path (pathlib) + else: + raise FileNotFoundError(f'{prefix}{p} does not exist') + self.im_files = sorted(x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in IMG_FORMATS) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in IMG_FORMATS]) # pathlib + assert self.im_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\n{HELP_URL}') from e + + # Check cache + self.label_files = img2label_paths(self.im_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') + try: + cache, exists = np.load(cache_path, allow_pickle=True).item(), True # load dict + assert cache['version'] == self.cache_version # matches current version + assert cache['hash'] == get_hash(self.label_files + self.im_files) # identical hash + except Exception: + cache, exists = self.cache_labels(cache_path, prefix), False # run cache ops + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupt, total + if exists and LOCAL_RANK in {-1, 0}: + d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt' + tqdm(None, desc=prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT) # display cache results + if cache['msgs']: + LOGGER.info('\n'.join(cache['msgs'])) # display warnings + assert nf > 0 or not augment, f'{prefix}No labels found in {cache_path}, can not start training. {HELP_URL}' + + # Read cache + [cache.pop(k) for k in ('hash', 'version', 'msgs')] # remove items + labels, shapes, self.segments = zip(*cache.values()) + nl = len(np.concatenate(labels, 0)) # number of labels + assert nl > 0 or not augment, f'{prefix}All labels empty in {cache_path}, can not start training. 
{HELP_URL}' + self.labels = list(labels) + self.shapes = np.array(shapes) + self.im_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + + # Filter images + if min_items: + include = np.array([len(x) >= min_items for x in self.labels]).nonzero()[0].astype(int) + LOGGER.info(f'{prefix}{n - len(include)}/{n} images filtered from dataset') + self.im_files = [self.im_files[i] for i in include] + self.label_files = [self.label_files[i] for i in include] + self.labels = [self.labels[i] for i in include] + self.segments = [self.segments[i] for i in include] + self.shapes = self.shapes[include] # wh + + # Create indices + n = len(self.shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Update labels + include_class = [] # filter labels to include only these classes (optional) + self.segments = list(self.segments) + include_class_array = np.array(include_class).reshape(1, -1) + for i, (label, segment) in enumerate(zip(self.labels, self.segments)): + if include_class: + j = (label[:, 0:1] == include_class_array).any(1) + self.labels[i] = label[j] + if segment: + self.segments[i] = [segment[idx] for idx, elem in enumerate(j) if elem] + if single_cls: # single-class training, merge all classes into 0 + self.labels[i][:, 0] = 0 + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.im_files = [self.im_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.segments = [self.segments[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into RAM/disk for faster training + if cache_images == 'ram' and not self.check_cache_ram(prefix=prefix): + cache_images = False + self.ims = [None] * n + self.npy_files = [Path(f).with_suffix('.npy') for f in self.im_files] + if cache_images: + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + self.im_hw0, self.im_hw = [None] * n, [None] * n + fcn = self.cache_images_to_disk if cache_images == 'disk' else self.load_image + results = ThreadPool(NUM_THREADS).imap(fcn, range(n)) + pbar = tqdm(enumerate(results), total=n, bar_format=TQDM_BAR_FORMAT, disable=LOCAL_RANK > 0) + for i, x in pbar: + if cache_images == 'disk': + b += self.npy_files[i].stat().st_size + else: # 'ram' + self.ims[i], self.im_hw0[i], self.im_hw[i] = x # im, hw_orig, hw_resized = load_image(self, i) + b += self.ims[i].nbytes + pbar.desc = f'{prefix}Caching images ({b / gb:.1f}GB {cache_images})' + pbar.close() + + def check_cache_ram(self, safety_margin=0.1, prefix=''): + # Check image caching requirements vs available memory + b, gb = 0, 1 << 30 # bytes of cached images, bytes per gigabytes + n = min(self.n, 30) # extrapolate from 30 random images + for _ in range(n): + im = cv2.imread(random.choice(self.im_files)) # sample image + ratio = self.img_size / max(im.shape[0], im.shape[1]) # max(h, w) # ratio + b += im.nbytes * ratio ** 2 + mem_required = b 
* self.n / n # GB required to cache dataset into RAM + mem = psutil.virtual_memory() + cache = mem_required * (1 + safety_margin) < mem.available # to cache or not to cache, that is the question + if not cache: + LOGGER.info(f'{prefix}{mem_required / gb:.1f}GB RAM required, ' + f'{mem.available / gb:.1f}/{mem.total / gb:.1f}GB available, ' + f"{'caching images ✅' if cache else 'not caching images ⚠️'}") + return cache + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number missing, found, empty, corrupt, messages + desc = f'{prefix}Scanning {path.parent / path.stem}...' + with Pool(NUM_THREADS) as pool: + pbar = tqdm(pool.imap(verify_image_label, zip(self.im_files, self.label_files, repeat(prefix))), + desc=desc, + total=len(self.im_files), + bar_format=TQDM_BAR_FORMAT) + for im_file, lb, shape, segments, nm_f, nf_f, ne_f, nc_f, msg in pbar: + nm += nm_f + nf += nf_f + ne += ne_f + nc += nc_f + if im_file: + x[im_file] = [lb, shape, segments] + if msg: + msgs.append(msg) + pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt' + + pbar.close() + if msgs: + LOGGER.info('\n'.join(msgs)) + if nf == 0: + LOGGER.warning(f'{prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}') + x['hash'] = get_hash(self.label_files + self.im_files) + x['results'] = nf, nm, ne, nc, len(self.im_files) + x['msgs'] = msgs # warnings + x['version'] = self.cache_version # cache version + try: + np.save(path, x) # save cache for next time + path.with_suffix('.cache.npy').rename(path) # remove .npy suffix + LOGGER.info(f'{prefix}New cache created: {path}') + except Exception as e: + LOGGER.warning(f'{prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable: {e}') # not writeable + return x + + def __len__(self): + return len(self.im_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + img, labels = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels = mixup(img, labels, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels = random_perspective(img, + labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1E-3) + + if self.augment: + # Albumentations + img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + 
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + + # Cutouts + # labels = cutout(img, labels, p=0.5) + # nl = len(labels) # update after cutout + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.im_files[index], shapes + + def load_image(self, i): + # Loads 1 image from dataset index 'i', returns (im, original hw, resized hw) + im, f, fn = self.ims[i], self.im_files[i], self.npy_files[i], + if im is None: # not cached in RAM + if fn.exists(): # load npy + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + assert im is not None, f'Image Not Found {f}' + h0, w0 = im.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # ratio + if r != 1: # if sizes are not equal + interp = cv2.INTER_LINEAR if (self.augment or r > 1) else cv2.INTER_AREA + im = cv2.resize(im, (math.ceil(w0 * r), math.ceil(h0 * r)), interpolation=interp) + return im, (h0, w0), im.shape[:2] # im, hw_original, hw_resized + return self.ims[i], self.im_hw0[i], self.im_hw[i] # im, hw_original, hw_resized + + def cache_images_to_disk(self, i): + # Saves an image as an *.npy file for faster loading + f = self.npy_files[i] + if not f.exists(): + np.save(f.as_posix(), cv2.imread(self.im_files[i])) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + random.shuffle(indices) + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when 
using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + def load_mosaic9(self, index): + # YOLOv5 9-mosaic loader. Loads 1 image + 8 random images into a 9-image mosaic + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + random.shuffle(indices) + hp, wp = -1, -1 # height, width previous + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = (max(x, 0) for x in c) # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = (int(random.uniform(0, s)) for _ in self.mosaic_border) # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, p=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, + labels9, + segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) # transposed + for i, lb in enumerate(label): + lb[:, 0] = i # add target image index for build_targets() + return torch.stack(im, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + im, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + im4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = 
torch.tensor([[0.0, 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0.0, 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, 0.5, 0.5, 0.5, 0.5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im1 = F.interpolate(im[i].unsqueeze(0).float(), scale_factor=2.0, mode='bilinear', + align_corners=False)[0].type(im[i].type()) + lb = label[i] + else: + im1 = torch.cat((torch.cat((im[i], im[i + 1]), 1), torch.cat((im[i + 2], im[i + 3]), 1)), 2) + lb = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + im4.append(im1) + label4.append(lb) + + for i, lb in enumerate(label4): + lb[:, 0] = i # add target image index for build_targets() + + return torch.stack(im4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def flatten_recursive(path=DATASETS_DIR / 'coco128'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(f'{str(path)}_flat') + if os.path.exists(new_path): + shutil.rmtree(new_path) # delete output folder + os.makedirs(new_path) # make new output folder + for file in tqdm(glob.glob(f'{str(Path(path))}/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path=DATASETS_DIR / 'coco128'): # from utils.dataloaders import *; extract_boxes() + # Convert detection dataset into classification dataset, with one directory per class + path = Path(path) # images dir + shutil.rmtree(path / 'classification') if (path / 'classification').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in IMG_FORMATS: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file) as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path=DATASETS_DIR / 'coco128/images', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.dataloaders import *; autosplit() + Arguments + path: Path to images directory + weights: Train, val, test weights (list, tuple) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sorted(x for x in path.rglob('*.*') if x.suffix[1:].lower() in IMG_FORMATS) # image files only + n = len(files) # number of files + random.seed(0) # for reproducibility + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + for x in txt: + if (path.parent / x).exists(): + (path.parent / 
x).unlink() # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path.parent / txt[i], 'a') as f: + f.write(f'./{img.relative_to(path.parent).as_posix()}' + '\n') # add image to txt file + + +def verify_image_label(args): + # Verify one image-label pair + im_file, lb_file, prefix = args + nm, nf, ne, nc, msg, segments = 0, 0, 0, 0, '', [] # number (missing, found, empty, corrupt), message, segments + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in IMG_FORMATS, f'invalid image format {im.format}' + if im.format.lower() in ('jpg', 'jpeg'): + with open(im_file, 'rb') as f: + f.seek(-2, 2) + if f.read() != b'\xff\xd9': # corrupt JPEG + ImageOps.exif_transpose(Image.open(im_file)).save(im_file, 'JPEG', subsampling=0, quality=100) + msg = f'{prefix}WARNING ⚠️ {im_file}: corrupt JPEG restored and saved' + + # verify labels + if os.path.isfile(lb_file): + nf = 1 # label found + with open(lb_file) as f: + lb = [x.split() for x in f.read().strip().splitlines() if len(x)] + if any(len(x) > 6 for x in lb): # is segment + classes = np.array([x[0] for x in lb], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in lb] # (cls, xy1...) + lb = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + lb = np.array(lb, dtype=np.float32) + nl = len(lb) + if nl: + assert lb.shape[1] == 5, f'labels require 5 columns, {lb.shape[1]} columns detected' + assert (lb >= 0).all(), f'negative label values {lb[lb < 0]}' + assert (lb[:, 1:] <= 1).all(), f'non-normalized or out of bounds coordinates {lb[:, 1:][lb[:, 1:] > 1]}' + _, i = np.unique(lb, axis=0, return_index=True) + if len(i) < nl: # duplicate row check + lb = lb[i] # remove duplicates + if segments: + segments = [segments[x] for x in i] + msg = f'{prefix}WARNING ⚠️ {im_file}: {nl - len(i)} duplicate labels removed' + else: + ne = 1 # label empty + lb = np.zeros((0, 5), dtype=np.float32) + else: + nm = 1 # label missing + lb = np.zeros((0, 5), dtype=np.float32) + return im_file, lb, shape, segments, nm, nf, ne, nc, msg + except Exception as e: + nc = 1 + msg = f'{prefix}WARNING ⚠️ {im_file}: ignoring corrupt image/label: {e}' + return [None, None, None, None, nm, nf, ne, nc, msg] + + +class HUBDatasetStats(): + """ Class for generating HUB dataset JSON and `-hub` dataset directory + + Arguments + path: Path to data.yaml or data.zip (with data.yaml inside data.zip) + autodownload: Attempt to download dataset if not found locally + + Usage + from utils.dataloaders import HUBDatasetStats + stats = HUBDatasetStats('coco128.yaml', autodownload=True) # usage 1 + stats = HUBDatasetStats('path/to/coco128.zip') # usage 2 + stats.get_json(save=False) + stats.process_images() + """ + + def __init__(self, path='coco128.yaml', autodownload=False): + # Initialize class + zipped, data_dir, yaml_path = self._unzip(Path(path)) + try: + with open(check_yaml(yaml_path), errors='ignore') as f: + data = yaml.safe_load(f) # data dict + if zipped: + data['path'] = data_dir + except Exception as e: + raise Exception('error/HUB/dataset_stats/yaml_load') from e + + check_dataset(data, autodownload) # download dataset if missing + 
self.hub_dir = Path(data['path'] + '-hub') + self.im_dir = self.hub_dir / 'images' + self.im_dir.mkdir(parents=True, exist_ok=True) # makes /images + self.stats = {'nc': data['nc'], 'names': list(data['names'].values())} # statistics dictionary + self.data = data + + @staticmethod + def _find_yaml(dir): + # Return data.yaml file + files = list(dir.glob('*.yaml')) or list(dir.rglob('*.yaml')) # try root level first and then recursive + assert files, f'No *.yaml file found in {dir}' + if len(files) > 1: + files = [f for f in files if f.stem == dir.stem] # prefer *.yaml files that match dir name + assert files, f'Multiple *.yaml files found in {dir}, only 1 *.yaml file allowed' + assert len(files) == 1, f'Multiple *.yaml files found: {files}, only 1 *.yaml file allowed in {dir}' + return files[0] + + def _unzip(self, path): + # Unzip data.zip + if not str(path).endswith('.zip'): # path is data.yaml + return False, None, path + assert Path(path).is_file(), f'Error unzipping {path}, file not found' + unzip_file(path, path=path.parent) + dir = path.with_suffix('') # dataset directory == zip name + assert dir.is_dir(), f'Error unzipping {path}, {dir} not found. path/to/abc.zip MUST unzip to path/to/abc/' + return True, str(dir), self._find_yaml(dir) # zipped, data_dir, yaml_path + + def _hub_ops(self, f, max_dim=1920): + # HUB ops for 1 image 'f': resize and save at reduced quality in /dataset-hub for web/app viewing + f_new = self.im_dir / Path(f).name # dataset-hub image filename + try: # use PIL + im = Image.open(f) + r = max_dim / max(im.height, im.width) # ratio + if r < 1.0: # image too large + im = im.resize((int(im.width * r), int(im.height * r))) + im.save(f_new, 'JPEG', quality=50, optimize=True) # save + except Exception as e: # use OpenCV + LOGGER.info(f'WARNING ⚠️ HUB ops PIL failure {f}: {e}') + im = cv2.imread(f) + im_height, im_width = im.shape[:2] + r = max_dim / max(im_height, im_width) # ratio + if r < 1.0: # image too large + im = cv2.resize(im, (int(im_width * r), int(im_height * r)), interpolation=cv2.INTER_AREA) + cv2.imwrite(str(f_new), im) + + def get_json(self, save=False, verbose=False): + # Return dataset JSON for Ultralytics HUB + def _round(labels): + # Update labels to integer class and 6 decimal place floats + return [[int(c), *(round(x, 4) for x in points)] for c, *points in labels] + + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + self.stats[split] = None # i.e. 
no test set + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + x = np.array([ + np.bincount(label[:, 0].astype(int), minlength=self.data['nc']) + for label in tqdm(dataset.labels, total=dataset.n, desc='Statistics')]) # shape(128x80) + self.stats[split] = { + 'instance_stats': { + 'total': int(x.sum()), + 'per_class': x.sum(0).tolist()}, + 'image_stats': { + 'total': dataset.n, + 'unlabelled': int(np.all(x == 0, 1).sum()), + 'per_class': (x > 0).sum(0).tolist()}, + 'labels': [{ + str(Path(k).name): _round(v.tolist())} for k, v in zip(dataset.im_files, dataset.labels)]} + + # Save, print and return + if save: + stats_path = self.hub_dir / 'stats.json' + print(f'Saving {stats_path.resolve()}...') + with open(stats_path, 'w') as f: + json.dump(self.stats, f) # save stats.json + if verbose: + print(json.dumps(self.stats, indent=2, sort_keys=False)) + return self.stats + + def process_images(self): + # Compress images for Ultralytics HUB + for split in 'train', 'val', 'test': + if self.data.get(split) is None: + continue + dataset = LoadImagesAndLabels(self.data[split]) # load dataset + desc = f'{split} images' + for _ in tqdm(ThreadPool(NUM_THREADS).imap(self._hub_ops, dataset.im_files), total=dataset.n, desc=desc): + pass + print(f'Done. All images saved to {self.im_dir}') + return self.im_dir + + +# Classification dataloaders ------------------------------------------------------------------------------------------- +class ClassificationDataset(torchvision.datasets.ImageFolder): + """ + YOLOv5 Classification Dataset. + Arguments + root: Dataset path + transform: torchvision transforms, used by default + album_transform: Albumentations transforms, used if installed + """ + + def __init__(self, root, augment, imgsz, cache=False): + super().__init__(root=root) + self.torch_transforms = classify_transforms(imgsz) + self.album_transforms = classify_albumentations(augment, imgsz) if augment else None + self.cache_ram = cache is True or cache == 'ram' + self.cache_disk = cache == 'disk' + self.samples = [list(x) + [Path(x[0]).with_suffix('.npy'), None] for x in self.samples] # file, index, npy, im + + def __getitem__(self, i): + f, j, fn, im = self.samples[i] # filename, index, filename.with_suffix('.npy'), image + if self.cache_ram and im is None: + im = self.samples[i][3] = cv2.imread(f) + elif self.cache_disk: + if not fn.exists(): # load npy + np.save(fn.as_posix(), cv2.imread(f)) + im = np.load(fn) + else: # read image + im = cv2.imread(f) # BGR + if self.album_transforms: + sample = self.album_transforms(image=cv2.cvtColor(im, cv2.COLOR_BGR2RGB))['image'] + else: + sample = self.torch_transforms(im) + return sample, j + + +def create_classification_dataloader(path, + imgsz=224, + batch_size=16, + augment=True, + cache=False, + rank=-1, + workers=8, + shuffle=True): + # Returns Dataloader object to be used with YOLOv5 Classifier + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = ClassificationDataset(root=path, imgsz=imgsz, augment=augment, cache=cache) + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + RANK) + return InfiniteDataLoader(dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + 
sampler=sampler, + pin_memory=PIN_MEMORY, + worker_init_fn=seed_worker, + generator=generator) # or DataLoader(persistent_workers=True) diff --git a/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile new file mode 100644 index 0000000..811ad4a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile @@ -0,0 +1,74 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CUDA-optimized for YOLOv5 single/multi-GPU training and inference + +# Start FROM PyTorch image https://hub.docker.com/r/pytorch/pytorch +FROM pytorch/pytorch:2.0.0-cuda11.7-cudnn8-runtime + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y gcc git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 + +# Security updates +# https://security.snyk.io/vuln/SNYK-UBUNTU1804-OPENSSL-3314796 +RUN apt upgrade --no-install-recommends -y openssl + +# Create working directory +RUN rm -rf /usr/src/app && mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app + +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations comet gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' + # tensorflow tensorflowjs \ + +# Set environment variables +ENV OMP_NUM_THREADS=1 + +# Cleanup +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest && sudo docker build -f utils/docker/Dockerfile -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all $t + +# Pull and Run with local directory access +# t=ultralytics/yolov5:latest && sudo docker pull $t && sudo docker run -it --ipc=host --gpus all -v "$(pwd)"/datasets:/usr/src/datasets $t + +# Kill all +# sudo docker kill $(sudo docker ps -q) + +# Kill all image-based +# sudo docker kill $(sudo docker ps -qa --filter ancestor=ultralytics/yolov5:latest) + +# DockerHub tag update +# t=ultralytics/yolov5:latest tnew=ultralytics/yolov5:v6.2 && sudo docker pull $t && sudo docker tag $t $tnew && sudo docker push $tnew + +# Clean up +# sudo docker system prune -a --volumes + +# Update Ubuntu drivers +# https://www.maketecheasier.com/install-nvidia-drivers-ubuntu/ + +# DDP test +# python -m torch.distributed.run --nproc_per_node 2 --master_port 1 train.py --epochs 3 + +# GCP VM from Image +# docker.io/ultralytics/yolov5:latest diff --git a/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-arm64 b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-arm64 new file mode 100644 index 0000000..7023c6a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-arm64 @@ -0,0 +1,41 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-arm64 image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is aarch64-compatible for Apple M1 and other ARM architectures i.e. Jetson Nano and Raspberry Pi + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM arm64v8/ubuntu:rolling + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop gcc libgl1-mesa-glx libglib2.0-0 libpython3-dev +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnxruntime + # tensorflow-aarch64 tensorflowjs \ + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-arm64 && sudo docker build --platform linux/arm64 -f utils/docker/Dockerfile-arm64 -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-arm64 && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-cpu b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-cpu new file mode 100644 index 0000000..06bad9a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/docker/Dockerfile-cpu @@ -0,0 +1,42 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +# Builds ultralytics/yolov5:latest-cpu image on DockerHub https://hub.docker.com/r/ultralytics/yolov5 +# Image is CPU-optimized for ONNX, OpenVINO and PyTorch YOLOv5 deployments + +# Start FROM Ubuntu image https://hub.docker.com/_/ubuntu +FROM ubuntu:rolling + +# Downloads to user config dir +ADD https://ultralytics.com/assets/Arial.ttf https://ultralytics.com/assets/Arial.Unicode.ttf /root/.config/Ultralytics/ + +# Install linux packages +ENV DEBIAN_FRONTEND noninteractive +RUN apt update +RUN TZ=Etc/UTC apt install -y tzdata +RUN apt install --no-install-recommends -y python3-pip git zip curl htop libgl1-mesa-glx libglib2.0-0 libpython3-dev gnupg +# RUN alias python=python3 + +# Install pip packages +COPY requirements.txt . +RUN python3 -m pip install --upgrade pip wheel +RUN pip install --no-cache -r requirements.txt albumentations gsutil notebook \ + coremltools onnx onnx-simplifier onnxruntime 'openvino-dev>=2022.3' \ + # tensorflow tensorflowjs \ + --extra-index-url https://download.pytorch.org/whl/cpu + +# Create working directory +RUN mkdir -p /usr/src/app +WORKDIR /usr/src/app + +# Copy contents +# COPY . /usr/src/app (issues as not a .git directory) +RUN git clone https://github.com/ultralytics/yolov5 /usr/src/app +ENV DEBIAN_FRONTEND teletype + + +# Usage Examples ------------------------------------------------------------------------------------------------------- + +# Build and Push +# t=ultralytics/yolov5:latest-cpu && sudo docker build -f utils/docker/Dockerfile-cpu -t $t . 
&& sudo docker push $t + +# Pull and Run +# t=ultralytics/yolov5:latest-cpu && sudo docker pull $t && sudo docker run -it --ipc=host -v "$(pwd)"/datasets:/usr/src/datasets $t diff --git a/src/train_utils/train_models/models/yolov5/utils/downloads.py b/src/train_utils/train_models/models/yolov5/utils/downloads.py new file mode 100644 index 0000000..88f5237 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/downloads.py @@ -0,0 +1,128 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Download utils +""" + +import logging +import os +import subprocess +import urllib +from pathlib import Path + +import requests +import torch + + +def is_url(url, check=True): + # Check if string is URL and check if URL exists + try: + url = str(url) + result = urllib.parse.urlparse(url) + assert all([result.scheme, result.netloc]) # check if is url + return (urllib.request.urlopen(url).getcode() == 200) if check else True # check if exists online + except (AssertionError, urllib.request.HTTPError): + return False + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + output = subprocess.check_output(['gsutil', 'du', url], shell=True, encoding='utf-8') + if output: + return int(output.split()[0]) + return 0 + + +def url_getsize(url='https://ultralytics.com/images/bus.jpg'): + # Return downloadable file size in bytes + response = requests.head(url, allow_redirects=True) + return int(response.headers.get('content-length', -1)) + + +def curl_download(url, filename, *, silent: bool = False) -> bool: + """ + Download a file from a url to a filename using curl. + """ + silent_option = 'sS' if silent else '' # silent + proc = subprocess.run([ + 'curl', + '-#', + f'-{silent_option}L', + url, + '--output', + filename, + '--retry', + '9', + '-C', + '-',]) + return proc.returncode == 0 + + +def safe_download(file, url, url2=None, min_bytes=1E0, error_msg=''): + # Attempts to download file from url or url2, checks and removes incomplete downloads < min_bytes + from utils.general import LOGGER + + file = Path(file) + assert_msg = f"Downloaded file '{file}' does not exist or size is < min_bytes={min_bytes}" + try: # url1 + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=LOGGER.level <= logging.INFO) + assert file.exists() and file.stat().st_size > min_bytes, assert_msg # check + except Exception as e: # url2 + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f'ERROR: {e}\nRe-attempting {url2 or url} to {file}...') + # curl download, retry and resume on fail + curl_download(url2 or url, file) + finally: + if not file.exists() or file.stat().st_size < min_bytes: # check + if file.exists(): + file.unlink() # remove partial downloads + LOGGER.info(f'ERROR: {assert_msg}\n{error_msg}') + LOGGER.info('') + + +def attempt_download(file, repo='ultralytics/yolov5', release='v7.0'): + # Attempt file download from GitHub release assets if not found locally. release = 'latest', 'v7.0', etc. + from utils.general import LOGGER + + def github_assets(repository, version='latest'): + # Return GitHub repo tag (i.e. 'v7.0') and assets (i.e. ['yolov5s.pt', 'yolov5m.pt', ...]) + if version != 'latest': + version = f'tags/{version}' # i.e. 
tags/v7.0 + response = requests.get(f'https://api.github.com/repos/{repository}/releases/{version}').json() # github api + return response['tag_name'], [x['name'] for x in response['assets']] # tag, assets + + file = Path(str(file).strip().replace("'", '')) + if not file.exists(): + # URL specified + name = Path(urllib.parse.unquote(str(file))).name # decode '%2F' to '/' etc. + if str(file).startswith(('http:/', 'https:/')): # download + url = str(file).replace(':/', '://') # Pathlib turns :// -> :/ + file = name.split('?')[0] # parse authentication https://url.com/file.txt?auth... + if Path(file).is_file(): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + safe_download(file=file, url=url, min_bytes=1E5) + return file + + # GitHub assets + assets = [f'yolov5{size}{suffix}.pt' for size in 'nsmlx' for suffix in ('', '6', '-cls', '-seg')] # default + try: + tag, assets = github_assets(repo, release) + except Exception: + try: + tag, assets = github_assets(repo) # latest release + except Exception: + try: + tag = subprocess.check_output('git tag', shell=True, stderr=subprocess.STDOUT).decode().split()[-1] + except Exception: + tag = release + + if name in assets: + file.parent.mkdir(parents=True, exist_ok=True) # make parent dir (if required) + safe_download(file, + url=f'https://github.com/{repo}/releases/download/{tag}/{name}', + min_bytes=1E5, + error_msg=f'{file} missing, try downloading from https://github.com/{repo}/releases/{tag}') + + return str(file) diff --git a/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/README.md b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/README.md new file mode 100644 index 0000000..a726acb --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/README.md @@ -0,0 +1,73 @@ +# Flask REST API + +[REST](https://en.wikipedia.org/wiki/Representational_state_transfer) [API](https://en.wikipedia.org/wiki/API)s are +commonly used to expose Machine Learning (ML) models to other services. This folder contains an example REST API +created using Flask to expose the YOLOv5s model from [PyTorch Hub](https://pytorch.org/hub/ultralytics_yolov5/). + +## Requirements + +[Flask](https://palletsprojects.com/p/flask/) is required. 
Install with: + +```shell +$ pip install Flask +``` + +## Run + +After Flask installation run: + +```shell +$ python3 restapi.py --port 5000 +``` + +Then use [curl](https://curl.se/) to perform a request: + +```shell +$ curl -X POST -F image=@zidane.jpg 'http://localhost:5000/v1/object-detection/yolov5s' +``` + +The model inference results are returned as a JSON response: + +```json +[ + { + "class": 0, + "confidence": 0.8900438547, + "height": 0.9318675399, + "name": "person", + "width": 0.3264600933, + "xcenter": 0.7438579798, + "ycenter": 0.5207948685 + }, + { + "class": 0, + "confidence": 0.8440024257, + "height": 0.7155083418, + "name": "person", + "width": 0.6546785235, + "xcenter": 0.427829951, + "ycenter": 0.6334488392 + }, + { + "class": 27, + "confidence": 0.3771208823, + "height": 0.3902671337, + "name": "tie", + "width": 0.0696444362, + "xcenter": 0.3675483763, + "ycenter": 0.7991207838 + }, + { + "class": 27, + "confidence": 0.3527112305, + "height": 0.1540903747, + "name": "tie", + "width": 0.0336618312, + "xcenter": 0.7814827561, + "ycenter": 0.5065554976 + } +] +``` + +An example python script to perform inference using [requests](https://docs.python-requests.org/en/master/) is given +in `example_request.py` diff --git a/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/example_request.py b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/example_request.py new file mode 100644 index 0000000..952e5dc --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/example_request.py @@ -0,0 +1,19 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Perform test request +""" + +import pprint + +import requests + +DETECTION_URL = 'http://localhost:5000/v1/object-detection/yolov5s' +IMAGE = 'zidane.jpg' + +# Read image +with open(IMAGE, 'rb') as f: + image_data = f.read() + +response = requests.post(DETECTION_URL, files={'image': image_data}).json() + +pprint.pprint(response) diff --git a/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/restapi.py b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/restapi.py new file mode 100644 index 0000000..9258b1a --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/flask_rest_api/restapi.py @@ -0,0 +1,48 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Run a Flask REST API exposing one or more YOLOv5s models +""" + +import argparse +import io + +import torch +from flask import Flask, request +from PIL import Image + +app = Flask(__name__) +models = {} + +DETECTION_URL = '/v1/object-detection/' + + +@app.route(DETECTION_URL, methods=['POST']) +def predict(model): + if request.method != 'POST': + return + + if request.files.get('image'): + # Method 1 + # with request.files["image"] as f: + # im = Image.open(io.BytesIO(f.read())) + + # Method 2 + im_file = request.files['image'] + im_bytes = im_file.read() + im = Image.open(io.BytesIO(im_bytes)) + + if model in models: + results = models[model](im, size=640) # reduce size=320 for faster inference + return results.pandas().xyxy[0].to_json(orient='records') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='Flask API exposing YOLOv5 model') + parser.add_argument('--port', default=5000, type=int, help='port number') + parser.add_argument('--model', nargs='+', default=['yolov5s'], help='model(s) to run, i.e. 
--model yolov5n yolov5s') + opt = parser.parse_args() + + for m in opt.model: + models[m] = torch.hub.load('ultralytics/yolov5', m, force_reload=True, skip_validation=True) + + app.run(host='0.0.0.0', port=opt.port) # debug=True causes Restarting with stat diff --git a/src/train_utils/train_models/models/yolov5/utils/general.py b/src/train_utils/train_models/models/yolov5/utils/general.py new file mode 100644 index 0000000..adb9242 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/general.py @@ -0,0 +1,1140 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +General utils +""" + +import contextlib +import glob +import inspect +import logging +import logging.config +import math +import os +import platform +import random +import re +import signal +import subprocess +import sys +import time +import urllib +from copy import deepcopy +from datetime import datetime +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from subprocess import check_output +from tarfile import is_tarfile +from typing import Optional +from zipfile import ZipFile, is_zipfile + +import cv2 +import numpy as np +import pandas as pd +import pkg_resources as pkg +import torch +import torchvision +import yaml + +from utils import TryExcept, emojis +from utils.downloads import curl_download, gsutil_getsize +from utils.metrics import box_iou, fitness + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[1] # YOLOv5 root directory +RANK = int(os.getenv('RANK', -1)) + +# Settings +NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads +DATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory +AUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode +VERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode +TQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format +FONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf + +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(NUM_THREADS) # NumExpr max threads +os.environ['OMP_NUM_THREADS'] = '1' if platform.system() == 'darwin' else str(NUM_THREADS) # OpenMP (PyTorch and SciPy) + + +def is_ascii(s=''): + # Is string composed of all ASCII (no UTF) characters? (note str().isascii() introduced in python 3.7) + s = str(s) # convert list, tuple, None, etc. to str + return len(s.encode().decode('ascii', 'ignore')) == len(s) + + +def is_chinese(s='人工智能'): + # Is string composed of any Chinese characters? + return bool(re.search('[\u4e00-\u9fff]', str(s))) + + +def is_colab(): + # Is environment a Google Colab instance? + return 'google.colab' in sys.modules + + +def is_jupyter(): + """ + Check if the current script is running inside a Jupyter Notebook. + Verified on Colab, Jupyterlab, Kaggle, Paperspace. + + Returns: + bool: True if running inside a Jupyter Notebook, False otherwise. + """ + with contextlib.suppress(Exception): + from IPython import get_ipython + return get_ipython() is not None + return False + + +def is_kaggle(): + # Is environment a Kaggle Notebook? 
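+    # both environment variables are typically set inside Kaggle notebook containers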
+ return os.environ.get('PWD') == '/kaggle/working' and os.environ.get('KAGGLE_URL_BASE') == 'https://www.kaggle.com' + + +def is_docker() -> bool: + """Check if the process runs inside a docker container.""" + if Path('/.dockerenv').exists(): + return True + try: # check if docker is in control groups + with open('/proc/self/cgroup') as file: + return any('docker' in line for line in file) + except OSError: + return False + + +def is_writeable(dir, test=False): + # Return True if directory has write permissions, test opening a file with write permissions if test=True + if not test: + return os.access(dir, os.W_OK) # possible issues on Windows + file = Path(dir) / 'tmp.txt' + try: + with open(file, 'w'): # open file with write permissions + pass + file.unlink() # remove file + return True + except OSError: + return False + + +LOGGING_NAME = 'yolov5' + + +def set_logging(name=LOGGING_NAME, verbose=True): + # sets up logging for the given name + rank = int(os.getenv('RANK', -1)) # rank in world for Multi-GPU trainings + level = logging.INFO if verbose and rank in {-1, 0} else logging.ERROR + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + name: { + 'format': '%(message)s'}}, + 'handlers': { + name: { + 'class': 'logging.StreamHandler', + 'formatter': name, + 'level': level,}}, + 'loggers': { + name: { + 'level': level, + 'handlers': [name], + 'propagate': False,}}}) + + +set_logging(LOGGING_NAME) # run before defining LOGGER +LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.) +if platform.system() == 'Windows': + for fn in LOGGER.info, LOGGER.warning: + setattr(LOGGER, fn.__name__, lambda x: fn(emojis(x))) # emoji safe logging + + +def user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'): + # Return path of user configuration directory. Prefer environment variable if exists. Make dir if required. + env = os.getenv(env_var) + if env: + path = Path(env) # use environment variable + else: + cfg = {'Windows': 'AppData/Roaming', 'Linux': '.config', 'Darwin': 'Library/Application Support'} # 3 OS dirs + path = Path.home() / cfg.get(platform.system(), '') # OS-specific config dir + path = (path if is_writeable(path) else Path('/tmp')) / dir # GCP and AWS lambda fix, only /tmp is writeable + path.mkdir(exist_ok=True) # make if required + return path + + +CONFIG_DIR = user_config_dir() # Ultralytics settings dir + + +class Profile(contextlib.ContextDecorator): + # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager + def __init__(self, t=0.0): + self.t = t + self.cuda = torch.cuda.is_available() + + def __enter__(self): + self.start = self.time() + return self + + def __exit__(self, type, value, traceback): + self.dt = self.time() - self.start # delta-time + self.t += self.dt # accumulate dt + + def time(self): + if self.cuda: + torch.cuda.synchronize() + return time.time() + + +class Timeout(contextlib.ContextDecorator): + # YOLOv5 Timeout class. 
Usage: @Timeout(seconds) decorator or 'with Timeout(seconds):' context manager + def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True): + self.seconds = int(seconds) + self.timeout_message = timeout_msg + self.suppress = bool(suppress_timeout_errors) + + def _timeout_handler(self, signum, frame): + raise TimeoutError(self.timeout_message) + + def __enter__(self): + if platform.system() != 'Windows': # not supported on Windows + signal.signal(signal.SIGALRM, self._timeout_handler) # Set handler for SIGALRM + signal.alarm(self.seconds) # start countdown for SIGALRM to be raised + + def __exit__(self, exc_type, exc_val, exc_tb): + if platform.system() != 'Windows': + signal.alarm(0) # Cancel SIGALRM if it's scheduled + if self.suppress and exc_type is TimeoutError: # Suppress TimeoutError + return True + + +class WorkingDirectory(contextlib.ContextDecorator): + # Usage: @WorkingDirectory(dir) decorator or 'with WorkingDirectory(dir):' context manager + def __init__(self, new_dir): + self.dir = new_dir # new dir + self.cwd = Path.cwd().resolve() # current dir + + def __enter__(self): + os.chdir(self.dir) + + def __exit__(self, exc_type, exc_val, exc_tb): + os.chdir(self.cwd) + + +def methods(instance): + # Get class/instance methods + return [f for f in dir(instance) if callable(getattr(instance, f)) and not f.startswith('__')] + + +def print_args(args: Optional[dict] = None, show_file=True, show_func=False): + # Print function arguments (optional args dict) + x = inspect.currentframe().f_back # previous frame + file, _, func, _, _ = inspect.getframeinfo(x) + if args is None: # get args automatically + args, _, _, frm = inspect.getargvalues(x) + args = {k: v for k, v in frm.items() if k in args} + try: + file = Path(file).resolve().relative_to(ROOT).with_suffix('') + except ValueError: + file = Path(file).stem + s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '') + LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items())) + + +def init_seeds(seed=0, deterministic=False): + # Initialize random number generator (RNG) seeds https://pytorch.org/docs/stable/notes/randomness.html + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) # for Multi-GPU, exception safe + # torch.backends.cudnn.benchmark = True # AutoBatch problem https://github.com/ultralytics/yolov5/issues/9287 + if deterministic and check_version(torch.__version__, '1.12.0'): # https://github.com/ultralytics/yolov5/pull/8213 + torch.use_deterministic_algorithms(True) + torch.backends.cudnn.deterministic = True + os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8' + os.environ['PYTHONHASHSEED'] = str(seed) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and all(x not in k for x in exclude) and v.shape == db[k].shape} + + +def get_default_args(func): + # Get func() default arguments + signature = inspect.signature(func) + return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. 
to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def file_age(path=__file__): + # Return days since last file update + dt = (datetime.now() - datetime.fromtimestamp(Path(path).stat().st_mtime)) # delta + return dt.days # + dt.seconds / 86400 # fractional days + + +def file_date(path=__file__): + # Return human-readable file modification date, i.e. '2021-3-26' + t = datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def file_size(path): + # Return file/dir size (MB) + mb = 1 << 20 # bytes to MiB (1024 ** 2) + path = Path(path) + if path.is_file(): + return path.stat().st_size / mb + elif path.is_dir(): + return sum(f.stat().st_size for f in path.glob('**/*') if f.is_file()) / mb + else: + return 0.0 + + +def check_online(): + # Check internet connectivity + import socket + + def run_once(): + # Check once + try: + socket.create_connection(('1.1.1.1', 443), 5) # check host accessibility + return True + except OSError: + return False + + return run_once() or run_once() # check twice to increase robustness to intermittent connectivity issues + + +def git_describe(path=ROOT): # path must be a directory + # Return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + try: + assert (Path(path) / '.git').is_dir() + return check_output(f'git -C {path} describe --tags --long --always', shell=True).decode()[:-1] + except Exception: + return '' + + +@TryExcept() +@WorkingDirectory(ROOT) +def check_git_status(repo='ultralytics/yolov5', branch='master'): + # YOLOv5 status check, recommend 'git pull' if code is out of date + url = f'https://github.com/{repo}' + msg = f', for updates see {url}' + s = colorstr('github: ') # string + assert Path('.git').exists(), s + 'skipping check (not a git repository)' + msg + assert check_online(), s + 'skipping check (offline)' + msg + + splits = re.split(pattern=r'\s', string=check_output('git remote -v', shell=True).decode()) + matches = [repo in s for s in splits] + if any(matches): + remote = splits[matches.index(True) - 1] + else: + remote = 'ultralytics' + check_output(f'git remote add {remote} {url}', shell=True) + check_output(f'git fetch {remote}', shell=True, timeout=5) # git fetch + local_branch = check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(check_output(f'git rev-list {local_branch}..{remote}/{branch} --count', shell=True)) # commits behind + if n > 0: + pull = 'git pull' if remote == 'origin' else f'git pull {remote} {branch}' + s += f"⚠️ YOLOv5 is out of date by {n} commit{'s' * (n > 1)}. Use '{pull}' or 'git clone {url}' to update." + else: + s += f'up to date with {url} ✅' + LOGGER.info(s) + + +@WorkingDirectory(ROOT) +def check_git_info(path='.'): + # YOLOv5 git info check, return {remote, branch, commit} + check_requirements('gitpython') + import git + try: + repo = git.Repo(path) + remote = repo.remotes.origin.url.replace('.git', '') # i.e. 'https://github.com/ultralytics/yolov5' + commit = repo.head.commit.hexsha # i.e. '3134699c73af83aac2a481435550b968d5792c0d' + try: + branch = repo.active_branch.name # i.e. 'main' + except TypeError: # not on any branch + branch = None # i.e. 
'detached HEAD' state + return {'remote': remote, 'branch': branch, 'commit': commit} + except git.exc.InvalidGitRepositoryError: # path is not a git dir + return {'remote': None, 'branch': None, 'commit': None} + + +def check_python(minimum='3.7.0'): + # Check current python version vs. required python version + check_version(platform.python_version(), minimum, name='Python ', hard=True) + + +def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False): + # Check version vs. required version + current, minimum = (pkg.parse_version(x) for x in (current, minimum)) + result = (current == minimum) if pinned else (current >= minimum) # bool + s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string + if hard: + assert result, emojis(s) # assert min requirements met + if verbose and not result: + LOGGER.warning(s) + return result + + +@TryExcept() +def check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''): + # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str) + prefix = colorstr('red', 'bold', 'requirements:') + check_python() # check python version + if isinstance(requirements, Path): # requirements.txt file + file = requirements.resolve() + assert file.exists(), f'{prefix} {file} not found, check failed.' + with file.open() as f: + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude] + elif isinstance(requirements, str): + requirements = [requirements] + + s = '' + n = 0 + for r in requirements: + try: + pkg.require(r) + except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met + s += f'"{r}" ' + n += 1 + + if s and install and AUTOINSTALL: # check environment variable + LOGGER.info(f"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...") + try: + # assert check_online(), "AutoUpdate skipped (offline)" + LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode()) + source = file if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + LOGGER.info(s) + except Exception as e: + LOGGER.warning(f'{prefix} ❌ {e}') + + +def check_img_size(imgsz, s=32, floor=0): + # Verify image size is a multiple of stride s in each dimension + if isinstance(imgsz, int): # integer i.e. img_size=640 + new_size = max(make_divisible(imgsz, int(s)), floor) + else: # list i.e. 
img_size=[640, 480] + imgsz = list(imgsz) # convert to list if tuple + new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz] + if new_size != imgsz: + LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}') + return new_size + + +def check_imshow(warn=False): + # Check if environment supports image displays + try: + assert not is_jupyter() + assert not is_docker() + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + if warn: + LOGGER.warning(f'WARNING ⚠️ Environment does not support cv2.imshow() or PIL Image.show()\n{e}') + return False + + +def check_suffix(file='yolov5s.pt', suffix=('.pt',), msg=''): + # Check file(s) for acceptable suffix + if file and suffix: + if isinstance(suffix, str): + suffix = [suffix] + for f in file if isinstance(file, (list, tuple)) else [file]: + s = Path(f).suffix.lower() # file suffix + if len(s): + assert s in suffix, f'{msg}{f} acceptable suffix is {suffix}' + + +def check_yaml(file, suffix=('.yaml', '.yml')): + # Search/download YAML file (if necessary) and return path, checking suffix + return check_file(file, suffix) + + +def check_file(file, suffix=''): + # Search/download file (if necessary) and return path + check_suffix(file, suffix) # optional + file = str(file) # convert to str() + if os.path.isfile(file) or not file: # exists + return file + elif file.startswith(('http:/', 'https:/')): # download + url = file # warning: Pathlib turns :// -> :/ + file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth + if os.path.isfile(file): + LOGGER.info(f'Found {url} locally at {file}') # file already exists + else: + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check + return file + elif file.startswith('clearml://'): # ClearML Dataset ID + assert 'clearml' in sys.modules, "ClearML is not installed, so cannot use ClearML dataset. Try running 'pip install clearml'." 
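+        # ClearML dataset IDs are returned unchanged here; no local existence check is performed for clearml:// paths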
+ return file + else: # search + files = [] + for d in 'data', 'models', 'utils': # search directories + files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file + assert len(files), f'File not found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_font(font=FONT, progress=False): + # Download font to CONFIG_DIR if necessary + font = Path(font) + file = CONFIG_DIR / font.name + if not font.exists() and not file.exists(): + url = f'https://ultralytics.com/assets/{font.name}' + LOGGER.info(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, str(file), progress=progress) + + +def check_dataset(data, autodownload=True): + # Download, check and/or unzip dataset if not found locally + + # Download (optional) + extract_dir = '' + if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)): + download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1) + data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml')) + extract_dir, autodownload = data.parent, False + + # Read yaml (optional) + if isinstance(data, (str, Path)): + data = yaml_load(data) # dictionary + + # Checks + for k in 'train', 'val', 'names': + assert k in data, emojis(f"data.yaml '{k}:' field missing ❌") + if isinstance(data['names'], (list, tuple)): # old array format + data['names'] = dict(enumerate(data['names'])) # convert to dict + assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 2: car' + data['nc'] = len(data['names']) + + # Resolve paths + path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.' 
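+    # a relative 'path' is resolved against the YOLOv5 ROOT directory below, then prepended to the train/val/test entries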
+ if not path.is_absolute(): + path = (ROOT / path).resolve() + data['path'] = path # download scripts + for k in 'train', 'val', 'test': + if data.get(k): # prepend path + if isinstance(data[k], str): + x = (path / data[k]).resolve() + if not x.exists() and data[k].startswith('../'): + x = (path / data[k][3:]).resolve() + data[k] = str(x) + else: + data[k] = [str((path / x).resolve()) for x in data[k]] + + # Parse yaml + train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download')) + if val: + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + LOGGER.info('\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()]) + if not s or not autodownload: + raise Exception('Dataset not found ❌') + t = time.time() + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + LOGGER.info(f'Downloading {s} to {f}...') + torch.hub.download_url_to_file(s, f) + Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root + unzip_file(f, path=DATASETS_DIR) # unzip + Path(f).unlink() # remove zip + r = None # success + elif s.startswith('bash '): # bash script + LOGGER.info(f'Running {s} ...') + r = subprocess.run(s, shell=True) + else: # python script + r = exec(s, {'yaml': data}) # return None + dt = f'({round(time.time() - t, 1)}s)' + s = f"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}" if r in (0, None) else f'failure {dt} ❌' + LOGGER.info(f'Dataset download {s}') + check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts + return data # dictionary + + +def check_amp(model): + # Check PyTorch Automatic Mixed Precision (AMP) functionality. Return True on correct operation + from models.common import AutoShape, DetectMultiBackend + + def amp_allclose(model, im): + # All close FP32 vs AMP results + m = AutoShape(model, verbose=False) # model + a = m(im).xywhn[0] # FP32 inference + m.amp = True + b = m(im).xywhn[0] # AMP inference + return a.shape == b.shape and torch.allclose(a, b, atol=0.1) # close to 10% absolute tolerance + + prefix = colorstr('AMP: ') + device = next(model.parameters()).device # get model device + if device.type in ('cpu', 'mps'): + return False # AMP only used on CUDA devices + f = ROOT / 'data' / 'images' / 'bus.jpg' # image to check + im = f if f.exists() else 'https://ultralytics.com/images/bus.jpg' if check_online() else np.ones((640, 640, 3)) + try: + assert amp_allclose(deepcopy(model), im) or amp_allclose(DetectMultiBackend('yolov5n.pt', device), im) + LOGGER.info(f'{prefix}checks passed ✅') + return True + except Exception: + help_url = 'https://github.com/ultralytics/yolov5/issues/7908' + LOGGER.warning(f'{prefix}checks failed ❌, disabling Automatic Mixed Precision. 
See {help_url}') + return False + + +def yaml_load(file='data.yaml'): + # Single-line safe yaml loading + with open(file, errors='ignore') as f: + return yaml.safe_load(f) + + +def yaml_save(file='data.yaml', data={}): + # Single-line safe yaml saving + with open(file, 'w') as f: + yaml.safe_dump({k: str(v) if isinstance(v, Path) else v for k, v in data.items()}, f, sort_keys=False) + + +def unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')): + # Unzip a *.zip file to path/, excluding files containing strings in exclude list + if path is None: + path = Path(file).parent # default path + with ZipFile(file) as zipObj: + for f in zipObj.namelist(): # list all archived filenames in the zip + if all(x not in f for x in exclude): + zipObj.extract(f, path=path) + + +def url2file(url): + # Convert URL to filename, i.e. https://url.com/file.txt?auth -> file.txt + url = str(Path(url)).replace(':/', '://') # Pathlib turns :// -> :/ + return Path(urllib.parse.unquote(url)).name.split('?')[0] # '%2F' to '/', split https://url.com/file.txt?auth + + +def download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3): + # Multithreaded file download and unzip function, used in data.yaml for autodownload + def download_one(url, dir): + # Download 1 file + success = True + if os.path.isfile(url): + f = Path(url) # filename + else: # does not exist + f = dir / Path(url).name + LOGGER.info(f'Downloading {url} to {f}...') + for i in range(retry + 1): + if curl: + success = curl_download(url, f, silent=(threads > 1)) + else: + torch.hub.download_url_to_file(url, f, progress=threads == 1) # torch download + success = f.is_file() + if success: + break + elif i < retry: + LOGGER.warning(f'⚠️ Download failure, retrying {i + 1}/{retry} {url}...') + else: + LOGGER.warning(f'❌ Failed to download {url}...') + + if unzip and success and (f.suffix == '.gz' or is_zipfile(f) or is_tarfile(f)): + LOGGER.info(f'Unzipping {f}...') + if is_zipfile(f): + unzip_file(f, dir) # unzip + elif is_tarfile(f): + subprocess.run(['tar', 'xf', f, '--directory', f.parent], check=True) # unzip + elif f.suffix == '.gz': + subprocess.run(['tar', 'xfz', f, '--directory', f.parent], check=True) # unzip + if delete: + f.unlink() # remove zip + + dir = Path(dir) + dir.mkdir(parents=True, exist_ok=True) # make directory + if threads > 1: + pool = ThreadPool(threads) + pool.imap(lambda x: download_one(*x), zip(url, repeat(dir))) # multithreaded + pool.close() + pool.join() + else: + for u in [url] if isinstance(url, (str, Path)) else url: + download_one(u, dir) + + +def make_divisible(x, divisor): + # Returns nearest x divisible by divisor + if isinstance(divisor, torch.Tensor): + divisor = int(divisor.max()) # to int + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern='[|@#!¡·$€%&()=?¿^*;:,¨´><+]', repl='_', string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 https://arxiv.org/pdf/1812.01187.pdf + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = { + 'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(int) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights).float() + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + # Usage: index = random.choices(range(n), weights=image_weights, k=1) # weighted image sample + class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels]) + return (class_weights.reshape(1, nc) * class_counts).sum(1) + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + return [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = (x[..., 0] + x[..., 2]) / 2 # x center + y[..., 1] = (x[..., 1] + x[..., 3]) / 2 # y center + y[..., 2] = x[..., 2] - x[..., 0] # width + y[..., 3] = x[..., 3] - x[..., 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = x[..., 0] - x[..., 2] / 2 # top left x + y[..., 1] = x[..., 1] - x[..., 3] / 2 # top left y + y[..., 2] = x[..., 0] + x[..., 2] / 2 # bottom right x + y[..., 3] = x[..., 1] 
+ x[..., 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * (x[..., 0] - x[..., 2] / 2) + padw # top left x + y[..., 1] = h * (x[..., 1] - x[..., 3] / 2) + padh # top left y + y[..., 2] = w * (x[..., 0] + x[..., 2] / 2) + padw # bottom right x + y[..., 3] = h * (x[..., 1] + x[..., 3] / 2) + padh # bottom right y + return y + + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = ((x[..., 0] + x[..., 2]) / 2) / w # x center + y[..., 1] = ((x[..., 1] + x[..., 3]) / 2) / h # y center + y[..., 2] = (x[..., 2] - x[..., 0]) / w # width + y[..., 3] = (x[..., 3] - x[..., 1]) / h # height + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[..., 0] = w * x[..., 0] + padw # top left x + y[..., 1] = h * x[..., 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[..., [0, 2]] -= pad[0] # x padding + boxes[..., [1, 3]] -= pad[1] # y padding + boxes[..., :4] /= gain + clip_boxes(boxes, img0_shape) + return boxes + + +def scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + segments[:, 0] -= pad[0] # x padding + segments[:, 1] -= pad[1] # y padding + segments /= gain + clip_segments(segments, img0_shape) + if normalize: + segments[:, 0] /= img0_shape[1] # width + segments[:, 1] /= img0_shape[0] # height + return segments + + +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[..., 0].clamp_(0, shape[1]) # x1 + boxes[..., 1].clamp_(0, shape[0]) # y1 + boxes[..., 2].clamp_(0, shape[1]) # x2 + boxes[..., 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[..., [0, 2]] = boxes[..., [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[..., [1, 3]] = boxes[..., [1, 3]].clip(0, shape[0]) # y1, y2 + + +def clip_segments(segments, shape): + # Clip segments (xy1,xy2,...) 
to image shape (height, width) + if isinstance(segments, torch.Tensor): # faster individually + segments[:, 0].clamp_(0, shape[1]) # x + segments[:, 1].clamp_(0, shape[0]) # y + else: # np.array (faster grouped) + segments[:, 0] = segments[:, 0].clip(0, shape[1]) # x + segments[:, 1] = segments[:, 1].clip(0, shape[0]) # y + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=False, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out) + prediction = prediction[0] # select only inference output + + device = prediction.device + mps = 'mps' in device.type # Apple MPS + if mps: # MPS not fully supported yet, convert tensors to CPU before NMS + prediction = prediction.cpu() + bs = prediction.shape[0] # batch size + nc = prediction.shape[2] - nm - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 0.5 + 0.05 * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + mi = 5 + nc # mask start index + output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box/Mask + box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) + mask = x[:, mi:] # zero columns if no masks + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = x[:, 5:mi].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence and remove excess boxes + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + 
boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + i = i[:max_det] # limit detections + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if mps: + output[xi] = output[xi].to(device) + if (time.time() - t) > time_limit: + LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'best_fitness', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB") + + +def print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')): + evolve_csv = save_dir / 'evolve.csv' + evolve_yaml = save_dir / 'hyp_evolve.yaml' + keys = tuple(keys) + tuple(hyp.keys()) # [results + hyps] + keys = tuple(x.strip() for x in keys) + vals = results + tuple(hyp.values()) + n = len(keys) + + # Download (optional) + if bucket: + url = f'gs://{bucket}/evolve.csv' + if gsutil_getsize(url) > (evolve_csv.stat().st_size if evolve_csv.exists() else 0): + subprocess.run(['gsutil', 'cp', f'{url}', f'{save_dir}']) # download evolve.csv if larger than local + + # Log to evolve.csv + s = '' if evolve_csv.exists() else (('%20s,' * n % keys).rstrip(',') + '\n') # add header + with open(evolve_csv, 'a') as f: + f.write(s + ('%20.5g,' * n % vals).rstrip(',') + '\n') + + # Save yaml + with open(evolve_yaml, 'w') as f: + data = pd.read_csv(evolve_csv, skipinitialspace=True) + data = data.rename(columns=lambda x: x.strip()) # strip keys + i = np.argmax(fitness(data.values[:, :4])) # + generations = len(data) + f.write('# YOLOv5 Hyperparameter Evolution Results\n' + f'# Best generation: {i}\n' + + f'# Last generation: {generations - 1}\n' + '# ' + ', '.join(f'{x.strip():>20s}' for x in keys[:7]) + + '\n' + '# ' + ', '.join(f'{x:>20.5g}' for x in data.values[i, :7]) + '\n\n') + yaml.safe_dump(data.loc[i][7:].to_dict(), f, sort_keys=False) + + # Print to screen + LOGGER.info(prefix + f'{generations} generations finished, current result:\n' + prefix + + ', '.join(f'{x.strip():>20s}' for x in keys) + '\n' + prefix + ', '.join(f'{x:20.5g}' + for x in vals) + '\n\n') + + if bucket: + subprocess.run(['gsutil', 'cp', f'{evolve_csv}', f'{evolve_yaml}', f'gs://{bucket}']) # upload + + +def apply_classifier(x, model, img, im0): + # Apply a second stage classifier to YOLO outputs + # Example model = torchvision.models.__dict__['efficientnet_b0'](pretrained=True).to(device).eval() + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # 
boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_boxes(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for a in d: + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=False, sep='', mkdir=False): + # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc. + path = Path(path) # os-agnostic + if path.exists() and not exist_ok: + path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '') + + # Method 1 + for n in range(2, 9999): + p = f'{path}{sep}{n}{suffix}' # increment path + if not os.path.exists(p): # + break + path = Path(p) + + # Method 2 (deprecated) + # dirs = glob.glob(f"{path}{sep}*") # similar paths + # matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs] + # i = [int(m.groups()[0]) for m in matches if m] # indices + # n = max(i) + 1 if i else 2 # increment number + # path = Path(f"{path}{sep}{n}{suffix}") # increment path + + if mkdir: + path.mkdir(parents=True, exist_ok=True) # make directory + + return path + + +# OpenCV Multilanguage-friendly functions ------------------------------------------------------------------------------------ +imshow_ = cv2.imshow # copy to avoid recursion errors + + +def imread(filename, flags=cv2.IMREAD_COLOR): + return cv2.imdecode(np.fromfile(filename, np.uint8), flags) + + +def imwrite(filename, img): + try: + cv2.imencode(Path(filename).suffix, img)[1].tofile(filename) + return True + except Exception: + return False + + +def imshow(path, im): + imshow_(path.encode('unicode_escape').decode(), im) + + +cv2.imread, cv2.imwrite, cv2.imshow = imread, imwrite, imshow # redefine + +# Variables ------------------------------------------------------------------------------------------------------------ diff --git a/src/train_utils/train_models/models/yolov5/utils/google_app_engine/Dockerfile b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/Dockerfile new file mode 100644 index 0000000..0155618 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. gunicorn must be declared as +# a dependency in requirements.txt. 
+CMD gunicorn -b :$PORT main:app diff --git a/src/train_utils/train_models/models/yolov5/utils/google_app_engine/additional_requirements.txt b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..d5b7675 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,5 @@ +# add these requirements in your app on top of the existing ones +pip==21.1 +Flask==1.0.2 +gunicorn==19.10.0 +werkzeug>=2.2.3 # not directly required, pinned by Snyk to avoid a vulnerability diff --git a/src/train_utils/train_models/models/yolov5/utils/google_app_engine/app.yaml b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..5056b7c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolov5app + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/__init__.py b/src/train_utils/train_models/models/yolov5/utils/loggers/__init__.py new file mode 100644 index 0000000..9de1f22 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/__init__.py @@ -0,0 +1,401 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Logging utils +""" + +import os +import warnings +from pathlib import Path + +import pkg_resources as pkg +import torch +from torch.utils.tensorboard import SummaryWriter + +from utils.general import LOGGER, colorstr, cv2 +from utils.loggers.clearml.clearml_utils import ClearmlLogger +from utils.loggers.wandb.wandb_utils import WandbLogger +from utils.plots import plot_images, plot_labels, plot_results +from utils.torch_utils import de_parallel + +LOGGERS = ('csv', 'tb', 'wandb', 'clearml', 'comet') # *.csv, TensorBoard, Weights & Biases, ClearML +RANK = int(os.getenv('RANK', -1)) + +try: + import wandb + + assert hasattr(wandb, '__version__') # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in {0, -1}: + try: + wandb_login_success = wandb.login(timeout=30) + except wandb.errors.UsageError: # known non-TTY terminal issue + wandb_login_success = False + if not wandb_login_success: + wandb = None +except (ImportError, AssertionError): + wandb = None + +try: + import clearml + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + +try: + if RANK not in [0, -1]: + comet_ml = None + else: + import comet_ml + + assert hasattr(comet_ml, '__version__') # verify package import not local dir + from utils.loggers.comet import CometLogger + +except (ModuleNotFoundError, ImportError, AssertionError): + comet_ml = None + + +class Loggers(): + # YOLOv5 Loggers class + def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): + self.save_dir = save_dir + self.weights = weights + self.opt = opt + self.hyp = hyp + self.plots = not opt.noplots # plot results + self.logger = logger # for printing results to console + self.include = include + self.keys = [ + 'train/box_loss', + 'train/obj_loss', + 'train/cls_loss', # train loss + 'metrics/precision', + 'metrics/recall', + 'metrics/mAP_0.5', + 'metrics/mAP_0.5:0.95', # metrics + 'val/box_loss', + 'val/obj_loss', + 'val/cls_loss', # 
val loss + 'x/lr0', + 'x/lr1', + 'x/lr2'] # params + self.best_keys = ['best/epoch', 'best/precision', 'best/recall', 'best/mAP_0.5', 'best/mAP_0.5:0.95'] + for k in LOGGERS: + setattr(self, k, None) # init empty logger dictionary + self.csv = True # always log to csv + + # Messages + if not clearml: + prefix = colorstr('ClearML: ') + s = f"{prefix}run 'pip install clearml' to automatically track, visualize and remotely train YOLOv5 🚀 in ClearML" + self.logger.info(s) + if not comet_ml: + prefix = colorstr('Comet: ') + s = f"{prefix}run 'pip install comet_ml' to automatically track and visualize YOLOv5 🚀 runs in Comet" + self.logger.info(s) + # TensorBoard + s = self.save_dir + if 'tb' in self.include and not self.opt.evolve: + prefix = colorstr('TensorBoard: ') + self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(s)) + + # W&B + if wandb and 'wandb' in self.include: + self.opt.hyp = self.hyp # add hyperparameters + self.wandb = WandbLogger(self.opt) + else: + self.wandb = None + + # ClearML + if clearml and 'clearml' in self.include: + try: + self.clearml = ClearmlLogger(self.opt, self.hyp) + except Exception: + self.clearml = None + prefix = colorstr('ClearML: ') + LOGGER.warning(f'{prefix}WARNING ⚠️ ClearML is installed but not configured, skipping ClearML logging.' + f' See https://github.com/ultralytics/yolov5/tree/master/utils/loggers/clearml#readme') + + else: + self.clearml = None + + # Comet + if comet_ml and 'comet' in self.include: + if isinstance(self.opt.resume, str) and self.opt.resume.startswith('comet://'): + run_id = self.opt.resume.split('/')[-1] + self.comet_logger = CometLogger(self.opt, self.hyp, run_id=run_id) + + else: + self.comet_logger = CometLogger(self.opt, self.hyp) + + else: + self.comet_logger = None + + @property + def remote_dataset(self): + # Get data_dict if custom dataset artifact link is provided + data_dict = None + if self.clearml: + data_dict = self.clearml.data_dict + if self.wandb: + data_dict = self.wandb.data_dict + if self.comet_logger: + data_dict = self.comet_logger.data_dict + + return data_dict + + def on_train_start(self): + if self.comet_logger: + self.comet_logger.on_train_start() + + def on_pretrain_routine_start(self): + if self.comet_logger: + self.comet_logger.on_pretrain_routine_start() + + def on_pretrain_routine_end(self, labels, names): + # Callback runs on pre-train routine end + if self.plots: + plot_labels(labels, names, self.save_dir) + paths = self.save_dir.glob('*labels*.jpg') # training labels + if self.wandb: + self.wandb.log({'Labels': [wandb.Image(str(x), caption=x.name) for x in paths]}) + # if self.clearml: + # pass # ClearML saves these images automatically using hooks + if self.comet_logger: + self.comet_logger.on_pretrain_routine_end(paths) + + def on_train_batch_end(self, model, ni, imgs, targets, paths, vals): + log_dict = dict(zip(self.keys[:3], vals)) + # Callback runs on train batch end + # ni: number integrated batches (since train start) + if self.plots: + if ni < 3: + f = self.save_dir / f'train_batch{ni}.jpg' # filename + plot_images(imgs, targets, paths, f) + if ni == 0 and self.tb and not self.opt.sync_bn: + log_tensorboard_graph(self.tb, model, imgsz=(self.opt.imgsz, self.opt.imgsz)) + if ni == 10 and (self.wandb or self.clearml): + files = sorted(self.save_dir.glob('train*.jpg')) + if self.wandb: + self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + if self.clearml: + 
self.clearml.log_debug_samples(files, title='Mosaics') + + if self.comet_logger: + self.comet_logger.on_train_batch_end(log_dict, step=ni) + + def on_train_epoch_end(self, epoch): + # Callback runs on train epoch end + if self.wandb: + self.wandb.current_epoch = epoch + 1 + + if self.comet_logger: + self.comet_logger.on_train_epoch_end(epoch) + + def on_val_start(self): + if self.comet_logger: + self.comet_logger.on_val_start() + + def on_val_image_end(self, pred, predn, path, names, im): + # Callback runs on val image end + if self.wandb: + self.wandb.val_one_image(pred, predn, path, names, im) + if self.clearml: + self.clearml.log_image_with_boxes(path, pred, names, im) + + def on_val_batch_end(self, batch_i, im, targets, paths, shapes, out): + if self.comet_logger: + self.comet_logger.on_val_batch_end(batch_i, im, targets, paths, shapes, out) + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + # Callback runs on val end + if self.wandb or self.clearml: + files = sorted(self.save_dir.glob('val*.jpg')) + if self.wandb: + self.wandb.log({'Validation': [wandb.Image(str(f), caption=f.name) for f in files]}) + if self.clearml: + self.clearml.log_debug_samples(files, title='Validation') + + if self.comet_logger: + self.comet_logger.on_val_end(nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): + # Callback runs at the end of each fit (train+val) epoch + x = dict(zip(self.keys, vals)) + if self.csv: + file = self.save_dir / 'results.csv' + n = len(x) + 1 # number of cols + s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header + with open(file, 'a') as f: + f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in x.items(): + self.tb.add_scalar(k, v, epoch) + elif self.clearml: # log to ClearML if TensorBoard not used + for k, v in x.items(): + title, series = k.split('/') + self.clearml.task.get_logger().report_scalar(title, series, v, epoch) + + if self.wandb: + if best_fitness == fi: + best_results = [epoch] + vals[3:7] + for i, name in enumerate(self.best_keys): + self.wandb.wandb_run.summary[name] = best_results[i] # log best results in the summary + self.wandb.log(x) + self.wandb.end_epoch() + + if self.clearml: + self.clearml.current_epoch_logged_images = set() # reset epoch image limit + self.clearml.current_epoch += 1 + + if self.comet_logger: + self.comet_logger.on_fit_epoch_end(x, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + # Callback runs on model save event + if (epoch + 1) % self.opt.save_period == 0 and not final_epoch and self.opt.save_period != -1: + if self.wandb: + self.wandb.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + if self.clearml: + self.clearml.task.update_output_model(model_path=str(last), + model_name='Latest Model', + auto_delete_file=False) + + if self.comet_logger: + self.comet_logger.on_model_save(last, epoch, final_epoch, best_fitness, fi) + + def on_train_end(self, last, best, epoch, results): + # Callback runs on training end, i.e. 
saving best model + if self.plots: + plot_results(file=self.save_dir / 'results.csv') # save results.png + files = ['results.png', 'confusion_matrix.png', *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))] + files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter + self.logger.info(f"Results saved to {colorstr('bold', self.save_dir)}") + + if self.tb and not self.clearml: # These images are already captured by ClearML by now, we don't want doubles + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log(dict(zip(self.keys[3:10], results))) + self.wandb.log({'Results': [wandb.Image(str(f), caption=f.name) for f in files]}) + # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model + if not self.opt.evolve: + wandb.log_artifact(str(best if best.exists() else last), + type='model', + name=f'run_{self.wandb.wandb_run.id}_model', + aliases=['latest', 'best', 'stripped']) + self.wandb.finish_run() + + if self.clearml and not self.opt.evolve: + self.clearml.task.update_output_model(model_path=str(best if best.exists() else last), + name='Best Model', + auto_delete_file=False) + + if self.comet_logger: + final_results = dict(zip(self.keys[3:10], results)) + self.comet_logger.on_train_end(files, self.save_dir, last, best, epoch, final_results) + + def on_params_update(self, params: dict): + # Update hyperparams or configs of the experiment + if self.wandb: + self.wandb.wandb_run.config.update(params, allow_val_change=True) + if self.comet_logger: + self.comet_logger.on_params_update(params) + + +class GenericLogger: + """ + YOLOv5 General purpose logger for non-task specific logging + Usage: from utils.loggers import GenericLogger; logger = GenericLogger(...) 
+ Arguments + opt: Run arguments + console_logger: Console logger + include: loggers to include + """ + + def __init__(self, opt, console_logger, include=('tb', 'wandb')): + # init default loggers + self.save_dir = Path(opt.save_dir) + self.include = include + self.console_logger = console_logger + self.csv = self.save_dir / 'results.csv' # CSV logger + if 'tb' in self.include: + prefix = colorstr('TensorBoard: ') + self.console_logger.info( + f"{prefix}Start with 'tensorboard --logdir {self.save_dir.parent}', view at http://localhost:6006/") + self.tb = SummaryWriter(str(self.save_dir)) + + if wandb and 'wandb' in self.include: + self.wandb = wandb.init(project=web_project_name(str(opt.project)), + name=None if opt.name == 'exp' else opt.name, + config=opt) + else: + self.wandb = None + + def log_metrics(self, metrics, epoch): + # Log metrics dictionary to all loggers + if self.csv: + keys, vals = list(metrics.keys()), list(metrics.values()) + n = len(metrics) + 1 # number of cols + s = '' if self.csv.exists() else (('%23s,' * n % tuple(['epoch'] + keys)).rstrip(',') + '\n') # header + with open(self.csv, 'a') as f: + f.write(s + ('%23.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + + if self.tb: + for k, v in metrics.items(): + self.tb.add_scalar(k, v, epoch) + + if self.wandb: + self.wandb.log(metrics, step=epoch) + + def log_images(self, files, name='Images', epoch=0): + # Log images to all loggers + files = [Path(f) for f in (files if isinstance(files, (tuple, list)) else [files])] # to Path + files = [f for f in files if f.exists()] # filter by exists + + if self.tb: + for f in files: + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + + if self.wandb: + self.wandb.log({name: [wandb.Image(str(f), caption=f.name) for f in files]}, step=epoch) + + def log_graph(self, model, imgsz=(640, 640)): + # Log model graph to all loggers + if self.tb: + log_tensorboard_graph(self.tb, model, imgsz) + + def log_model(self, model_path, epoch=0, metadata={}): + # Log model to all loggers + if self.wandb: + art = wandb.Artifact(name=f'run_{wandb.run.id}_model', type='model', metadata=metadata) + art.add_file(str(model_path)) + wandb.log_artifact(art) + + def update_params(self, params): + # Update the parameters logged + if self.wandb: + wandb.run.config.update(params, allow_val_change=True) + + +def log_tensorboard_graph(tb, model, imgsz=(640, 640)): + # Log model graph to TensorBoard + try: + p = next(model.parameters()) # for device, type + imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz # expand + im = torch.zeros((1, 3, *imgsz)).to(p.device).type_as(p) # input image (WARNING: must be zeros, not empty) + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress jit trace warning + tb.add_graph(torch.jit.trace(de_parallel(model), im, strict=False), []) + except Exception as e: + LOGGER.warning(f'WARNING ⚠️ TensorBoard graph visualization failure {e}') + + +def web_project_name(project): + # Convert local project name to web project name + if not project.startswith('runs/train'): + return project + suffix = '-Classify' if project.endswith('-cls') else '-Segment' if project.endswith('-seg') else '' + return f'YOLOv5{suffix}' diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/README.md b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/README.md new file mode 100644 index 0000000..ca41c04 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/README.md @@ 
-0,0 +1,237 @@ +# ClearML Integration + +Clear|ML + +## About ClearML + +[ClearML](https://cutt.ly/yolov5-tutorial-clearml) is an [open-source](https://github.com/allegroai/clearml) toolbox designed to save you time ⏱️. + +🔨 Track every YOLOv5 training run in the experiment manager + +🔧 Version and easily access your custom training data with the integrated ClearML Data Versioning Tool + +🔦 Remotely train and monitor your YOLOv5 training runs using ClearML Agent + +🔬 Get the very best mAP using ClearML Hyperparameter Optimization + +🔭 Turn your newly trained YOLOv5 model into an API with just a few commands using ClearML Serving + +
+And so much more. It's up to you how many of these tools you want to use: you can stick to the experiment manager, or chain them all together into an impressive pipeline! +
+
+ +![ClearML scalars dashboard](https://github.com/thepycoder/clearml_screenshots/raw/main/experiment_manager_with_compare.gif) + +
+
+ +## 🦾 Setting Things Up + +To keep track of your experiments and/or data, ClearML needs to communicate to a server. You have 2 options to get one: + +Either sign up for free to the [ClearML Hosted Service](https://cutt.ly/yolov5-tutorial-clearml) or you can set up your own server, see [here](https://clear.ml/docs/latest/docs/deploying_clearml/clearml_server). Even the server is open-source, so even if you're dealing with sensitive data, you should be good to go! + +1. Install the `clearml` python package: + + ```bash + pip install clearml + ``` + +1. Connect the ClearML SDK to the server by [creating credentials](https://app.clear.ml/settings/workspace-configuration) (go right top to Settings -> Workspace -> Create new credentials), then execute the command below and follow the instructions: + + ```bash + clearml-init + ``` + +That's it! You're done 😎 + +
+ +## 🚀 Training YOLOv5 With ClearML + +To enable ClearML experiment tracking, simply install the ClearML pip package. + +```bash +pip install clearml>=1.2.0 +``` + +This will enable integration with the YOLOv5 training script. Every training run from now on, will be captured and stored by the ClearML experiment manager. + +If you want to change the `project_name` or `task_name`, use the `--project` and `--name` arguments of the `train.py` script, by default the project will be called `YOLOv5` and the task `Training`. +PLEASE NOTE: ClearML uses `/` as a delimiter for subprojects, so be careful when using `/` in your project name! + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache +``` + +or with custom project and task name: + +```bash +python train.py --project my_project --name my_training --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache +``` + +This will capture: + +- Source code + uncommitted changes +- Installed packages +- (Hyper)parameters +- Model files (use `--save-period n` to save a checkpoint every n epochs) +- Console output +- Scalars (mAP_0.5, mAP_0.5:0.95, precision, recall, losses, learning rates, ...) +- General info such as machine details, runtime, creation date etc. +- All produced plots such as label correlogram and confusion matrix +- Images with bounding boxes per epoch +- Mosaic per epoch +- Validation images per epoch +- ... + +That's a lot right? 🤯 +Now, we can visualize all of this information in the ClearML UI to get an overview of our training progress. Add custom columns to the table view (such as e.g. mAP_0.5) so you can easily sort on the best performing model. Or select multiple experiments and directly compare them! + +There even more we can do with all of this information, like hyperparameter optimization and remote execution, so keep reading if you want to see how that works! + +
+ +## 🔗 Dataset Version Management + +Versioning your data separately from your code is generally a good idea and makes it easy to acquire the latest version too. This repository supports supplying a dataset version ID, and it will make sure to get the data if it's not there yet. In addition, this workflow also saves the used dataset ID as part of the task parameters, so you will always know for sure which data was used in which experiment! + +![ClearML Dataset Interface](https://github.com/thepycoder/clearml_screenshots/raw/main/clearml_data.gif) + +### Prepare Your Dataset + +The YOLOv5 repository supports a number of different datasets by using yaml files containing their information. By default, datasets are downloaded to the `../datasets` folder relative to the repository root folder. So if you downloaded the `coco128` dataset using the link in the yaml or with the scripts provided by yolov5, you get this folder structure: + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ LICENSE + |_ README.txt +``` + +But this can be any dataset you wish. Feel free to use your own, as long as you keep to this folder structure. + +Next, ⚠️**copy the corresponding yaml file to the root of the dataset folder**⚠️. This yaml file contains the information ClearML will need to properly use the dataset. You can make this yourself too, of course, just follow the structure of the example yamls. + +Basically we need the following keys: `path`, `train`, `test`, `val`, `nc`, `names` (a minimal example is shown at the end of this section). + +``` +.. +|_ yolov5 +|_ datasets + |_ coco128 + |_ images + |_ labels + |_ coco128.yaml # <---- HERE! + |_ LICENSE + |_ README.txt +``` + +### Upload Your Dataset + +To get this dataset into ClearML as a versioned dataset, go to the dataset root folder and run the following command: + +```bash +cd coco128 +clearml-data sync --project YOLOv5 --name coco128 --folder . +``` + +The command `clearml-data sync` is actually a shorthand command. You could also run these commands one after the other: + +```bash +# Optionally add --parent if you want to base +# this version on another dataset version, so no duplicate files are uploaded! +clearml-data create --name coco128 --project YOLOv5 +clearml-data add --files . +clearml-data close +``` + +### Run Training Using A ClearML Dataset + +Now that you have a ClearML dataset, you can very simply use it to train custom YOLOv5 🚀 models! + +```bash +python train.py --img 640 --batch 16 --epochs 3 --data clearml:// --weights yolov5s.pt --cache +``` + +
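+For reference, here is the minimal dataset yaml promised above. The keys are exactly the ones listed in the Prepare Your Dataset section; the dataset name, paths and class names below are only illustrative:
+
+```yaml
+# my_dataset.yaml, copied to the root of the dataset folder
+path: ../datasets/my_dataset  # dataset root directory
+train: images/train           # train images, relative to 'path'
+val: images/val               # validation images, relative to 'path'
+test:                         # test images (optional)
+nc: 2                         # number of classes
+names: ['cat', 'dog']         # class names, one entry per class index
+```
+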
+ +## 👀 Hyperparameter Optimization + +Now that we have our experiments and data versioned, it's time to take a look at what we can build on top! + +Using the code information, installed packages and environment details, the experiment itself is now **completely reproducible**. In fact, ClearML allows you to clone an experiment and even change its parameters. We can then just rerun it with these new parameters automatically, this is basically what HPO does! + +To **run hyperparameter optimization locally**, we've included a pre-made script for you. Just make sure a training task has been run at least once, so it is in the ClearML experiment manager, we will essentially clone it and change its hyperparameters. + +You'll need to fill in the ID of this `template task` in the script found at `utils/loggers/clearml/hpo.py` and then just run it :) You can change `task.execute_locally()` to `task.execute()` to put it in a ClearML queue and have a remote agent work on it instead. + +```bash +# To use optuna, install it first, otherwise you can change the optimizer to just be RandomSearch +pip install optuna +python utils/loggers/clearml/hpo.py +``` + +![HPO](https://github.com/thepycoder/clearml_screenshots/raw/main/hpo.png) + +## 🤯 Remote Execution (advanced) + +Running HPO locally is really handy, but what if we want to run our experiments on a remote machine instead? Maybe you have access to a very powerful GPU machine on-site, or you have some budget to use cloud GPUs. +This is where the ClearML Agent comes into play. Check out what the agent can do here: + +- [YouTube video](https://youtu.be/MX3BrXnaULs) +- [Documentation](https://clear.ml/docs/latest/docs/clearml_agent) + +In short: every experiment tracked by the experiment manager contains enough information to reproduce it on a different machine (installed packages, uncommitted changes etc.). So a ClearML agent does just that: it listens to a queue for incoming tasks and when it finds one, it recreates the environment and runs it while still reporting scalars, plots etc. to the experiment manager. + +You can turn any machine (a cloud VM, a local GPU machine, your own laptop ... ) into a ClearML agent by simply running: + +```bash +clearml-agent daemon --queue [--docker] +``` + +### Cloning, Editing And Enqueuing + +With our agent running, we can give it some work. Remember from the HPO section that we can clone a task and edit the hyperparameters? We can do that from the interface too! + +🪄 Clone the experiment by right-clicking it + +🎯 Edit the hyperparameters to what you wish them to be + +⏳ Enqueue the task to any of the queues by right-clicking it + +![Enqueue a task from the UI](https://github.com/thepycoder/clearml_screenshots/raw/main/enqueue.gif) + +### Executing A Task Remotely + +Now you can clone a task like we explained above, or simply mark your current script by adding `task.execute_remotely()` and on execution it will be put into a queue, for the agent to start working on! + +To run the YOLOv5 training script remotely, all you have to do is add this line to the training.py script after the clearml logger has been instantiated: + +```python +# ... +# Loggers +data_dict = None +if RANK in {-1, 0}: + loggers = Loggers(save_dir, weights, opt, hyp, LOGGER) # loggers instance + if loggers.clearml: + loggers.clearml.task.execute_remotely(queue="my_queue") # <------ ADD THIS LINE + # Data_dict is either None is user did not choose for ClearML dataset or is filled in by ClearML + data_dict = loggers.clearml.data_dict +# ... 
+``` + +When running the training script after this change, python will run the script up until that line, after which it will package the code and send it to the queue instead! + +### Autoscaling workers + +ClearML comes with autoscalers too! This tool will automatically spin up new remote machines in the cloud of your choice (AWS, GCP, Azure) and turn them into ClearML agents for you whenever there are experiments detected in the queue. Once the tasks are processed, the autoscaler will automatically shut down the remote machines, and you stop paying! + +Check out the autoscalers getting started video below. + +[![Watch the video](https://img.youtube.com/vi/j4XVMAaUt3E/0.jpg)](https://youtu.be/j4XVMAaUt3E) diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/__init__.py b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/clearml_utils.py b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/clearml_utils.py new file mode 100644 index 0000000..2764abe --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/clearml_utils.py @@ -0,0 +1,164 @@ +"""Main Logger class for ClearML experiment tracking.""" +import glob +import re +from pathlib import Path + +import numpy as np +import yaml + +from utils.plots import Annotator, colors + +try: + import clearml + from clearml import Dataset, Task + + assert hasattr(clearml, '__version__') # verify package import not local dir +except (ImportError, AssertionError): + clearml = None + + +def construct_dataset(clearml_info_string): + """Load in a clearml dataset and fill the internal data_dict with its contents. + """ + dataset_id = clearml_info_string.replace('clearml://', '') + dataset = Dataset.get(dataset_id=dataset_id) + dataset_root_path = Path(dataset.get_local_copy()) + + # We'll search for the yaml file definition in the dataset + yaml_filenames = list(glob.glob(str(dataset_root_path / '*.yaml')) + glob.glob(str(dataset_root_path / '*.yml'))) + if len(yaml_filenames) > 1: + raise ValueError('More than one yaml file was found in the dataset root, cannot determine which one contains ' + 'the dataset definition this way.') + elif len(yaml_filenames) == 0: + raise ValueError('No yaml definition found in dataset root path, check that there is a correct yaml file ' + 'inside the dataset root path.') + with open(yaml_filenames[0]) as f: + dataset_definition = yaml.safe_load(f) + + assert set(dataset_definition.keys()).issuperset( + {'train', 'test', 'val', 'nc', 'names'} + ), "The right keys were not found in the yaml file, make sure it at least has the following keys: ('train', 'test', 'val', 'nc', 'names')" + + data_dict = dict() + data_dict['train'] = str( + (dataset_root_path / dataset_definition['train']).resolve()) if dataset_definition['train'] else None + data_dict['test'] = str( + (dataset_root_path / dataset_definition['test']).resolve()) if dataset_definition['test'] else None + data_dict['val'] = str( + (dataset_root_path / dataset_definition['val']).resolve()) if dataset_definition['val'] else None + data_dict['nc'] = dataset_definition['nc'] + data_dict['names'] = dataset_definition['names'] + + return data_dict + + +class ClearmlLogger: + """Log training runs, datasets, models, and predictions to ClearML. + + This logger sends information to ClearML at app.clear.ml or to your own hosted server. 
By default, + this information includes hyperparameters, system configuration and metrics, model metrics, code information and + basic data metrics and analyses. + + By providing additional command line arguments to train.py, datasets, + models and predictions can also be logged. + """ + + def __init__(self, opt, hyp): + """ + - Initialize ClearML Task, this object will capture the experiment + - Upload dataset version to ClearML Data if opt.upload_dataset is True + + arguments: + opt (namespace) -- Commandline arguments for this run + hyp (dict) -- Hyperparameters for this run + + """ + self.current_epoch = 0 + # Keep tracked of amount of logged images to enforce a limit + self.current_epoch_logged_images = set() + # Maximum number of images to log to clearML per epoch + self.max_imgs_to_log_per_epoch = 16 + # Get the interval of epochs when bounding box images should be logged + self.bbox_interval = opt.bbox_interval + self.clearml = clearml + self.task = None + self.data_dict = None + if self.clearml: + self.task = Task.init( + project_name=opt.project if opt.project != 'runs/train' else 'YOLOv5', + task_name=opt.name if opt.name != 'exp' else 'Training', + tags=['YOLOv5'], + output_uri=True, + reuse_last_task_id=opt.exist_ok, + auto_connect_frameworks={'pytorch': False} + # We disconnect pytorch auto-detection, because we added manual model save points in the code + ) + # ClearML's hooks will already grab all general parameters + # Only the hyperparameters coming from the yaml config file + # will have to be added manually! + self.task.connect(hyp, name='Hyperparameters') + self.task.connect(opt, name='Args') + + # Make sure the code is easily remotely runnable by setting the docker image to use by the remote agent + self.task.set_base_docker('ultralytics/yolov5:latest', + docker_arguments='--ipc=host -e="CLEARML_AGENT_SKIP_PYTHON_ENV_INSTALL=1"', + docker_setup_bash_script='pip install clearml') + + # Get ClearML Dataset Version if requested + if opt.data.startswith('clearml://'): + # data_dict should have the following keys: + # names, nc (number of classes), test, train, val (all three relative paths to ../datasets) + self.data_dict = construct_dataset(opt.data) + # Set data to data_dict because wandb will crash without this information and opt is the best way + # to give it to them + opt.data = self.data_dict + + def log_debug_samples(self, files, title='Debug Samples'): + """ + Log files (images) as debug samples in the ClearML task. + + arguments: + files (List(PosixPath)) a list of file paths in PosixPath format + title (str) A title that groups together images with the same values + """ + for f in files: + if f.exists(): + it = re.search(r'_batch(\d+)', f.name) + iteration = int(it.groups()[0]) if it else 0 + self.task.get_logger().report_image(title=title, + series=f.name.replace(it.group(), ''), + local_path=str(f), + iteration=iteration) + + def log_image_with_boxes(self, image_path, boxes, class_names, image, conf_threshold=0.25): + """ + Draw the bounding boxes on a single image and report the result as a ClearML debug sample. 
+ + arguments: + image_path (PosixPath) the path the original image file + boxes (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class] + class_names (dict): dict containing mapping of class int to class name + image (Tensor): A torch tensor containing the actual image data + """ + if len(self.current_epoch_logged_images) < self.max_imgs_to_log_per_epoch and self.current_epoch >= 0: + # Log every bbox_interval times and deduplicate for any intermittend extra eval runs + if self.current_epoch % self.bbox_interval == 0 and image_path not in self.current_epoch_logged_images: + im = np.ascontiguousarray(np.moveaxis(image.mul(255).clamp(0, 255).byte().cpu().numpy(), 0, 2)) + annotator = Annotator(im=im, pil=True) + for i, (conf, class_nr, box) in enumerate(zip(boxes[:, 4], boxes[:, 5], boxes[:, :4])): + color = colors(i) + + class_name = class_names[int(class_nr)] + confidence_percentage = round(float(conf) * 100, 2) + label = f'{class_name}: {confidence_percentage}%' + + if conf > conf_threshold: + annotator.rectangle(box.cpu().numpy(), outline=color) + annotator.box_label(box.cpu().numpy(), label=label, color=color) + + annotated_image = annotator.result() + self.task.get_logger().report_image(title='Bounding Boxes', + series=image_path.name, + iteration=self.current_epoch, + image=annotated_image) + self.current_epoch_logged_images.add(image_path) diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/hpo.py b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/hpo.py new file mode 100644 index 0000000..ee518b0 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/clearml/hpo.py @@ -0,0 +1,84 @@ +from clearml import Task +# Connecting ClearML with the current process, +# from here on everything is logged automatically +from clearml.automation import HyperParameterOptimizer, UniformParameterRange +from clearml.automation.optuna import OptimizerOptuna + +task = Task.init(project_name='Hyper-Parameter Optimization', + task_name='YOLOv5', + task_type=Task.TaskTypes.optimizer, + reuse_last_task_id=False) + +# Example use case: +optimizer = HyperParameterOptimizer( + # This is the experiment we want to optimize + base_task_id='', + # here we define the hyper-parameters to optimize + # Notice: The parameter name should exactly match what you see in the UI: / + # For Example, here we see in the base experiment a section Named: "General" + # under it a parameter named "batch_size", this becomes "General/batch_size" + # If you have `argparse` for example, then arguments will appear under the "Args" section, + # and you should instead pass "Args/batch_size" + hyper_parameters=[ + UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), + UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), + UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), + UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), + UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), + UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), + UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), + UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), + UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), + 
UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), + UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), + UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), + UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), + UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), + UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), + UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), + UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), + UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), + UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), + UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), + UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], + # this is the objective metric we want to maximize/minimize + objective_metric_title='metrics', + objective_metric_series='mAP_0.5', + # now we decide if we want to maximize it or minimize it (accuracy we maximize) + objective_metric_sign='max', + # let us limit the number of concurrent experiments, + # this in turn will make sure we don't bombard the scheduler with experiments. 
+ # if we have an auto-scaler connected, this, by proxy, will limit the number of machine + max_number_of_concurrent_tasks=1, + # this is the optimizer class (actually doing the optimization) + # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) + optimizer_class=OptimizerOptuna, + # If specified only the top K performing Tasks will be kept, the others will be automatically archived + save_top_k_tasks_only=5, # 5, + compute_time_limit=None, + total_max_jobs=20, + min_iteration_per_job=None, + max_iteration_per_job=None, +) + +# report every 10 seconds, this is way too often, but we are testing here +optimizer.set_report_period(10 / 60) +# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent +# an_optimizer.start_locally(job_complete_callback=job_complete_callback) +# set the time limit for the optimization process (2 hours) +optimizer.set_time_limit(in_minutes=120.0) +# Start the optimization process in the local environment +optimizer.start_locally() +# wait until process is done (notice we are controlling the optimization process in the background) +optimizer.wait() +# make sure background optimization stopped +optimizer.stop() + +print('We are done, good bye') diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/comet/README.md b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/README.md new file mode 100644 index 0000000..47e6a45 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/README.md @@ -0,0 +1,258 @@ + + +# YOLOv5 with Comet + +This guide will cover how to use YOLOv5 with [Comet](https://bit.ly/yolov5-readme-comet2) + +# About Comet + +Comet builds tools that help data scientists, engineers, and team leaders accelerate and optimize machine learning and deep learning models. + +Track and visualize model metrics in real time, save your hyperparameters, datasets, and model checkpoints, and visualize your model predictions with [Comet Custom Panels](https://www.comet.com/docs/v2/guides/comet-dashboard/code-panels/about-panels/?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github)! +Comet makes sure you never lose track of your work and makes it easy to share results and collaborate across teams of all sizes! + +# Getting Started + +## Install Comet + +```shell +pip install comet_ml +``` + +## Configure Comet Credentials + +There are two ways to configure Comet with YOLOv5. + +You can either set your credentials through environment variables + +**Environment Variables** + +```shell +export COMET_API_KEY= +export COMET_PROJECT_NAME= # This will default to 'yolov5' +``` + +Or create a `.comet.config` file in your working directory and set your credentials there. + +**Comet Configuration File** + +``` +[comet] +api_key= +project_name= # This will default to 'yolov5' +``` + +## Run the Training Script + +```shell +# Train YOLOv5s on COCO128 for 5 epochs +python train.py --img 640 --batch 16 --epochs 5 --data coco128.yaml --weights yolov5s.pt +``` + +That's it! Comet will automatically log your hyperparameters, command line arguments, training and validation metrics. You can visualize and analyze your runs in the Comet UI + +yolo-ui + +# Try out an Example! 
+ +Check out an example of a [completed run here](https://www.comet.com/examples/comet-example-yolov5/a0e29e0e9b984e4a822db2a62d0cb357?experiment-tab=chart&showOutliers=true&smoothing=0&transformY=smoothing&xAxis=step&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +Or better yet, try it out yourself in this Colab Notebook + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1RG0WOQyxlDlo5Km8GogJpIEJlg_5lyYO?usp=sharing) + +# Log automatically + +By default, Comet will log the following items + +## Metrics + +- Box Loss, Object Loss, Classification Loss for the training and validation data +- mAP_0.5, mAP_0.5:0.95 metrics for the validation data. +- Precision and Recall for the validation data + +## Parameters + +- Model Hyperparameters +- All parameters passed through the command line options + +## Visualizations + +- Confusion Matrix of the model predictions on the validation data +- Plots for the PR and F1 curves across all classes +- Correlogram of the Class Labels + +# Configure Comet Logging + +Comet can be configured to log additional data either through command line flags passed to the training script +or through environment variables. + +```shell +export COMET_MODE=online # Set whether to run Comet in 'online' or 'offline' mode. Defaults to online +export COMET_MODEL_NAME= #Set the name for the saved model. Defaults to yolov5 +export COMET_LOG_CONFUSION_MATRIX=false # Set to disable logging a Comet Confusion Matrix. Defaults to true +export COMET_MAX_IMAGE_UPLOADS= # Controls how many total image predictions to log to Comet. Defaults to 100. +export COMET_LOG_PER_CLASS_METRICS=true # Set to log evaluation metrics for each detected class at the end of training. Defaults to false +export COMET_DEFAULT_CHECKPOINT_FILENAME= # Set this if you would like to resume training from a different checkpoint. Defaults to 'last.pt' +export COMET_LOG_BATCH_LEVEL_METRICS=true # Set this if you would like to log training metrics at the batch level. Defaults to false. +export COMET_LOG_PREDICTIONS=true # Set this to false to disable logging model predictions +``` + +## Logging Checkpoints with Comet + +Logging Models to Comet is disabled by default. To enable it, pass the `save-period` argument to the training script. This will save the +logged checkpoints to Comet based on the interval value provided by `save-period` + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--save-period 1 +``` + +## Logging Model Predictions + +By default, model predictions (images, ground truth labels and bounding boxes) will be logged to Comet. + +You can control the frequency of logged predictions and the associated images by passing the `bbox_interval` command line argument. Predictions can be visualized using Comet's Object Detection Custom Panel. This frequency corresponds to every Nth batch of data per epoch. In the example below, we are logging every 2nd batch of data for each epoch. + +**Note:** The YOLOv5 validation dataloader will default to a batch size of 32, so you will have to set the logging frequency accordingly. 
+ +Here is an [example project using the Panel](https://www.comet.com/examples/comet-example-yolov5?shareable=YcwMiJaZSXfcEXpGOHDD12vA1&utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 2 +``` + +### Controlling the number of Prediction Images logged to Comet + +When logging predictions from YOLOv5, Comet will log the images associated with each set of predictions. By default a maximum of 100 validation images are logged. You can increase or decrease this number using the `COMET_MAX_IMAGE_UPLOADS` environment variable. + +```shell +env COMET_MAX_IMAGE_UPLOADS=200 python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--bbox_interval 1 +``` + +### Logging Class Level Metrics + +Use the `COMET_LOG_PER_CLASS_METRICS` environment variable to log mAP, precision, recall, f1 for each class. + +```shell +env COMET_LOG_PER_CLASS_METRICS=true python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt +``` + +## Uploading a Dataset to Comet Artifacts + +If you would like to store your data using [Comet Artifacts](https://www.comet.com/docs/v2/guides/data-management/using-artifacts/#learn-more?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github), you can do so using the `upload_dataset` flag. + +The dataset be organized in the way described in the [YOLOv5 documentation](https://docs.ultralytics.com/tutorials/train-custom-datasets/#3-organize-directories). The dataset config `yaml` file must follow the same format as that of the `coco128.yaml` file. + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data coco128.yaml \ +--weights yolov5s.pt \ +--upload_dataset +``` + +You can find the uploaded dataset in the Artifacts tab in your Comet Workspace +artifact-1 + +You can preview the data directly in the Comet UI. +artifact-2 + +Artifacts are versioned and also support adding metadata about the dataset. Comet will automatically log the metadata from your dataset `yaml` file +artifact-3 + +### Using a saved Artifact + +If you would like to use a dataset from Comet Artifacts, set the `path` variable in your dataset `yaml` file to point to the following Artifact resource URL. + +``` +# contents of artifact.yaml file +path: "comet:///:" +``` + +Then pass this file to your training script in the following way + +```shell +python train.py \ +--img 640 \ +--batch 16 \ +--epochs 5 \ +--data artifact.yaml \ +--weights yolov5s.pt +``` + +Artifacts also allow you to track the lineage of data as it flows through your Experimentation workflow. Here you can see a graph that shows you all the experiments that have used your uploaded dataset. +artifact-4 + +## Resuming a Training Run + +If your training run is interrupted for any reason, e.g. disrupted internet connection, you can resume the run using the `resume` flag and the Comet Run Path. + +The Run Path has the following format `comet:////`. + +This will restore the run to its state before the interruption, which includes restoring the model from a checkpoint, restoring all hyperparameters and training arguments and downloading Comet dataset Artifacts if they were used in the original run. 
The resumed run will continue logging to the existing Experiment in the Comet UI + +```shell +python train.py \ +--resume "comet://" +``` + +## Hyperparameter Search with the Comet Optimizer + +YOLOv5 is also integrated with Comet's Optimizer, making is simple to visualize hyperparameter sweeps in the Comet UI. + +### Configuring an Optimizer Sweep + +To configure the Comet Optimizer, you will have to create a JSON file with the information about the sweep. An example file has been provided in `utils/loggers/comet/optimizer_config.json` + +```shell +python utils/loggers/comet/hpo.py \ + --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" +``` + +The `hpo.py` script accepts the same arguments as `train.py`. If you wish to pass additional arguments to your sweep simply add them after +the script. + +```shell +python utils/loggers/comet/hpo.py \ + --comet_optimizer_config "utils/loggers/comet/optimizer_config.json" \ + --save-period 1 \ + --bbox_interval 1 +``` + +### Running a Sweep in Parallel + +```shell +comet optimizer -j utils/loggers/comet/hpo.py \ + utils/loggers/comet/optimizer_config.json" +``` + +### Visualizing Results + +Comet provides a number of ways to visualize the results of your sweep. Take a look at a [project with a completed sweep here](https://www.comet.com/examples/comet-example-yolov5/view/PrlArHGuuhDTKC1UuBmTtOSXD/panels?utm_source=yolov5&utm_medium=partner&utm_campaign=partner_yolov5_2022&utm_content=github) + +hyperparameter-yolo diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/comet/__init__.py b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/__init__.py new file mode 100644 index 0000000..d459984 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/__init__.py @@ -0,0 +1,508 @@ +import glob +import json +import logging +import os +import sys +from pathlib import Path + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +try: + import comet_ml + + # Project Configuration + config = comet_ml.config.get_config() + COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') +except (ModuleNotFoundError, ImportError): + comet_ml = None + COMET_PROJECT_NAME = None + +import PIL +import torch +import torchvision.transforms as T +import yaml + +from utils.dataloaders import img2label_paths +from utils.general import check_dataset, scale_boxes, xywh2xyxy +from utils.metrics import box_iou + +COMET_PREFIX = 'comet://' + +COMET_MODE = os.getenv('COMET_MODE', 'online') + +# Model Saving Settings +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') + +# Dataset Artifact Settings +COMET_UPLOAD_DATASET = os.getenv('COMET_UPLOAD_DATASET', 'false').lower() == 'true' + +# Evaluation Settings +COMET_LOG_CONFUSION_MATRIX = os.getenv('COMET_LOG_CONFUSION_MATRIX', 'true').lower() == 'true' +COMET_LOG_PREDICTIONS = os.getenv('COMET_LOG_PREDICTIONS', 'true').lower() == 'true' +COMET_MAX_IMAGE_UPLOADS = int(os.getenv('COMET_MAX_IMAGE_UPLOADS', 100)) + +# Confusion Matrix Settings +CONF_THRES = float(os.getenv('CONF_THRES', 0.001)) +IOU_THRES = float(os.getenv('IOU_THRES', 0.6)) + +# Batch Logging Settings +COMET_LOG_BATCH_METRICS = os.getenv('COMET_LOG_BATCH_METRICS', 'false').lower() == 'true' +COMET_BATCH_LOGGING_INTERVAL = os.getenv('COMET_BATCH_LOGGING_INTERVAL', 1) 
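+# Note: when these interval variables are set in the environment, os.getenv returns them as strings;
+# an int() cast may be needed before using them as numeric logging intervals.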
+COMET_PREDICTION_LOGGING_INTERVAL = os.getenv('COMET_PREDICTION_LOGGING_INTERVAL', 1) +COMET_LOG_PER_CLASS_METRICS = os.getenv('COMET_LOG_PER_CLASS_METRICS', 'false').lower() == 'true' + +RANK = int(os.getenv('RANK', -1)) + +to_pil = T.ToPILImage() + + +class CometLogger: + """Log metrics, parameters, source code, models and much more + with Comet + """ + + def __init__(self, opt, hyp, run_id=None, job_type='Training', **experiment_kwargs) -> None: + self.job_type = job_type + self.opt = opt + self.hyp = hyp + + # Comet Flags + self.comet_mode = COMET_MODE + + self.save_model = opt.save_period > -1 + self.model_name = COMET_MODEL_NAME + + # Batch Logging Settings + self.log_batch_metrics = COMET_LOG_BATCH_METRICS + self.comet_log_batch_interval = COMET_BATCH_LOGGING_INTERVAL + + # Dataset Artifact Settings + self.upload_dataset = self.opt.upload_dataset if self.opt.upload_dataset else COMET_UPLOAD_DATASET + self.resume = self.opt.resume + + # Default parameters to pass to Experiment objects + self.default_experiment_kwargs = { + 'log_code': False, + 'log_env_gpu': True, + 'log_env_cpu': True, + 'project_name': COMET_PROJECT_NAME,} + self.default_experiment_kwargs.update(experiment_kwargs) + self.experiment = self._get_experiment(self.comet_mode, run_id) + + self.data_dict = self.check_dataset(self.opt.data) + self.class_names = self.data_dict['names'] + self.num_classes = self.data_dict['nc'] + + self.logged_images_count = 0 + self.max_images = COMET_MAX_IMAGE_UPLOADS + + if run_id is None: + self.experiment.log_other('Created from', 'YOLOv5') + if not isinstance(self.experiment, comet_ml.OfflineExperiment): + workspace, project_name, experiment_id = self.experiment.url.split('/')[-3:] + self.experiment.log_other( + 'Run Path', + f'{workspace}/{project_name}/{experiment_id}', + ) + self.log_parameters(vars(opt)) + self.log_parameters(self.opt.hyp) + self.log_asset_data( + self.opt.hyp, + name='hyperparameters.json', + metadata={'type': 'hyp-config-file'}, + ) + self.log_asset( + f'{self.opt.save_dir}/opt.yaml', + metadata={'type': 'opt-config-file'}, + ) + + self.comet_log_confusion_matrix = COMET_LOG_CONFUSION_MATRIX + + if hasattr(self.opt, 'conf_thres'): + self.conf_thres = self.opt.conf_thres + else: + self.conf_thres = CONF_THRES + if hasattr(self.opt, 'iou_thres'): + self.iou_thres = self.opt.iou_thres + else: + self.iou_thres = IOU_THRES + + self.log_parameters({'val_iou_threshold': self.iou_thres, 'val_conf_threshold': self.conf_thres}) + + self.comet_log_predictions = COMET_LOG_PREDICTIONS + if self.opt.bbox_interval == -1: + self.comet_log_prediction_interval = 1 if self.opt.epochs < 10 else self.opt.epochs // 10 + else: + self.comet_log_prediction_interval = self.opt.bbox_interval + + if self.comet_log_predictions: + self.metadata_dict = {} + self.logged_image_names = [] + + self.comet_log_per_class_metrics = COMET_LOG_PER_CLASS_METRICS + + self.experiment.log_others({ + 'comet_mode': COMET_MODE, + 'comet_max_image_uploads': COMET_MAX_IMAGE_UPLOADS, + 'comet_log_per_class_metrics': COMET_LOG_PER_CLASS_METRICS, + 'comet_log_batch_metrics': COMET_LOG_BATCH_METRICS, + 'comet_log_confusion_matrix': COMET_LOG_CONFUSION_MATRIX, + 'comet_model_name': COMET_MODEL_NAME,}) + + # Check if running the Experiment with the Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + self.experiment.log_other('optimizer_id', self.opt.comet_optimizer_id) + self.experiment.log_other('optimizer_objective', self.opt.comet_optimizer_objective) + self.experiment.log_other('optimizer_metric', 
self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_parameters', json.dumps(self.hyp)) + + def _get_experiment(self, mode, experiment_id=None): + if mode == 'offline': + if experiment_id is not None: + return comet_ml.ExistingOfflineExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.OfflineExperiment(**self.default_experiment_kwargs,) + + else: + try: + if experiment_id is not None: + return comet_ml.ExistingExperiment( + previous_experiment=experiment_id, + **self.default_experiment_kwargs, + ) + + return comet_ml.Experiment(**self.default_experiment_kwargs) + + except ValueError: + logger.warning('COMET WARNING: ' + 'Comet credentials have not been set. ' + 'Comet will default to offline logging. ' + 'Please set your credentials to enable online logging.') + return self._get_experiment('offline', experiment_id) + + return + + def log_metrics(self, log_dict, **kwargs): + self.experiment.log_metrics(log_dict, **kwargs) + + def log_parameters(self, log_dict, **kwargs): + self.experiment.log_parameters(log_dict, **kwargs) + + def log_asset(self, asset_path, **kwargs): + self.experiment.log_asset(asset_path, **kwargs) + + def log_asset_data(self, asset, **kwargs): + self.experiment.log_asset_data(asset, **kwargs) + + def log_image(self, img, **kwargs): + self.experiment.log_image(img, **kwargs) + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + if not self.save_model: + return + + model_metadata = { + 'fitness_score': fitness_score[-1], + 'epochs_trained': epoch + 1, + 'save_period': opt.save_period, + 'total_epochs': opt.epochs,} + + model_files = glob.glob(f'{path}/*.pt') + for model_path in model_files: + name = Path(model_path).name + + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + metadata=model_metadata, + overwrite=True, + ) + + def check_dataset(self, data_file): + with open(data_file) as f: + data_config = yaml.safe_load(f) + + if data_config['path'].startswith(COMET_PREFIX): + path = data_config['path'].replace(COMET_PREFIX, '') + data_dict = self.download_dataset_artifact(path) + + return data_dict + + self.log_asset(self.opt.data, metadata={'type': 'data-config-file'}) + + return check_dataset(data_file) + + def log_predictions(self, image, labelsn, path, shape, predn): + if self.logged_images_count >= self.max_images: + return + detections = predn[predn[:, 4] > self.conf_thres] + iou = box_iou(labelsn[:, 1:], detections[:, :4]) + mask, _ = torch.where(iou > self.iou_thres) + if len(mask) == 0: + return + + filtered_detections = detections[mask] + filtered_labels = labelsn[mask] + + image_id = path.split('/')[-1].split('.')[0] + image_name = f'{image_id}_curr_epoch_{self.experiment.curr_epoch}' + if image_name not in self.logged_image_names: + native_scale_image = PIL.Image.open(path) + self.log_image(native_scale_image, name=image_name) + self.logged_image_names.append(image_name) + + metadata = [] + for cls, *xyxy in filtered_labels.tolist(): + metadata.append({ + 'label': f'{self.class_names[int(cls)]}-gt', + 'score': 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) + for *xyxy, conf, cls in filtered_detections.tolist(): + metadata.append({ + 'label': f'{self.class_names[int(cls)]}', + 'score': conf * 100, + 'box': { + 'x': xyxy[0], + 'y': xyxy[1], + 'x2': xyxy[2], + 'y2': xyxy[3]},}) + + self.metadata_dict[image_name] = metadata + self.logged_images_count += 1 + + return + + def 
preprocess_prediction(self, image, labels, shape, pred): + nl, _ = labels.shape[0], pred.shape[0] + + # Predictions + if self.opt.single_cls: + pred[:, 5] = 0 + + predn = pred.clone() + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) + + labelsn = None + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(image.shape[1:], tbox, shape[0], shape[1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + scale_boxes(image.shape[1:], predn[:, :4], shape[0], shape[1]) # native-space pred + + return predn, labelsn + + def add_assets_to_artifact(self, artifact, path, asset_path, split): + img_paths = sorted(glob.glob(f'{asset_path}/*')) + label_paths = img2label_paths(img_paths) + + for image_file, label_file in zip(img_paths, label_paths): + image_logical_path, label_logical_path = map(lambda x: os.path.relpath(x, path), [image_file, label_file]) + + try: + artifact.add(image_file, logical_path=image_logical_path, metadata={'split': split}) + artifact.add(label_file, logical_path=label_logical_path, metadata={'split': split}) + except ValueError as e: + logger.error('COMET ERROR: Error adding file to Artifact. Skipping file.') + logger.error(f'COMET ERROR: {e}') + continue + + return artifact + + def upload_dataset_artifact(self): + dataset_name = self.data_dict.get('dataset_name', 'yolov5-dataset') + path = str((ROOT / Path(self.data_dict['path'])).resolve()) + + metadata = self.data_dict.copy() + for key in ['train', 'val', 'test']: + split_path = metadata.get(key) + if split_path is not None: + metadata[key] = split_path.replace(path, '') + + artifact = comet_ml.Artifact(name=dataset_name, artifact_type='dataset', metadata=metadata) + for key in metadata.keys(): + if key in ['train', 'val', 'test']: + if isinstance(self.upload_dataset, str) and (key != self.upload_dataset): + continue + + asset_path = self.data_dict.get(key) + if asset_path is not None: + artifact = self.add_assets_to_artifact(artifact, path, asset_path, key) + + self.experiment.log_artifact(artifact) + + return + + def download_dataset_artifact(self, artifact_path): + logged_artifact = self.experiment.get_artifact(artifact_path) + artifact_save_dir = str(Path(self.opt.save_dir) / logged_artifact.name) + logged_artifact.download(artifact_save_dir) + + metadata = logged_artifact.metadata + data_dict = metadata.copy() + data_dict['path'] = artifact_save_dir + + metadata_names = metadata.get('names') + if type(metadata_names) == dict: + data_dict['names'] = {int(k): v for k, v in metadata.get('names').items()} + elif type(metadata_names) == list: + data_dict['names'] = {int(k): v for k, v in zip(range(len(metadata_names)), metadata_names)} + else: + raise "Invalid 'names' field in dataset yaml file. 
Please use a list or dictionary" + + data_dict = self.update_data_paths(data_dict) + return data_dict + + def update_data_paths(self, data_dict): + path = data_dict.get('path', '') + + for split in ['train', 'val', 'test']: + if data_dict.get(split): + split_path = data_dict.get(split) + data_dict[split] = (f'{path}/{split_path}' if isinstance(split, str) else [ + f'{path}/{x}' for x in split_path]) + + return data_dict + + def on_pretrain_routine_end(self, paths): + if self.opt.resume: + return + + for path in paths: + self.log_asset(str(path)) + + if self.upload_dataset: + if not self.resume: + self.upload_dataset_artifact() + + return + + def on_train_start(self): + self.log_parameters(self.hyp) + + def on_train_epoch_start(self): + return + + def on_train_epoch_end(self, epoch): + self.experiment.curr_epoch = epoch + + return + + def on_train_batch_start(self): + return + + def on_train_batch_end(self, log_dict, step): + self.experiment.curr_step = step + if self.log_batch_metrics and (step % self.comet_log_batch_interval == 0): + self.log_metrics(log_dict, step=step) + + return + + def on_train_end(self, files, save_dir, last, best, epoch, results): + if self.comet_log_predictions: + curr_epoch = self.experiment.curr_epoch + self.experiment.log_asset_data(self.metadata_dict, 'image-metadata.json', epoch=curr_epoch) + + for f in files: + self.log_asset(f, metadata={'epoch': epoch}) + self.log_asset(f'{save_dir}/results.csv', metadata={'epoch': epoch}) + + if not self.opt.evolve: + model_path = str(best if best.exists() else last) + name = Path(model_path).name + if self.save_model: + self.experiment.log_model( + self.model_name, + file_or_folder=model_path, + file_name=name, + overwrite=True, + ) + + # Check if running Experiment with Comet Optimizer + if hasattr(self.opt, 'comet_optimizer_id'): + metric = results.get(self.opt.comet_optimizer_metric) + self.experiment.log_other('optimizer_metric_value', metric) + + self.finish_run() + + def on_val_start(self): + return + + def on_val_batch_start(self): + return + + def on_val_batch_end(self, batch_i, images, targets, paths, shapes, outputs): + if not (self.comet_log_predictions and ((batch_i + 1) % self.comet_log_prediction_interval == 0)): + return + + for si, pred in enumerate(outputs): + if len(pred) == 0: + continue + + image = images[si] + labels = targets[targets[:, 0] == si, 1:] + shape = shapes[si] + path = paths[si] + predn, labelsn = self.preprocess_prediction(image, labels, shape, pred) + if labelsn is not None: + self.log_predictions(image, labelsn, path, shape, predn) + + return + + def on_val_end(self, nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix): + if self.comet_log_per_class_metrics: + if self.num_classes > 1: + for i, c in enumerate(ap_class): + class_name = self.class_names[c] + self.experiment.log_metrics( + { + 'mAP@.5': ap50[i], + 'mAP@.5:.95': ap[i], + 'precision': p[i], + 'recall': r[i], + 'f1': f1[i], + 'true_positives': tp[i], + 'false_positives': fp[i], + 'support': nt[c]}, + prefix=class_name) + + if self.comet_log_confusion_matrix: + epoch = self.experiment.curr_epoch + class_names = list(self.class_names.values()) + class_names.append('background') + num_classes = len(class_names) + + self.experiment.log_confusion_matrix( + matrix=confusion_matrix.matrix, + max_categories=num_classes, + labels=class_names, + epoch=epoch, + column_label='Actual Category', + row_label='Predicted Category', + file_name=f'confusion-matrix-epoch-{epoch}.json', + ) + + def on_fit_epoch_end(self, result, epoch): + 
self.log_metrics(result, epoch=epoch) + + def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): + if ((epoch + 1) % self.opt.save_period == 0 and not final_epoch) and self.opt.save_period != -1: + self.log_model(last.parent, self.opt, epoch, fi, best_model=best_fitness == fi) + + def on_params_update(self, params): + self.log_parameters(params) + + def finish_run(self): + self.experiment.end() diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/comet/comet_utils.py b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/comet_utils.py new file mode 100644 index 0000000..2760076 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/comet_utils.py @@ -0,0 +1,150 @@ +import logging +import os +from urllib.parse import urlparse + +try: + import comet_ml +except (ModuleNotFoundError, ImportError): + comet_ml = None + +import yaml + +logger = logging.getLogger(__name__) + +COMET_PREFIX = 'comet://' +COMET_MODEL_NAME = os.getenv('COMET_MODEL_NAME', 'yolov5') +COMET_DEFAULT_CHECKPOINT_FILENAME = os.getenv('COMET_DEFAULT_CHECKPOINT_FILENAME', 'last.pt') + + +def download_model_checkpoint(opt, experiment): + model_dir = f'{opt.project}/{experiment.name}' + os.makedirs(model_dir, exist_ok=True) + + model_name = COMET_MODEL_NAME + model_asset_list = experiment.get_model_asset_list(model_name) + + if len(model_asset_list) == 0: + logger.error(f'COMET ERROR: No checkpoints found for model name : {model_name}') + return + + model_asset_list = sorted( + model_asset_list, + key=lambda x: x['step'], + reverse=True, + ) + logged_checkpoint_map = {asset['fileName']: asset['assetId'] for asset in model_asset_list} + + resource_url = urlparse(opt.weights) + checkpoint_filename = resource_url.query + + if checkpoint_filename: + asset_id = logged_checkpoint_map.get(checkpoint_filename) + else: + asset_id = logged_checkpoint_map.get(COMET_DEFAULT_CHECKPOINT_FILENAME) + checkpoint_filename = COMET_DEFAULT_CHECKPOINT_FILENAME + + if asset_id is None: + logger.error(f'COMET ERROR: Checkpoint {checkpoint_filename} not found in the given Experiment') + return + + try: + logger.info(f'COMET INFO: Downloading checkpoint {checkpoint_filename}') + asset_filename = checkpoint_filename + + model_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + model_download_path = f'{model_dir}/{asset_filename}' + with open(model_download_path, 'wb') as f: + f.write(model_binary) + + opt.weights = model_download_path + + except Exception as e: + logger.warning('COMET WARNING: Unable to download checkpoint from Comet') + logger.exception(e) + + +def set_opt_parameters(opt, experiment): + """Update the opts Namespace with parameters + from Comet's ExistingExperiment when resuming a run + + Args: + opt (argparse.Namespace): Namespace of command line options + experiment (comet_ml.APIExperiment): Comet API Experiment object + """ + asset_list = experiment.get_asset_list() + resume_string = opt.resume + + for asset in asset_list: + if asset['fileName'] == 'opt.yaml': + asset_id = asset['assetId'] + asset_binary = experiment.get_asset(asset_id, return_type='binary', stream=False) + opt_dict = yaml.safe_load(asset_binary) + for key, value in opt_dict.items(): + setattr(opt, key, value) + opt.resume = resume_string + + # Save hyperparameters to YAML file + # Necessary to pass checks in training script + save_dir = f'{opt.project}/{experiment.name}' + os.makedirs(save_dir, exist_ok=True) + + hyp_yaml_path = f'{save_dir}/hyp.yaml' + with 
open(hyp_yaml_path, 'w') as f: + yaml.dump(opt.hyp, f) + opt.hyp = hyp_yaml_path + + +def check_comet_weights(opt): + """Downloads model weights from Comet and updates the + weights path to point to saved weights location + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if weights are successfully downloaded + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.weights, str): + if opt.weights.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.weights) + experiment_path = f'{resource.netloc}{resource.path}' + experiment = api.get(experiment_path) + download_model_checkpoint(opt, experiment) + return True + + return None + + +def check_comet_resume(opt): + """Restores run parameters to its original state based on the model checkpoint + and logged Experiment parameters. + + Args: + opt (argparse.Namespace): Command Line arguments passed + to YOLOv5 training script + + Returns: + None/bool: Return True if the run is restored successfully + else return None + """ + if comet_ml is None: + return + + if isinstance(opt.resume, str): + if opt.resume.startswith(COMET_PREFIX): + api = comet_ml.API() + resource = urlparse(opt.resume) + experiment_path = f'{resource.netloc}{resource.path}' + experiment = api.get(experiment_path) + set_opt_parameters(opt, experiment) + download_model_checkpoint(opt, experiment) + + return True + + return None diff --git a/src/train_utils/train_models/models/yolov5/utils/loggers/comet/hpo.py b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/hpo.py new file mode 100644 index 0000000..fc49115 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loggers/comet/hpo.py @@ -0,0 +1,118 @@ +import argparse +import json +import logging +import os +import sys +from pathlib import Path + +import comet_ml + +logger = logging.getLogger(__name__) + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[3] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH + +from train import train +from utils.callbacks import Callbacks +from utils.general import increment_path +from utils.torch_utils import select_device + +# Project Configuration +config = comet_ml.config.get_config() +COMET_PROJECT_NAME = config.get_string(os.getenv('COMET_PROJECT_NAME'), 'comet.project_name', default='yolov5') + + +def get_args(known=False): + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch-low.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300, help='total training epochs') + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs, -1 for autobatch') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--noval', 
action='store_true', help='only validate final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable AutoAnchor') + parser.add_argument('--noplots', action='store_true', help='save no plot files') + parser.add_argument('--evolve', type=int, nargs='?', const=300, help='evolve hyperparameters for x generations') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--optimizer', type=str, choices=['SGD', 'Adam', 'AdamW'], default='SGD', help='optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--project', default=ROOT / 'runs/train', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--cos-lr', action='store_true', help='cosine LR scheduler') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--patience', type=int, default=100, help='EarlyStopping patience (epochs without improvement)') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone=10, first3=0 1 2') + parser.add_argument('--save-period', type=int, default=-1, help='Save checkpoint every x epochs (disabled if < 1)') + parser.add_argument('--seed', type=int, default=0, help='Global training seed') + parser.add_argument('--local_rank', type=int, default=-1, help='Automatic DDP Multi-GPU argument, do not modify') + + # Weights & Biases arguments + parser.add_argument('--entity', default=None, help='W&B: Entity') + parser.add_argument('--upload_dataset', nargs='?', const=True, default=False, help='W&B: Upload data, "val" option') + parser.add_argument('--bbox_interval', type=int, default=-1, help='W&B: Set bounding-box image logging interval') + parser.add_argument('--artifact_alias', type=str, default='latest', help='W&B: Version of dataset artifact to use') + + # Comet Arguments + parser.add_argument('--comet_optimizer_config', type=str, help='Comet: Path to a Comet Optimizer Config File.') + parser.add_argument('--comet_optimizer_id', type=str, help='Comet: ID of the Comet Optimizer sweep.') + parser.add_argument('--comet_optimizer_objective', type=str, help="Comet: Set to 'minimize' or 'maximize'.") + parser.add_argument('--comet_optimizer_metric', type=str, help='Comet: Metric to Optimize.') + parser.add_argument('--comet_optimizer_workers', + type=int, + default=1, + help='Comet: Number of Parallel Workers to use with the Comet Optimizer.') + + return parser.parse_known_args()[0] if known else parser.parse_args() + + +def run(parameters, opt): + hyp_dict = {k: v for k, v in 
parameters.items() if k not in ['epochs', 'batch_size']} + + opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok or opt.evolve)) + opt.batch_size = parameters.get('batch_size') + opt.epochs = parameters.get('epochs') + + device = select_device(opt.device, batch_size=opt.batch_size) + train(hyp_dict, opt, device, callbacks=Callbacks()) + + +if __name__ == '__main__': + opt = get_args(known=True) + + opt.weights = str(opt.weights) + opt.cfg = str(opt.cfg) + opt.data = str(opt.data) + opt.project = str(opt.project) + + optimizer_id = os.getenv('COMET_OPTIMIZER_ID') + if optimizer_id is None: + with open(opt.comet_optimizer_config) as f: + optimizer_config = json.load(f) + optimizer = comet_ml.Optimizer(optimizer_config) + else: + optimizer = comet_ml.Optimizer(optimizer_id) + + opt.comet_optimizer_id = optimizer.id + status = optimizer.status() + + opt.comet_optimizer_objective = status['spec']['objective'] + opt.comet_optimizer_metric = status['spec']['metric'] + + logger.info('COMET INFO: Starting Hyperparameter Sweep') + for parameter in optimizer.get_parameters(): + run(parameter['parameters'], opt) diff --git a/src/train_utils/train_models/models/yolov5/utils/loss.py b/src/train_utils/train_models/models/yolov5/utils/loss.py new file mode 100644 index 0000000..9b9c3d9 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/loss.py @@ -0,0 +1,234 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Loss functions +""" + +import torch +import torch.nn as nn + +from utils.metrics import bbox_iou +from utils.torch_utils import de_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. + def __init__(self, alpha=0.05): + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super().__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class ComputeLoss: + sort_obj_iou = False + + # Compute losses + def __init__(self, model, autobalance=False): + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.anchors = m.anchors + self.device = device + + def __call__(self, p, targets): # predictions, targets + lcls = torch.zeros(1, device=self.device) # class loss + lbox = torch.zeros(1, device=self.device) # box loss + lobj = torch.zeros(1, device=self.device) # object loss + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, 
gridx + tobj = torch.zeros(pi.shape[:4], dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + # pxy, pwh, _, pcls = pi[b, a, gj, gi].tensor_split((2, 4, 5), dim=1) # faster, requires torch 1.8.0 + pxy, pwh, _, pcls = pi[b, a, gj, gi].split((2, 2, 1, self.nc), 1) # target-subset of predictions + + # Regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, a = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + a, (b, c) = a.long().view(-1), bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, 
grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch diff --git a/src/train_utils/train_models/models/yolov5/utils/metrics.py b/src/train_utils/train_models/models/yolov5/utils/metrics.py new file mode 100644 index 0000000..95f364c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/metrics.py @@ -0,0 +1,360 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import math +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from utils import TryExcept, threaded + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def smooth(y, f=0.05): + # Box filter of fraction f + nf = round(len(y) * f * 2) // 2 + 1 # number of filter elements (must be odd) + p = np.ones(nf // 2) # ones padding + yp = np.concatenate((p * y[0], y, p * y[-1]), 0) # y padded + return np.convolve(yp, np.ones(nf) / nf, mode='valid') # y-smoothed + + +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=(), eps=1e-16, prefix=''): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). + plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes, nt = np.unique(target_cls, return_counts=True) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = nt[ci] # number of labels + n_p = i.sum() # number of predictions + if n_p == 0 or n_l == 0: + continue + + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + eps) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j]) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + eps) + names = [v for k, v in names.items() if k in unique_classes] # list: only classes that have data + names = dict(enumerate(names)) # to dict + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / f'{prefix}PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / f'{prefix}F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / f'{prefix}P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 
f'{prefix}R_curve.png', names, ylabel='Recall') + + i = smooth(f1.mean(0), 0.1).argmax() # max F1 index + p, r, f1 = p[:, i], r[:, i], f1[:, i] + tp = (r * nt).round() # true positives + fp = (tp / (p + eps) - tp).round() # false positives + return tp, fp, p, r, f1, ap, unique_classes.astype(int) + + +def compute_ap(recall, precision): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + mrec = np.concatenate(([0.0], recall, [1.0])) + mpre = np.concatenate(([1.0], precision, [0.0])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + if detections is None: + gt_classes = labels.int() + for gc in gt_classes: + self.matrix[self.nc, gc] += 1 # background FN + return + + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(int) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[detection_classes[m1[j]], gc] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # true background + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # predicted background + + def tp_fp(self): + tp = self.matrix.diagonal() # true positives + fp = self.matrix.sum(1) - tp # false positives + # fn = self.matrix.sum(0) - tp # false negatives (missed detections) + return tp[:-1], fp[:-1] # remove background class + + @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure') + def plot(self, normalize=True, save_dir='', names=()): + import seaborn as sn + + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns + 
array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True) + nc, nn = self.nc, len(names) # number of classes, names + sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size + labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels + ticklabels = (names + ['background']) if labels else 'auto' + with warnings.catch_warnings(): + warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap(array, + ax=ax, + annot=nc < 30, + annot_kws={ + 'size': 8}, + cmap='Blues', + fmt='.2f', + square=True, + vmin=0.0, + xticklabels=ticklabels, + yticklabels=ticklabels).set_facecolor((1, 1, 1)) + ax.set_xlabel('True') + ax.set_ylabel('Predicted') + ax.set_title('Confusion Matrix') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + plt.close(fig) + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +def bbox_iou(box1, box2, xywh=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns Intersection over Union (IoU) of box1(1,4) to box2(n,4) + + # Get the coordinates of bounding boxes + if xywh: # transform from xywh to xyxy + (x1, y1, w1, h1), (x2, y2, w2, h2) = box1.chunk(4, -1), box2.chunk(4, -1) + w1_, h1_, w2_, h2_ = w1 / 2, h1 / 2, w2 / 2, h2 / 2 + b1_x1, b1_x2, b1_y1, b1_y2 = x1 - w1_, x1 + w1_, y1 - h1_, y1 + h1_ + b2_x1, b2_x2, b2_y1, b2_y2 = x2 - w2_, x2 + w2_, y2 - h2_, y2 + h2_ + else: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1.chunk(4, -1) + b2_x1, b2_y1, b2_x2, b2_y2 = box2.chunk(4, -1) + w1, h1 = b1_x2 - b1_x1, (b1_y2 - b1_y1).clamp(eps) + w2, h2 = b2_x2 - b2_x1, (b2_y2 - b2_y1).clamp(eps) + + # Intersection area + inter = (b1_x2.minimum(b2_x2) - b1_x1.maximum(b2_x1)).clamp(0) * \ + (b1_y2.minimum(b2_y2) - b1_y1.maximum(b2_y1)).clamp(0) + + # Union Area + union = w1 * h1 + w2 * h2 - inter + eps + + # IoU + iou = inter / union + if CIoU or DIoU or GIoU: + cw = b1_x2.maximum(b2_x2) - b1_x1.minimum(b2_x1) # convex (smallest enclosing box) width + ch = b1_y2.maximum(b2_y2) - b1_y1.minimum(b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center dist ** 2 + if CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * (torch.atan(w2 / h2) - torch.atan(w1 / h1)).pow(2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + return iou - rho2 / c2 # DIoU + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU https://arxiv.org/pdf/1902.09630.pdf + return iou # IoU + + +def box_iou(box1, box2, eps=1e-7): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. 
+ Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps) + + +def bbox_ioa(box1, box2, eps=1e-7): + """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 + box1: np.array of shape(4) + box2: np.array of shape(nx4) + returns: np.array of shape(n) + """ + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1 + b2_x1, b2_y1, b2_x2, b2_y2 = box2.T + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps + + # Intersection over box2 area + return inter_area / box2_area + + +def wh_iou(wh1, wh2, eps=1e-7): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter + eps) # iou = inter / (area1 + area2 - inter) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + + +@threaded +def plot_pr_curve(px, py, ap, save_dir=Path('pr_curve.png'), names=()): + # Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title('Precision-Recall Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) + + +@threaded +def plot_mc_curve(px, py, save_dir=Path('mc_curve.png'), names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = smooth(py.mean(0), 0.05) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left') + ax.set_title(f'{ylabel}-Confidence Curve') + fig.savefig(save_dir, dpi=250) + plt.close(fig) diff --git a/src/train_utils/train_models/models/yolov5/utils/plots.py b/src/train_utils/train_models/models/yolov5/utils/plots.py new file mode 100644 index 0000000..24c618c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/plots.py @@ -0,0 +1,560 @@ +# YOLOv5 🚀 by 
Ultralytics, GPL-3.0 license +""" +Plotting utils +""" + +import contextlib +import math +import os +from copy import copy +from pathlib import Path +from urllib.error import URLError + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sn +import torch +from PIL import Image, ImageDraw, ImageFont + +from utils import TryExcept, threaded +from utils.general import (CONFIG_DIR, FONT, LOGGER, check_font, check_requirements, clip_boxes, increment_path, + is_ascii, xywh2xyxy, xyxy2xywh) +from utils.metrics import fitness +from utils.segment.general import scale_image + +# Settings +RANK = int(os.getenv('RANK', -1)) +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +class Colors: + # Ultralytics color palette https://ultralytics.com/ + def __init__(self): + # hex = matplotlib.colors.TABLEAU_COLORS.values() + hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', + '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') + self.palette = [self.hex2rgb(f'#{c}') for c in hexs] + self.n = len(self.palette) + + def __call__(self, i, bgr=False): + c = self.palette[int(i) % self.n] + return (c[2], c[1], c[0]) if bgr else c + + @staticmethod + def hex2rgb(h): # rgb order (PIL) + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + +colors = Colors() # create instance for 'from utils.plots import colors' + + +def check_pil_font(font=FONT, size=10): + # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary + font = Path(font) + font = font if font.exists() else (CONFIG_DIR / font.name) + try: + return ImageFont.truetype(str(font) if font.exists() else font.name, size) + except Exception: # download if missing + try: + check_font(font) + return ImageFont.truetype(str(font), size) + except TypeError: + check_requirements('Pillow>=8.4.0') # known issue https://github.com/ultralytics/yolov5/issues/5374 + except URLError: # not online + return ImageFont.load_default() + + +class Annotator: + # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations + def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): + assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + non_ascii = not is_ascii(example) # non-latin labels, i.e. 
asian, arabic, cyrillic + self.pil = pil or non_ascii + if self.pil: # use PIL + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + self.font = check_pil_font(font='Arial.Unicode.ttf' if non_ascii else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + else: # use cv2 + self.im = im + self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width + + def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + # Add one xyxy box to image with label + if self.pil or not is_ascii(label): + self.draw.rectangle(box, width=self.lw, outline=color) # box + if label: + w, h = self.font.getsize(label) # text width, height (WARNING: deprecated) in 9.2.0 + # _, _, w, h = self.font.getbbox(label) # text width, height (New) + outside = box[1] - h >= 0 # label fits outside box + self.draw.rectangle( + (box[0], box[1] - h if outside else box[1], box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1), + fill=color, + ) + # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 + self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) + else: # cv2 + p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) + cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA) + if label: + tf = max(self.lw - 1, 1) # font thickness + w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0] # text width, height + outside = p1[1] - h >= 3 + p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 + cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled + cv2.putText(self.im, + label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA) + + def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False): + """Plot masks at once. 
+ Args: + masks (tensor): predicted masks on cuda, shape: [n, h, w] + colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n] + im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1] + alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque + """ + if self.pil: + # convert to numpy first + self.im = np.asarray(self.im).copy() + if len(masks) == 0: + self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255 + colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0 + colors = colors[:, None, None] # shape(n,1,1,3) + masks = masks.unsqueeze(3) # shape(n,h,w,1) + masks_color = masks * (colors * alpha) # shape(n,h,w,3) + + inv_alph_masks = (1 - masks * alpha).cumprod(0) # shape(n,h,w,1) + mcs = (masks_color * inv_alph_masks).sum(0) * 2 # mask color summand shape(n,h,w,3) + + im_gpu = im_gpu.flip(dims=[0]) # flip channel + im_gpu = im_gpu.permute(1, 2, 0).contiguous() # shape(h,w,3) + im_gpu = im_gpu * inv_alph_masks[-1] + mcs + im_mask = (im_gpu * 255).byte().cpu().numpy() + self.im[:] = im_mask if retina_masks else scale_image(im_gpu.shape, im_mask, self.im.shape) + if self.pil: + # convert im back to PIL and update draw + self.fromarray(self.im) + + def rectangle(self, xy, fill=None, outline=None, width=1): + # Add rectangle to image (PIL-only) + self.draw.rectangle(xy, fill, outline, width) + + def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'): + # Add text to image (PIL-only) + if anchor == 'bottom': # start y from font bottom + w, h = self.font.getsize(text) # text width, height + xy[1] += 1 - h + self.draw.text(xy, text, fill=txt_color, font=self.font) + + def fromarray(self, im): + # Update self.im from a numpy array + self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) + self.draw = ImageDraw.Draw(self.im) + + def result(self): + # Return annotated image as array + return np.asarray(self.im) + + +def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): + """ + x: Features to be visualized + module_type: Module type + stage: Module stage within model + n: Maximum number of feature maps to plot + save_dir: Directory to save results + """ + if 'Detect' not in module_type: + batch, channels, height, width = x.shape # batch, channels, height, width + if height > 1 and width > 1: + f = save_dir / f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename + + blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels + n = min(n, channels) # number of plots + fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols + ax = ax.ravel() + plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze()) # cmap='gray' + ax[i].axis('off') + + LOGGER.info(f'Saving {f}... 
({n}/{channels})') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + from scipy.signal import butter, filtfilt + + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def output_to_target(output, max_det=300): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting + targets = [] + for i, o in enumerate(output): + box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1) + j = torch.full((conf.shape[0], 1), i) + targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1)) + return torch.cat(targets, 0).numpy() + + +@threaded +def plot_images(images, targets, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + ti = targets[targets[:, 0] == i] # image targets + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in 
enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + annotator.im.save(fname) # save + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_val_txt(): # from utils.plots import *; plot_val() + # Plot val.txt histograms + x = np.loadtxt('val.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label=f'{x[i].mean():.3g} +/- {x[i].std():.3g}') + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() + # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) + save_dir = Path(file).parent if file else Path(dir) + plot2 = False # plot additional results + if plot2: + ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: + for f in sorted(save_dir.glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + if plot2: + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + for i in range(7): + ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[5, 1:j], + y[3, 1:j] * 1E2, + '.-', + linewidth=2, + markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', + linewidth=2, + markersize=8, + alpha=.25, + label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(25, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + f = save_dir / 'study.png' + print(f'Saving {f}...') + plt.savefig(f, dpi=300) + + +@TryExcept() # known issue https://github.com/ultralytics/yolov5/issues/5395 +def plot_labels(labels, names=(), save_dir=Path('')): + # plot dataset 
labels + LOGGER.info(f"Plotting labels to {save_dir / 'labels.jpg'}... ") + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + with contextlib.suppress(Exception): # color histogram bars by class + [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # known issue #3195 + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(list(names.values()), rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + +def imshow_cls(im, labels=None, pred=None, names=None, nmax=25, verbose=False, f=Path('images.jpg')): + # Show classification image grid with labels (optional) and predictions (optional) + from utils.augmentations import denormalize + + names = names or [f'class{i}' for i in range(1000)] + blocks = torch.chunk(denormalize(im.clone()).cpu().float(), len(im), + dim=0) # select batch index 0, block by channels + n = min(len(blocks), nmax) # number of plots + m = min(8, round(n ** 0.5)) # 8 x 8 default + fig, ax = plt.subplots(math.ceil(n / m), m) # 8 rows x n/8 cols + ax = ax.ravel() if m > 1 else [ax] + # plt.subplots_adjust(wspace=0.05, hspace=0.05) + for i in range(n): + ax[i].imshow(blocks[i].squeeze().permute((1, 2, 0)).numpy().clip(0.0, 1.0)) + ax[i].axis('off') + if labels is not None: + s = names[labels[i]] + (f'—{names[pred[i]]}' if pred is not None else '') + ax[i].set_title(s, fontsize=8, verticalalignment='top') + plt.savefig(f, dpi=300, bbox_inches='tight') + plt.close() + if verbose: + LOGGER.info(f'Saving {f}') + if labels is not None: + LOGGER.info('True: ' + ' '.join(f'{names[i]:3s}' for i in labels[:nmax])) + if pred is not None: + LOGGER.info('Predicted:' + ' '.join(f'{names[i]:3s}' for i in pred[:nmax])) + return f + + +def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() + # Plot evolve.csv hyp evolution results + evolve_csv = Path(evolve_csv) + data = pd.read_csv(evolve_csv) + keys = [x.strip() for x in data.columns] + x = data.values + f = fitness(x) + j = np.argmax(f) # max fitness index + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + print(f'Best results from row {j} of {evolve_csv}:') + for i, k in enumerate(keys[7:]): + v = x[:, 7 + i] + mu = v[j] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(v, f, 
c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title(f'{k} = {mu:.3g}', fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print(f'{k:>15}: {mu:.3g}') + f = evolve_csv.with_suffix('.png') # filename + plt.savefig(f, dpi=200) + plt.close() + print(f'Saved {f}') + + +def plot_results(file='path/to/results.csv', dir=''): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' + for f in files: + try: + data = pd.read_csv(f) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): + y = data.values[:, j].astype('float') + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].set_title(s[j], fontsize=12) + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + LOGGER.info(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print(f'Warning: Plotting error for {f}; {e}') + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def save_one_box(xyxy, im, file=Path('im.jpg'), gain=1.02, pad=10, square=False, BGR=False, save=True): + # Save image crop as {file} with crop size multiple {gain} and {pad} pixels. 
Save and/or return crop + xyxy = torch.tensor(xyxy).view(-1, 4) + b = xyxy2xywh(xyxy) # boxes + if square: + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # attempt rectangle to square + b[:, 2:] = b[:, 2:] * gain + pad # box wh * gain + pad + xyxy = xywh2xyxy(b).long() + clip_boxes(xyxy, im.shape) + crop = im[int(xyxy[0, 1]):int(xyxy[0, 3]), int(xyxy[0, 0]):int(xyxy[0, 2]), ::(1 if BGR else -1)] + if save: + file.parent.mkdir(parents=True, exist_ok=True) # make directory + f = str(increment_path(file).with_suffix('.jpg')) + # cv2.imwrite(f, crop) # save BGR, https://github.com/ultralytics/yolov5/issues/7007 chroma subsampling issue + Image.fromarray(crop[..., ::-1]).save(f, quality=95, subsampling=0) # save RGB + return crop diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/__init__.py b/src/train_utils/train_models/models/yolov5/utils/segment/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/augmentations.py b/src/train_utils/train_models/models/yolov5/utils/segment/augmentations.py new file mode 100644 index 0000000..169adde --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/augmentations.py @@ -0,0 +1,104 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Image augmentation functions +""" + +import math +import random + +import cv2 +import numpy as np + +from ..augmentations import box_candidates +from ..general import resample_segments, segment2box + + +def mixup(im, labels, segments, im2, labels2, segments2): + # Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf + r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0 + im = (im * r + im2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + segments = np.concatenate((segments, segments2), 0) + return im, labels, segments + + +def random_perspective(im, + targets=(), + segments=(), + degrees=10, + translate=.1, + scale=.1, + shear=10, + perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = im.shape[0] + border[0] * 2 # shape(h,w,c) + width = im.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -im.shape[1] / 2 # x translation (pixels) + C[1, 2] = -im.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * width) # x translation (pixels) + T[1, 2] = (random.uniform(0.5 - translate, 0.5 + translate) * height) # y translation (pixels) + + # Combined rotation matrix + M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + im = 
cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(im[:, :, ::-1]) # base + # ax[1].imshow(im2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + new_segments = [] + if n: + new = np.zeros((n, 4)) + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]) # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + new_segments.append(xy) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01) + targets = targets[i] + targets[:, 1:5] = new[i] + new_segments = np.array(new_segments)[i] + + return im, targets, new_segments diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/dataloaders.py b/src/train_utils/train_models/models/yolov5/utils/segment/dataloaders.py new file mode 100644 index 0000000..097a5d5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/dataloaders.py @@ -0,0 +1,332 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Dataloaders +""" + +import os +import random + +import cv2 +import numpy as np +import torch +from torch.utils.data import DataLoader, distributed + +from ..augmentations import augment_hsv, copy_paste, letterbox +from ..dataloaders import InfiniteDataLoader, LoadImagesAndLabels, seed_worker +from ..general import LOGGER, xyn2xy, xywhn2xyxy, xyxy2xywhn +from ..torch_utils import torch_distributed_zero_first +from .augmentations import mixup, random_perspective + +RANK = int(os.getenv('RANK', -1)) + + +def create_dataloader(path, + imgsz, + batch_size, + stride, + single_cls=False, + hyp=None, + augment=False, + cache=False, + pad=0.0, + rect=False, + rank=-1, + workers=8, + image_weights=False, + quad=False, + prefix='', + shuffle=False, + mask_downsample_ratio=1, + overlap_mask=False, + seed=0): + if rect and shuffle: + LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False') + shuffle = False + with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP + dataset = LoadImagesAndLabelsAndMasks( + path, + imgsz, + batch_size, + augment=augment, # augmentation + hyp=hyp, # hyperparameters + rect=rect, # rectangular batches + cache_images=cache, + single_cls=single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix, + downsample_ratio=mask_downsample_ratio, + overlap=overlap_mask) + + batch_size = min(batch_size, len(dataset)) + nd = torch.cuda.device_count() # number of CUDA devices + nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle) + loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates + generator = torch.Generator() + generator.manual_seed(6148914691236517205 + seed + RANK) + return loader( + dataset, + batch_size=batch_size, + shuffle=shuffle and sampler is None, + num_workers=nw, + sampler=sampler, + pin_memory=True, + collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else 
LoadImagesAndLabelsAndMasks.collate_fn, + worker_init_fn=seed_worker, + generator=generator, + ), dataset + + +class LoadImagesAndLabelsAndMasks(LoadImagesAndLabels): # for training/testing + + def __init__( + self, + path, + img_size=640, + batch_size=16, + augment=False, + hyp=None, + rect=False, + image_weights=False, + cache_images=False, + single_cls=False, + stride=32, + pad=0, + min_items=0, + prefix='', + downsample_ratio=1, + overlap=False, + ): + super().__init__(path, img_size, batch_size, augment, hyp, rect, image_weights, cache_images, single_cls, + stride, pad, min_items, prefix) + self.downsample_ratio = downsample_ratio + self.overlap = overlap + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + masks = [] + if mosaic: + # Load mosaic + img, labels, segments = self.load_mosaic(index) + shapes = None + + # MixUp augmentation + if random.random() < hyp['mixup']: + img, labels, segments = mixup(img, labels, segments, *self.load_mosaic(random.randint(0, self.n - 1))) + + else: + # Load image + img, (h0, w0), (h, w) = self.load_image(index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + # [array, array, ....], array.shape=(num_points, 2), xyxyxyxy + segments = self.segments[index].copy() + if len(segments): + for i_s in range(len(segments)): + segments[i_s] = xyn2xy( + segments[i_s], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1], + ) + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + img, labels, segments = random_perspective(img, + labels, + segments=segments, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + nl = len(labels) # number of labels + if nl: + labels[:, 1:5] = xyxy2xywhn(labels[:, 1:5], w=img.shape[1], h=img.shape[0], clip=True, eps=1e-3) + if self.overlap: + masks, sorted_idx = polygons2masks_overlap(img.shape[:2], + segments, + downsample_ratio=self.downsample_ratio) + masks = masks[None] # (640, 640) -> (1, 640, 640) + labels = labels[sorted_idx] + else: + masks = polygons2masks(img.shape[:2], segments, color=1, downsample_ratio=self.downsample_ratio) + + masks = (torch.from_numpy(masks) if len(masks) else torch.zeros(1 if self.overlap else nl, img.shape[0] // + self.downsample_ratio, img.shape[1] // + self.downsample_ratio)) + # TODO: albumentations support + if self.augment: + # Albumentations + # there are some augmentation that won't change boxes and masks, + # so just be it for now. 
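            # At this step `masks` is either a single overlap-encoded map (self.overlap=True) or
            # `nl` binary maps, each at 1/downsample_ratio of the image resolution. A minimal
            # sketch of the helpers defined at the bottom of this file, using an assumed toy
            # square polygon on a 640x640 image (values are illustrative, not from this dataset):
            #
            #   poly = np.array([[100., 100., 300., 100., 300., 300., 100., 300.]])  # x1y1...x4y4
            #   m = polygon2mask((640, 640), poly, color=1, downsample_ratio=4)      # (160, 160) uint8
            #   overlap, idx = polygons2masks_overlap((640, 640), [poly.reshape(-1, 2)],
            #                                         downsample_ratio=4)
            #   # overlap: (160, 160) map where pixel value k marks the k-th largest instance;
            #   # idx sorts instances by descending area so the labels can be reordered to match.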
+ img, labels = self.albumentations(img, labels) + nl = len(labels) # update after albumentations + + # HSV color-space + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nl: + labels[:, 2] = 1 - labels[:, 2] + masks = torch.flip(masks, dims=[1]) + + # Flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nl: + labels[:, 1] = 1 - labels[:, 1] + masks = torch.flip(masks, dims=[2]) + + # Cutouts # labels = cutout(img, labels, p=0.5) + + labels_out = torch.zeros((nl, 6)) + if nl: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB + img = np.ascontiguousarray(img) + + return (torch.from_numpy(img), labels_out, self.im_files[index], shapes, masks) + + def load_mosaic(self, index): + # YOLOv5 4-mosaic loader. Loads 1 image + 3 random images into a 4-image mosaic + labels4, segments4 = [], [] + s = self.img_size + yc, xc = (int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border) # mosaic center x, y + + # 3 additional image indices + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = self.load_image(index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + labels, segments = self.labels[index].copy(), self.segments[index].copy() + + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, p=self.hyp['copy_paste']) + img4, labels4, segments4 = random_perspective(img4, + labels4, + segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + return img4, labels4, segments4 + + @staticmethod + def collate_fn(batch): + img, label, path, shapes, masks = zip(*batch) # transposed + batched_masks = torch.cat(masks, 0) + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes, 
batched_masks + + +def polygon2mask(img_size, polygons, color=1, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (np.ndarray): [N, M], N is the number of polygons, + M is the number of points(Be divided by 2). + """ + mask = np.zeros(img_size, dtype=np.uint8) + polygons = np.asarray(polygons) + polygons = polygons.astype(np.int32) + shape = polygons.shape + polygons = polygons.reshape(shape[0], -1, 2) + cv2.fillPoly(mask, polygons, color=color) + nh, nw = (img_size[0] // downsample_ratio, img_size[1] // downsample_ratio) + # NOTE: fillPoly firstly then resize is trying the keep the same way + # of loss calculation when mask-ratio=1. + mask = cv2.resize(mask, (nw, nh)) + return mask + + +def polygons2masks(img_size, polygons, color, downsample_ratio=1): + """ + Args: + img_size (tuple): The image size. + polygons (list[np.ndarray]): each polygon is [N, M], + N is the number of polygons, + M is the number of points(Be divided by 2). + """ + masks = [] + for si in range(len(polygons)): + mask = polygon2mask(img_size, [polygons[si].reshape(-1)], color, downsample_ratio) + masks.append(mask) + return np.array(masks) + + +def polygons2masks_overlap(img_size, segments, downsample_ratio=1): + """Return a (640, 640) overlap mask.""" + masks = np.zeros((img_size[0] // downsample_ratio, img_size[1] // downsample_ratio), + dtype=np.int32 if len(segments) > 255 else np.uint8) + areas = [] + ms = [] + for si in range(len(segments)): + mask = polygon2mask( + img_size, + [segments[si].reshape(-1)], + downsample_ratio=downsample_ratio, + color=1, + ) + ms.append(mask) + areas.append(mask.sum()) + areas = np.asarray(areas) + index = np.argsort(-areas) + ms = np.array(ms)[index] + for i in range(len(segments)): + mask = ms[i] * (i + 1) + masks = masks + mask + masks = np.clip(masks, a_min=0, a_max=i + 1) + return masks, index diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/general.py b/src/train_utils/train_models/models/yolov5/utils/segment/general.py new file mode 100644 index 0000000..f1b2f1d --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/general.py @@ -0,0 +1,160 @@ +import cv2 +import numpy as np +import torch +import torch.nn.functional as F + + +def crop_mask(masks, boxes): + """ + "Crop" predicted masks by zeroing out everything not in the predicted bbox. + Vectorized by Chong (thanks Chong). + + Args: + - masks should be a size [n, h, w] tensor of masks + - boxes should be a size [n, 4] tensor of bbox coords in relative point form + """ + + n, h, w = masks.shape + x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(1,1,n) + r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,w,1) + c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(h,1,1) + + return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2)) + + +def process_mask_upsample(protos, masks_in, bboxes, shape): + """ + Crop after upsample. 
+ protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape: input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def process_mask(protos, masks_in, bboxes, shape, upsample=False): + """ + Crop before upsample. + proto_out: [mask_dim, mask_h, mask_w] + out_masks: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape:input_image_size, (h, w) + + return: h, w, n + """ + + c, mh, mw = protos.shape # CHW + ih, iw = shape + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW + + downsampled_bboxes = bboxes.clone() + downsampled_bboxes[:, 0] *= mw / iw + downsampled_bboxes[:, 2] *= mw / iw + downsampled_bboxes[:, 3] *= mh / ih + downsampled_bboxes[:, 1] *= mh / ih + + masks = crop_mask(masks, downsampled_bboxes) # CHW + if upsample: + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + return masks.gt_(0.5) + + +def process_mask_native(protos, masks_in, bboxes, shape): + """ + Crop after upsample. + protos: [mask_dim, mask_h, mask_w] + masks_in: [n, mask_dim], n is number of masks after nms + bboxes: [n, 4], n is number of masks after nms + shape: input_image_size, (h, w) + + return: h, w, n + """ + c, mh, mw = protos.shape # CHW + masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) + gain = min(mh / shape[0], mw / shape[1]) # gain = old / new + pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(mh - pad[1]), int(mw - pad[0]) + masks = masks[:, top:bottom, left:right] + + masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW + masks = crop_mask(masks, bboxes) # CHW + return masks.gt_(0.5) + + +def scale_image(im1_shape, masks, im0_shape, ratio_pad=None): + """ + img1_shape: model input shape, [h, w] + img0_shape: origin pic shape, [h, w, 3] + masks: [h, w, num] + """ + # Rescale coordinates (xyxy) from im1_shape to im0_shape + if ratio_pad is None: # calculate from im0_shape + gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new + pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding + else: + pad = ratio_pad[1] + top, left = int(pad[1]), int(pad[0]) # y, x + bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0]) + + if len(masks.shape) < 2: + raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}') + masks = masks[top:bottom, left:right] + # masks = masks.permute(2, 0, 1).contiguous() + # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0] + # masks = masks.permute(1, 2, 0).contiguous() + masks = cv2.resize(masks, (im0_shape[1], im0_shape[0])) + + if len(masks.shape) == 2: + masks = masks[:, :, None] + return masks + + +def mask_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [M, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, [N, M] + """ + intersection = torch.matmul(mask1, mask2.t()).clamp(0) + union = (mask1.sum(1)[:, 
None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks_iou(mask1, mask2, eps=1e-7): + """ + mask1: [N, n] m1 means number of predicted objects + mask2: [N, n] m2 means number of gt objects + Note: n means image_w x image_h + + return: masks iou, (N, ) + """ + intersection = (mask1 * mask2).sum(1).clamp(0) # (N, ) + union = (mask1.sum(1) + mask2.sum(1))[None] - intersection # (area1 + area2) - intersection + return intersection / (union + eps) + + +def masks2segments(masks, strategy='largest'): + # Convert masks(n,160,160) into segments(n,xy) + segments = [] + for x in masks.int().cpu().numpy().astype('uint8'): + c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0] + if c: + if strategy == 'concat': # concatenate all segments + c = np.concatenate([x.reshape(-1, 2) for x in c]) + elif strategy == 'largest': # select largest segment + c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2) + else: + c = np.zeros((0, 2)) # no segments found + segments.append(c.astype('float32')) + return segments diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/loss.py b/src/train_utils/train_models/models/yolov5/utils/segment/loss.py new file mode 100644 index 0000000..caeff3c --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/loss.py @@ -0,0 +1,185 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +from ..general import xywh2xyxy +from ..loss import FocalLoss, smooth_BCE +from ..metrics import bbox_iou +from ..torch_utils import de_parallel +from .general import crop_mask + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False, overlap=False): + self.sort_obj_iou = False + self.overlap = overlap + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + m = de_parallel(model).model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(m.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 + self.ssi = list(m.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance + self.na = m.na # number of anchors + self.nc = m.nc # number of classes + self.nl = m.nl # number of layers + self.nm = m.nm # number of masks + self.anchors = m.anchors + self.device = device + + def __call__(self, preds, targets, masks): # predictions, targets, model + p, proto = preds + bs, nm, mask_h, mask_w = proto.shape # batch size, number of masks, mask height, mask width + lcls = torch.zeros(1, device=self.device) + lbox = torch.zeros(1, device=self.device) + lobj = torch.zeros(1, device=self.device) + lseg = torch.zeros(1, device=self.device) + tcls, tbox, indices, anchors, tidxs, xywhn = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros(pi.shape[:4], 
dtype=pi.dtype, device=self.device) # target obj + + n = b.shape[0] # number of targets + if n: + pxy, pwh, _, pcls, pmask = pi[b, a, gj, gi].split((2, 2, 1, self.nc, nm), 1) # subset of predictions + + # Box regression + pxy = pxy.sigmoid() * 2 - 0.5 + pwh = (pwh.sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox, tbox[i], CIoU=True).squeeze() # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + iou = iou.detach().clamp(0).type(tobj.dtype) + if self.sort_obj_iou: + j = iou.argsort() + b, a, gj, gi, iou = b[j], a[j], gj[j], gi[j], iou[j] + if self.gr < 1: + iou = (1.0 - self.gr) + self.gr * iou + tobj[b, a, gj, gi] = iou # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(pcls, self.cn, device=self.device) # targets + t[range(n), tcls[i]] = self.cp + lcls += self.BCEcls(pcls, t) # BCE + + # Mask regression + if tuple(masks.shape[-2:]) != (mask_h, mask_w): # downsample + masks = F.interpolate(masks[None], (mask_h, mask_w), mode='nearest')[0] + marea = xywhn[i][:, 2:].prod(1) # mask width, height normalized + mxyxy = xywh2xyxy(xywhn[i] * torch.tensor([mask_w, mask_h, mask_w, mask_h], device=self.device)) + for bi in b.unique(): + j = b == bi # matching index + if self.overlap: + mask_gti = torch.where(masks[bi][None] == tidxs[i][j].view(-1, 1, 1), 1.0, 0.0) + else: + mask_gti = masks[tidxs[i]][j] + lseg += self.single_mask_loss(mask_gti, pmask[j], proto[bi], mxyxy[j], marea[j]) + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + lseg *= self.hyp['box'] / bs + + loss = lbox + lobj + lcls + lseg + return loss * bs, torch.cat((lbox, lseg, lobj, lcls)).detach() + + def single_mask_loss(self, gt_mask, pred, proto, xyxy, area): + # Mask loss for one image + pred_mask = (pred @ proto.view(self.nm, -1)).view(-1, *proto.shape[1:]) # (n,32) @ (32,80,80) -> (n,80,80) + loss = F.binary_cross_entropy_with_logits(pred_mask, gt_mask, reduction='none') + return (crop_mask(loss, xyxy).mean(dim=(1, 2)) / area).mean() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch, tidxs, xywhn = [], [], [], [], [], [] + gain = torch.ones(8, device=self.device) # normalized to gridspace gain + ai = torch.arange(na, device=self.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + if self.overlap: + batch = p[0].shape[0] + ti = [] + for i in range(batch): + num = (targets[:, 0] == i).sum() # find number of targets of each image + ti.append(torch.arange(num, device=self.device).float().view(1, num).repeat(na, 1) + 1) # (na, num) + ti = torch.cat(ti, 1) # (na, nt) + else: + ti = torch.arange(nt, device=self.device).float().view(1, nt).repeat(na, 1) + targets = torch.cat((targets.repeat(na, 1, 1), ai[..., None], ti[..., None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=self.device).float() * g # offsets + + for i in range(self.nl): + anchors, shape = 
self.anchors[i], p[i].shape + gain[2:6] = torch.tensor(shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain # shape(3,n,7) + if nt: + # Matches + r = t[..., 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1 / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1 < g) & (gxy > 1)).T + l, m = ((gxi % 1 < g) & (gxi > 1)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + bc, gxy, gwh, at = t.chunk(4, 1) # (image, class), grid xy, grid wh, anchors + (a, tidx), (b, c) = at.long().T, bc.long().T # anchors, image, class + gij = (gxy - offsets).long() + gi, gj = gij.T # grid indices + + # Append + indices.append((b, a, gj.clamp_(0, shape[2] - 1), gi.clamp_(0, shape[3] - 1))) # image, anchor, grid + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + tidxs.append(tidx) + xywhn.append(torch.cat((gxy, gwh), 1) / gain[2:6]) # xywh normalized + + return tcls, tbox, indices, anch, tidxs, xywhn diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/metrics.py b/src/train_utils/train_models/models/yolov5/utils/segment/metrics.py new file mode 100644 index 0000000..c9f137e --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/metrics.py @@ -0,0 +1,210 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Model validation metrics +""" + +import numpy as np + +from ..metrics import ap_per_class + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9, 0.0, 0.0, 0.1, 0.9] + return (x[:, :8] * w).sum(1) + + +def ap_per_class_box_and_mask( + tp_m, + tp_b, + conf, + pred_cls, + target_cls, + plot=False, + save_dir='.', + names=(), +): + """ + Args: + tp_b: tp of boxes. + tp_m: tp of masks. + other arguments see `func: ap_per_class`. + """ + results_boxes = ap_per_class(tp_b, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix='Box')[2:] + results_masks = ap_per_class(tp_m, + conf, + pred_cls, + target_cls, + plot=plot, + save_dir=save_dir, + names=names, + prefix='Mask')[2:] + + results = { + 'boxes': { + 'p': results_boxes[0], + 'r': results_boxes[1], + 'ap': results_boxes[3], + 'f1': results_boxes[2], + 'ap_class': results_boxes[4]}, + 'masks': { + 'p': results_masks[0], + 'r': results_masks[1], + 'ap': results_masks[3], + 'f1': results_masks[2], + 'ap_class': results_masks[4]}} + return results + + +class Metric: + + def __init__(self) -> None: + self.p = [] # (nc, ) + self.r = [] # (nc, ) + self.f1 = [] # (nc, ) + self.all_ap = [] # (nc, 10) + self.ap_class_index = [] # (nc, ) + + @property + def ap50(self): + """AP@0.5 of all classes. + Return: + (nc, ) or []. + """ + return self.all_ap[:, 0] if len(self.all_ap) else [] + + @property + def ap(self): + """AP@0.5:0.95 + Return: + (nc, ) or []. + """ + return self.all_ap.mean(1) if len(self.all_ap) else [] + + @property + def mp(self): + """mean precision of all classes. + Return: + float. + """ + return self.p.mean() if len(self.p) else 0.0 + + @property + def mr(self): + """mean recall of all classes. + Return: + float. 
+ """ + return self.r.mean() if len(self.r) else 0.0 + + @property + def map50(self): + """Mean AP@0.5 of all classes. + Return: + float. + """ + return self.all_ap[:, 0].mean() if len(self.all_ap) else 0.0 + + @property + def map(self): + """Mean AP@0.5:0.95 of all classes. + Return: + float. + """ + return self.all_ap.mean() if len(self.all_ap) else 0.0 + + def mean_results(self): + """Mean of results, return mp, mr, map50, map""" + return (self.mp, self.mr, self.map50, self.map) + + def class_result(self, i): + """class-aware result, return p[i], r[i], ap50[i], ap[i]""" + return (self.p[i], self.r[i], self.ap50[i], self.ap[i]) + + def get_maps(self, nc): + maps = np.zeros(nc) + self.map + for i, c in enumerate(self.ap_class_index): + maps[c] = self.ap[i] + return maps + + def update(self, results): + """ + Args: + results: tuple(p, r, ap, f1, ap_class) + """ + p, r, all_ap, f1, ap_class_index = results + self.p = p + self.r = r + self.all_ap = all_ap + self.f1 = f1 + self.ap_class_index = ap_class_index + + +class Metrics: + """Metric for boxes and masks.""" + + def __init__(self) -> None: + self.metric_box = Metric() + self.metric_mask = Metric() + + def update(self, results): + """ + Args: + results: Dict{'boxes': Dict{}, 'masks': Dict{}} + """ + self.metric_box.update(list(results['boxes'].values())) + self.metric_mask.update(list(results['masks'].values())) + + def mean_results(self): + return self.metric_box.mean_results() + self.metric_mask.mean_results() + + def class_result(self, i): + return self.metric_box.class_result(i) + self.metric_mask.class_result(i) + + def get_maps(self, nc): + return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc) + + @property + def ap_class_index(self): + # boxes and masks have the same ap_class_index + return self.metric_box.ap_class_index + + +KEYS = [ + 'train/box_loss', + 'train/seg_loss', # train loss + 'train/obj_loss', + 'train/cls_loss', + 'metrics/precision(B)', + 'metrics/recall(B)', + 'metrics/mAP_0.5(B)', + 'metrics/mAP_0.5:0.95(B)', # metrics + 'metrics/precision(M)', + 'metrics/recall(M)', + 'metrics/mAP_0.5(M)', + 'metrics/mAP_0.5:0.95(M)', # metrics + 'val/box_loss', + 'val/seg_loss', # val loss + 'val/obj_loss', + 'val/cls_loss', + 'x/lr0', + 'x/lr1', + 'x/lr2',] + +BEST_KEYS = [ + 'best/epoch', + 'best/precision(B)', + 'best/recall(B)', + 'best/mAP_0.5(B)', + 'best/mAP_0.5:0.95(B)', + 'best/precision(M)', + 'best/recall(M)', + 'best/mAP_0.5(M)', + 'best/mAP_0.5:0.95(M)',] diff --git a/src/train_utils/train_models/models/yolov5/utils/segment/plots.py b/src/train_utils/train_models/models/yolov5/utils/segment/plots.py new file mode 100644 index 0000000..1b22ec8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/segment/plots.py @@ -0,0 +1,143 @@ +import contextlib +import math +from pathlib import Path + +import cv2 +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import torch + +from .. import threaded +from ..general import xywh2xyxy +from ..plots import Annotator, colors + + +@threaded +def plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None): + # Plot image grid with labels + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + if isinstance(masks, torch.Tensor): + masks = masks.cpu().numpy().astype(int) + + max_size = 1920 # max image size + max_subplots = 16 # max image subplots, i.e. 
4x4 + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + if np.max(images[0]) <= 1: + images *= 255 # de-normalise (optional) + + # Build Image + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, im in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + im = im.transpose(1, 2, 0) + mosaic[y:y + h, x:x + w, :] = im + + # Resize (optional) + scale = max_size / ns / max(h, w) + if scale < 1: + h = math.ceil(scale * h) + w = math.ceil(scale * w) + mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h))) + + # Annotate + fs = int((h + w) * ns * 0.01) # font size + annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names) + for i in range(i + 1): + x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin + annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders + if paths: + annotator.text((x + 5, y + 5), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames + if len(targets) > 0: + idx = targets[:, 0] == i + ti = targets[idx] # image targets + + boxes = xywh2xyxy(ti[:, 2:6]).T + classes = ti[:, 1].astype('int') + labels = ti.shape[1] == 6 # labels if no conf column + conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale < 1: # absolute coords need scale if image scales + boxes *= scale + boxes[[0, 2]] += x + boxes[[1, 3]] += y + for j, box in enumerate(boxes.T.tolist()): + cls = classes[j] + color = colors(cls) + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + annotator.box_label(box, label, color=color) + + # Plot masks + if len(masks): + if masks.max() > 1.0: # mean that masks are overlap + image_masks = masks[[i]] # (1, 640, 640) + nl = len(ti) + index = np.arange(nl).reshape(nl, 1, 1) + 1 + image_masks = np.repeat(image_masks, nl, axis=0) + image_masks = np.where(image_masks == index, 1.0, 0.0) + else: + image_masks = masks[idx] + + im = np.asarray(annotator.im).copy() + for j, box in enumerate(boxes.T.tolist()): + if labels or conf[j] > 0.25: # 0.25 conf thresh + color = colors(classes[j]) + mh, mw = image_masks[j].shape + if mh != h or mw != w: + mask = image_masks[j].astype(np.uint8) + mask = cv2.resize(mask, (w, h)) + mask = mask.astype(bool) + else: + mask = image_masks[j].astype(bool) + with contextlib.suppress(Exception): + im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6 + annotator.fromarray(im) + annotator.im.save(fname) # save + + +def plot_results_with_masks(file='path/to/results.csv', dir='', best=True): + # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') + save_dir = Path(file).parent if file else Path(dir) + fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True) + ax = ax.ravel() + files = list(save_dir.glob('results*.csv')) + assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
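    # The per-file loop below picks the "best" epoch with the same weighting as fitness() in
    # utils/segment/metrics.py: 0.1*mAP@0.5 + 0.9*mAP@0.5:0.95, summed over box (B) and mask (M)
    # results (columns 7/8 and 11/12 of results.csv). A minimal sketch of the same selection by
    # column name, assuming the CSV header follows the KEYS order from utils/segment/metrics.py:
    #
    #   df = pd.read_csv('results.csv')
    #   df.columns = [c.strip() for c in df.columns]
    #   w = {'metrics/mAP_0.5(B)': 0.1, 'metrics/mAP_0.5:0.95(B)': 0.9,
    #        'metrics/mAP_0.5(M)': 0.1, 'metrics/mAP_0.5:0.95(M)': 0.9}
    #   best_epoch = int(sum(df[k] * v for k, v in w.items()).idxmax())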
+ for f in files: + try: + data = pd.read_csv(f) + index = np.argmax(0.9 * data.values[:, 8] + 0.1 * data.values[:, 7] + 0.9 * data.values[:, 12] + + 0.1 * data.values[:, 11]) + s = [x.strip() for x in data.columns] + x = data.values[:, 0] + for i, j in enumerate([1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]): + y = data.values[:, j] + # y[y == 0] = np.nan # don't show zero values + ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=2) + if best: + # best + ax[i].scatter(index, y[index], color='r', label=f'best:{index}', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[index], 5)}') + else: + # last + ax[i].scatter(x[-1], y[-1], color='r', label='last', marker='*', linewidth=3) + ax[i].set_title(s[j] + f'\n{round(y[-1], 5)}') + # if j in [8, 9, 10]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print(f'Warning: Plotting error for {f}: {e}') + ax[1].legend() + fig.savefig(save_dir / 'results.png', dpi=200) + plt.close() diff --git a/src/train_utils/train_models/models/yolov5/utils/torch_utils.py b/src/train_utils/train_models/models/yolov5/utils/torch_utils.py new file mode 100644 index 0000000..5b67b3f --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/torch_utils.py @@ -0,0 +1,432 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +PyTorch utils +""" + +import math +import os +import platform +import subprocess +import time +import warnings +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.parallel import DistributedDataParallel as DDP + +from utils.general import LOGGER, check_version, colorstr, file_date, git_describe + +LOCAL_RANK = int(os.getenv('LOCAL_RANK', -1)) # https://pytorch.org/docs/stable/elastic/run.html +RANK = int(os.getenv('RANK', -1)) +WORLD_SIZE = int(os.getenv('WORLD_SIZE', 1)) + +try: + import thop # for FLOPs computation +except ImportError: + thop = None + +# Suppress PyTorch warnings +warnings.filterwarnings('ignore', message='User provided device_type of \'cuda\', but CUDA is not available. Disabling') +warnings.filterwarnings('ignore', category=UserWarning) + + +def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')): + # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator + def decorate(fn): + return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn) + + return decorate + + +def smartCrossEntropyLoss(label_smoothing=0.0): + # Returns nn.CrossEntropyLoss with label smoothing enabled for torch>=1.10.0 + if check_version(torch.__version__, '1.10.0'): + return nn.CrossEntropyLoss(label_smoothing=label_smoothing) + if label_smoothing > 0: + LOGGER.warning(f'WARNING ⚠️ label smoothing {label_smoothing} requires torch>=1.10.0') + return nn.CrossEntropyLoss() + + +def smart_DDP(model): + # Model DDP creation with checks + assert not check_version(torch.__version__, '1.12.0', pinned=True), \ + 'torch==1.12.0 torchvision==0.13.0 DDP training is not supported due to a known issue. ' \ + 'Please upgrade or downgrade torch to use DDP. 
See https://github.com/ultralytics/yolov5/issues/8395' + if check_version(torch.__version__, '1.11.0'): + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK, static_graph=True) + else: + return DDP(model, device_ids=[LOCAL_RANK], output_device=LOCAL_RANK) + + +def reshape_classifier_output(model, n=1000): + # Update a TorchVision classification model to class count 'n' if required + from models.common import Classify + name, m = list((model.model if hasattr(model, 'model') else model).named_children())[-1] # last module + if isinstance(m, Classify): # YOLOv5 Classify() head + if m.linear.out_features != n: + m.linear = nn.Linear(m.linear.in_features, n) + elif isinstance(m, nn.Linear): # ResNet, EfficientNet + if m.out_features != n: + setattr(model, name, nn.Linear(m.in_features, n)) + elif isinstance(m, nn.Sequential): + types = [type(x) for x in m] + if nn.Linear in types: + i = types.index(nn.Linear) # nn.Linear index + if m[i].out_features != n: + m[i] = nn.Linear(m[i].in_features, n) + elif nn.Conv2d in types: + i = types.index(nn.Conv2d) # nn.Conv2d index + if m[i].out_channels != n: + m[i] = nn.Conv2d(m[i].in_channels, n, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + # Decorator to make all processes in distributed training wait for each local_master to do something + if local_rank not in [-1, 0]: + dist.barrier(device_ids=[local_rank]) + yield + if local_rank == 0: + dist.barrier(device_ids=[0]) + + +def device_count(): + # Returns number of CUDA devices available. Safe version of torch.cuda.device_count(). Supports Linux and Windows + assert platform.system() in ('Linux', 'Windows'), 'device_count() only supported on Linux or Windows' + try: + cmd = 'nvidia-smi -L | wc -l' if platform.system() == 'Linux' else 'nvidia-smi -L | find /c /v ""' # Windows + return int(subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().split()[-1]) + except Exception: + return 0 + + +def select_device(device='', batch_size=0, newline=True): + # device = None or 'cpu' or 0 or '0' or '0,1,2,3' + s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} ' + device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0' + cpu = device == 'cpu' + mps = device == 'mps' # Apple Metal Performance Shaders (MPS) + if cpu or mps: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available() + assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \ + f"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)" + + if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available + devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 + n = len(devices) # device count + if n > 1 and batch_size > 0: # check batch_size is divisible by device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * (len(s) + 1) + for i, d in enumerate(devices): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\n" # bytes to MB + arg = 'cuda:0' + elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available + s += 'MPS\n' + arg = 'mps' + else: # revert to CPU + s += 'CPU\n' + arg = 'cpu' + + if not newline: + s = s.rstrip() + LOGGER.info(s) + return torch.device(arg) + + +def time_sync(): + # PyTorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(input, ops, n=10, device=None): + """ YOLOv5 speed/memory/FLOPs profiler + Usage: + input = torch.randn(16, 3, 640, 640) + m1 = lambda x: x * torch.sigmoid(x) + m2 = nn.SiLU() + profile(input, [m1, m2], n=100) # profile over 100 iterations + """ + results = [] + if not isinstance(device, torch.device): + device = select_device(device) + print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}") + + for x in input if isinstance(input, list) else [input]: + x = x.to(device) + x.requires_grad = True + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + except Exception: + flops = 0 + + try: + for _ in range(n): + t[0] = time_sync() + y = m(x) + t[1] = time_sync() + try: + _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward() + t[2] = time_sync() + except Exception: # no backward method + # print(e) # for debug + t[2] = float('nan') + tf += (t[1] - t[0]) * 1000 / n # ms per op forward + tb += (t[2] - t[1]) * 1000 / n # ms per op backward + mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) + s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes + p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + results.append([p, flops, mem, tf, tb, s_in, s_out]) + except Exception as e: + print(e) + results.append(None) + torch.cuda.empty_cache() + return results + + +def is_parallel(model): + # Returns True if model is of type DP or DDP + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def de_parallel(model): + # De-parallelize a model: returns single-GPU model if model is of type DP or DDP + return model.module if is_parallel(model) else model + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in 
enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0, 0 + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + LOGGER.info(f'Model pruned to {sparsity(model):.3g} global sparsity') + + +def fuse_conv_and_bn(conv, bn): + # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + dilation=conv.dilation, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # Prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # Prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, imgsz=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print(f"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}") + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPs + p = next(model.parameters()) + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride + im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format + flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float + fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs + except Exception: + fs = '' + + name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model' + LOGGER.info(f'{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}') + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # Scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w)) + return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include 
[...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +def smart_optimizer(model, name='Adam', lr=0.001, momentum=0.9, decay=1e-5): + # YOLOv5 3-param group optimizer: 0) weights with decay, 1) weights no decay, 2) biases no decay + g = [], [], [] # optimizer parameter groups + bn = tuple(v for k, v in nn.__dict__.items() if 'Norm' in k) # normalization layers, i.e. BatchNorm2d() + for v in model.modules(): + for p_name, p in v.named_parameters(recurse=0): + if p_name == 'bias': # bias (no decay) + g[2].append(p) + elif p_name == 'weight' and isinstance(v, bn): # weight (no decay) + g[1].append(p) + else: + g[0].append(p) # weight (with decay) + + if name == 'Adam': + optimizer = torch.optim.Adam(g[2], lr=lr, betas=(momentum, 0.999)) # adjust beta1 to momentum + elif name == 'AdamW': + optimizer = torch.optim.AdamW(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0) + elif name == 'RMSProp': + optimizer = torch.optim.RMSprop(g[2], lr=lr, momentum=momentum) + elif name == 'SGD': + optimizer = torch.optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True) + else: + raise NotImplementedError(f'Optimizer {name} not implemented.') + + optimizer.add_param_group({'params': g[0], 'weight_decay': decay}) # add g0 with weight_decay + optimizer.add_param_group({'params': g[1], 'weight_decay': 0.0}) # add g1 (BatchNorm2d weights) + LOGGER.info(f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}) with parameter groups " + f'{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias') + return optimizer + + +def smart_hub_load(repo='ultralytics/yolov5', model='yolov5s', **kwargs): + # YOLOv5 torch.hub.load() wrapper with smart error/issue handling + if check_version(torch.__version__, '1.9.1'): + kwargs['skip_validation'] = True # validation causes GitHub API rate limit errors + if check_version(torch.__version__, '1.12.0'): + kwargs['trust_repo'] = True # argument required starting in torch 0.12 + try: + return torch.hub.load(repo, model, **kwargs) + except Exception: + return torch.hub.load(repo, model, force_reload=True, **kwargs) + + +def smart_resume(ckpt, optimizer, ema=None, weights='yolov5s.pt', epochs=300, resume=True): + # Resume training from a partially trained checkpoint + best_fitness = 0.0 + start_epoch = ckpt['epoch'] + 1 + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) # optimizer + best_fitness = ckpt['best_fitness'] + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) # EMA + ema.updates = ckpt['updates'] + if resume: + assert start_epoch > 0, f'{weights} training to {epochs} epochs is finished, nothing to resume.\n' \ + f"Start a new training without --resume, i.e. 'python train.py --weights {weights}'" + LOGGER.info(f'Resuming training from {weights} from epoch {start_epoch} to {epochs} total epochs') + if epochs < start_epoch: + LOGGER.info(f"{weights} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {epochs} more epochs.") + epochs += ckpt['epoch'] # finetune additional epochs + return best_fitness, start_epoch, epochs + + +class EarlyStopping: + # YOLOv5 simple early stopper + def __init__(self, patience=30): + self.best_fitness = 0.0 # i.e. 
mAP + self.best_epoch = 0 + self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.possible_stop = False # possible stop may occur next epoch + + def __call__(self, epoch, fitness): + if fitness >= self.best_fitness: # >= 0 to allow for early zero-fitness stage of training + self.best_epoch = epoch + self.best_fitness = fitness + delta = epoch - self.best_epoch # epochs without improvement + self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch + stop = delta >= self.patience # stop training if patience exceeded + if stop: + LOGGER.info(f'Stopping training early as no improvement observed in last {self.patience} epochs. ' + f'Best results observed at epoch {self.best_epoch}, best model saved as best.pt.\n' + f'To update EarlyStopping(patience={self.patience}) pass a new patience value, ' + f'i.e. `python train.py --patience 300` or use `--patience 0` to disable EarlyStopping.') + return stop + + +class ModelEMA: + """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models + Keeps a moving average of everything in the model state_dict (parameters and buffers) + For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + """ + + def __init__(self, model, decay=0.9999, tau=2000, updates=0): + # Create EMA + self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + self.updates += 1 + d = self.decay(self.updates) + + msd = de_parallel(model).state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: # true for FP16 and FP32 + v *= d + v += (1 - d) * msd[k].detach() + # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32' + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) diff --git a/src/train_utils/train_models/models/yolov5/utils/triton.py b/src/train_utils/train_models/models/yolov5/utils/triton.py new file mode 100644 index 0000000..2592802 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/utils/triton.py @@ -0,0 +1,85 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" Utils to interact with the Triton Inference Server +""" + +import typing +from urllib.parse import urlparse + +import torch + + +class TritonRemoteModel: + """ A wrapper over a model served by the Triton Inference Server. It can + be configured to communicate over GRPC or HTTP. It accepts Torch Tensors + as input and returns them as outputs. + """ + + def __init__(self, url: str): + """ + Keyword arguments: + url: Fully qualified address of the Triton server - for e.g. 
grpc://localhost:8000 + """ + + parsed_url = urlparse(url) + if parsed_url.scheme == 'grpc': + from tritonclient.grpc import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton GRPC client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository.models[0].name + self.metadata = self.client.get_model_metadata(self.model_name, as_json=True) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] + + else: + from tritonclient.http import InferenceServerClient, InferInput + + self.client = InferenceServerClient(parsed_url.netloc) # Triton HTTP client + model_repository = self.client.get_model_repository_index() + self.model_name = model_repository[0]['name'] + self.metadata = self.client.get_model_metadata(self.model_name) + + def create_input_placeholders() -> typing.List[InferInput]: + return [ + InferInput(i['name'], [int(s) for s in i['shape']], i['datatype']) for i in self.metadata['inputs']] + + self._create_input_placeholders_fn = create_input_placeholders + + @property + def runtime(self): + """Returns the model runtime""" + return self.metadata.get('backend', self.metadata.get('platform')) + + def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]: + """ Invokes the model. Parameters can be provided via args or kwargs. + args, if provided, are assumed to match the order of inputs of the model. + kwargs are matched with the model input names. + """ + inputs = self._create_inputs(*args, **kwargs) + response = self.client.infer(model_name=self.model_name, inputs=inputs) + result = [] + for output in self.metadata['outputs']: + tensor = torch.as_tensor(response.as_numpy(output['name'])) + result.append(tensor) + return result[0] if len(result) == 1 else result + + def _create_inputs(self, *args, **kwargs): + args_len, kwargs_len = len(args), len(kwargs) + if not args_len and not kwargs_len: + raise RuntimeError('No inputs provided.') + if args_len and kwargs_len: + raise RuntimeError('Cannot specify args and kwargs at the same time') + + placeholders = self._create_input_placeholders_fn() + if args_len: + if args_len != len(placeholders): + raise RuntimeError(f'Expected {len(placeholders)} inputs, got {args_len}.') + for input, value in zip(placeholders, args): + input.set_data_from_numpy(value.cpu().numpy()) + else: + for input in placeholders: + value = kwargs[input.name] + input.set_data_from_numpy(value.cpu().numpy()) + return placeholders diff --git a/src/train_utils/train_models/models/yolov5/val.py b/src/train_utils/train_models/models/yolov5/val.py new file mode 100644 index 0000000..d4073b4 --- /dev/null +++ b/src/train_utils/train_models/models/yolov5/val.py @@ -0,0 +1,409 @@ +# YOLOv5 🚀 by Ultralytics, GPL-3.0 license +""" +Validate a trained YOLOv5 detection model on a detection dataset + +Usage: + $ python val.py --weights yolov5s.pt --data coco128.yaml --img 640 + +Usage - formats: + $ python val.py --weights yolov5s.pt # PyTorch + yolov5s.torchscript # TorchScript + yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn + yolov5s_openvino_model # OpenVINO + yolov5s.engine # TensorRT + yolov5s.mlmodel # CoreML (macOS-only) + yolov5s_saved_model # TensorFlow SavedModel + yolov5s.pb # TensorFlow GraphDef + yolov5s.tflite # TensorFlow Lite + yolov5s_edgetpu.tflite # TensorFlow Edge TPU + yolov5s_paddle_model # PaddlePaddle 
+""" + +import argparse +import json +import os +import subprocess +import sys +from pathlib import Path + +import numpy as np +import torch +from tqdm import tqdm + +FILE = Path(__file__).resolve() +ROOT = FILE.parents[0] # YOLOv5 root directory +if str(ROOT) not in sys.path: + sys.path.append(str(ROOT)) # add ROOT to PATH +ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative + +from models.common import DetectMultiBackend +from utils.callbacks import Callbacks +from utils.dataloaders import create_dataloader +from utils.general import (LOGGER, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, + check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, + print_args, scale_boxes, xywh2xyxy, xyxy2xywh) +from utils.metrics import ConfusionMatrix, ap_per_class, box_iou +from utils.plots import output_to_target, plot_images, plot_val_study +from utils.torch_utils import select_device, smart_inference_mode + + +def save_one_txt(predn, save_conf, shape, file): + # Save one txt result + gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(file, 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + +def save_one_json(predn, jdict, path, class_map): + # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + +def process_batch(detections, labels, iouv): + """ + Return correct prediction matrix + Arguments: + detections (array[N, 6]), x1, y1, x2, y2, conf, class + labels (array[M, 5]), class, x1, y1, x2, y2 + Returns: + correct (array[N, 10]), for 10 IoU levels + """ + correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) + iou = box_iou(labels[:, 1:], detections[:, :4]) + correct_class = labels[:, 0:1] == detections[:, 5] + for i in range(len(iouv)): + x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + # matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + correct[matches[:, 1].astype(int), i] = True + return torch.tensor(correct, dtype=torch.bool, device=iouv.device) + + +@smart_inference_mode() +def run( + data, + weights=None, # model.pt path(s) + batch_size=32, # batch size + imgsz=640, # inference size (pixels) + conf_thres=0.001, # confidence threshold + iou_thres=0.6, # NMS IoU threshold + max_det=300, # maximum detections per image + task='val', # train, val, test, speed or study + device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu + workers=8, # max dataloader workers (per RANK in DDP mode) + single_cls=False, # treat as single-class dataset + augment=False, # augmented inference + verbose=False, # verbose output + save_txt=False, # save results to *.txt + save_hybrid=False, # save label+prediction hybrid results to *.txt + save_conf=False, # save confidences in --save-txt labels + save_json=False, # save a COCO-JSON results file + project=ROOT / 'runs/val', # save to project/name + name='exp', # save to project/name + exist_ok=False, # existing project/name ok, do not increment + half=True, # use FP16 half-precision inference + dnn=False, # use OpenCV DNN for ONNX inference + model=None, + dataloader=None, + save_dir=Path(''), + plots=True, + callbacks=Callbacks(), + compute_loss=None, +): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model + half &= device.type != 'cpu' # half precision only supported on CUDA + model.half() if half else model.float() + else: # called directly + device = select_device(device, batch_size=batch_size) + + # Directories + save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) + stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine + imgsz = check_img_size(imgsz, s=stride) # check image size + half = model.fp16 # FP16 supported on limited backends with CUDA + if engine: + batch_size = model.batch_size + else: + device = model.device + if not (pt or jit): + batch_size = 1 # export.py models default to batch-size 1 + LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') + + # Data + data = check_dataset(data) # check + + # Configure + model.eval() + cuda = device.type != 'cpu' + is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Dataloader + if not training: + if pt and not single_cls: # check --weights are trained on --data + ncm = model.model.nc + assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ + f'classes). Pass correct combination of --weights and --data that are trained together.' 
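+ # Descriptive note: the warmup below runs one dummy forward pass at the inference shape so one-time
+ # backend initialisation (e.g. CUDA context and kernel selection) does not skew the first timed batch;
+ # 'speed' runs use square images with zero padding, other tasks keep 0.5 letterbox padding and
+ # rectangular batches for PyTorch models.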
+ model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup + pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks + task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], + imgsz, + batch_size, + stride, + single_cls, + pad=pad, + rect=rect, + workers=workers, + prefix=colorstr(f'{task}: '))[0] + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = model.names if hasattr(model, 'names') else model.module.names # get class names + if isinstance(names, (list, tuple)): # old format + names = dict(enumerate(names)) + class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) + s = ('%22s' + '%11s' * 6) % ('Class', 'Images', 'Instances', 'P', 'R', 'mAP50', 'mAP50-95') + tp, fp, p, r, f1, mp, mr, map50, ap50, map = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 + dt = Profile(), Profile(), Profile() # profiling times + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class = [], [], [], [] + callbacks.run('on_val_start') + pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar + for batch_i, (im, targets, paths, shapes) in enumerate(pbar): + callbacks.run('on_val_batch_start') + with dt[0]: + if cuda: + im = im.to(device, non_blocking=True) + targets = targets.to(device) + im = im.half() if half else im.float() # uint8 to fp16/32 + im /= 255 # 0 - 255 to 0.0 - 1.0 + nb, _, height, width = im.shape # batch size, channels, height, width + + # Inference + with dt[1]: + preds, train_out = model(im) if compute_loss else (model(im, augment=augment), None) + + # Loss + if compute_loss: + loss += compute_loss(train_out, targets)[1] # box, obj, cls + + # NMS + targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + with dt[2]: + preds = non_max_suppression(preds, + conf_thres, + iou_thres, + labels=lb, + multi_label=True, + agnostic=single_cls, + max_det=max_det) + + # Metrics + for si, pred in enumerate(preds): + labels = targets[targets[:, 0] == si, 1:] + nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions + path, shape = Path(paths[si]), shapes[si][0] + correct = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init + seen += 1 + + if npr == 0: + if nl: + stats.append((correct, *torch.zeros((2, 0), device=device), labels[:, 0])) + if plots: + confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) + continue + + # Predictions + if single_cls: + pred[:, 5] = 0 + predn = pred.clone() + scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred + + # Evaluate + if nl: + tbox = xywh2xyxy(labels[:, 1:5]) # target boxes + scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels + labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels + correct = process_batch(predn, labelsn, iouv) + if plots: + confusion_matrix.process_batch(predn, labelsn) + stats.append((correct, pred[:, 4], pred[:, 5], labels[:, 0])) # (correct, conf, pcls, tcls) + + # Save/log + if save_txt: + save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') + if save_json: + save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary + callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) + + # Plot images + if plots and batch_i < 3: + plot_images(im, 
targets, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) # labels + plot_images(im, output_to_target(preds), paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred + + callbacks.run('on_val_batch_end', batch_i, im, targets, paths, shapes, preds) + + # Compute metrics + stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + tp, fp, p, r, f1, ap, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(int), minlength=nc) # number of targets per class + + # Print results + pf = '%22s' + '%11i' * 2 + '%11.3g' * 4 # print format + LOGGER.info(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + if nt.sum() == 0: + LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + LOGGER.info(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image + if not training: + shape = (batch_size, 3, imgsz, imgsz) + LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + callbacks.run('on_val_end', nt, tp, fp, p, r, f1, ap, ap50, ap_class, confusion_matrix) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = str(Path('../datasets/coco/annotations/instances_val2017.json')) # annotations + pred_json = str(save_dir / f'{w}_predictions.json') # predictions + LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + check_requirements('pycocotools>=2.0.6') + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + LOGGER.info(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +def parse_opt(): + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') + parser.add_argument('--batch-size', type=int, default=32, help='batch size') + parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold') + parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') + parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') + parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') + opt = parser.parse_args() + opt.data = check_yaml(opt.data) # check YAML + opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_txt |= opt.save_hybrid + print_args(vars(opt)) + return opt + + +def main(opt): + check_requirements(exclude=('tensorboard', 'thop')) + + if opt.task in ('train', 'val', 'test'): # run normally + if opt.conf_thres > 0.001: # https://github.com/ultralytics/yolov5/issues/1466 + LOGGER.info(f'WARNING ⚠️ confidence threshold {opt.conf_thres} > 0.001 produces invalid results') + if opt.save_hybrid: + LOGGER.info('WARNING ⚠️ --save-hybrid will return high mAP from hybrid labels, not from predictions alone') + run(**vars(opt)) + + else: + weights = opt.weights if isinstance(opt.weights, list) else [opt.weights] + opt.half = torch.cuda.is_available() and opt.device != 'cpu' # FP16 for fastest results + if opt.task == 'speed': # speed benchmarks + # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... + opt.conf_thres, opt.iou_thres, opt.save_json = 0.25, 0.45, False + for opt.weights in weights: + run(**vars(opt), plots=False) + + elif opt.task == 'study': # speed vs mAP benchmarks + # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
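+ # Descriptive note: study mode sweeps --imgsz from 256 to 1536 in steps of 128 for each weights file,
+ # appending metrics and speeds per size to a study_*.txt file that plot_val_study() graphs afterwards.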
+ for opt.weights in weights: + f = f'study_{Path(opt.data).stem}_{Path(opt.weights).stem}.txt' # filename to save to + x, y = list(range(256, 1536 + 128, 128)), [] # x axis (image sizes), y axis + for opt.imgsz in x: # img-size + LOGGER.info(f'\nRunning {f} --imgsz {opt.imgsz}...') + r, _, t = run(**vars(opt), plots=False) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + subprocess.run(['zip', '-r', 'study.zip', 'study_*.txt']) + plot_val_study(x=x) # plot + else: + raise NotImplementedError(f'--task {opt.task} not in ("train", "val", "test", "speed", "study")') + + +if __name__ == '__main__': + opt = parse_opt() + main(opt) diff --git a/src/train_utils/train_models/models/yolov7/.gitignore b/src/train_utils/train_models/models/yolov7/.gitignore new file mode 100644 index 0000000..d1bbbbe --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/.gitignore @@ -0,0 +1,263 @@ +# Repo-specific GitIgnore ---------------------------------------------------------------------------------------------- +*.jpg +*.jpeg +*.png +*.bmp +*.tif +*.tiff +*.heic +*.JPG +*.JPEG +*.PNG +*.BMP +*.TIF +*.TIFF +*.HEIC +*.mp4 +*.mov +*.MOV +*.avi +*.data +*.json +*.cfg +!setup.cfg +!cfg/yolov3*.cfg + +storage.googleapis.com +runs/* +data/* +data/images/* +!data/*.yaml +!data/hyps +!data/scripts +!data/images +!data/images/zidane.jpg +!data/images/bus.jpg +!data/*.sh + +results*.csv + +# Datasets ------------------------------------------------------------------------------------------------------------- +coco/ +coco128/ +VOC/ + +coco2017labels-segments.zip +test2017.zip +train2017.zip +val2017.zip + +# MATLAB GitIgnore ----------------------------------------------------------------------------------------------------- +*.m~ +*.mat +!targets*.mat + +# Neural Network weights ----------------------------------------------------------------------------------------------- +*.weights +*.pt +*.pb +*.onnx +*.engine +*.mlmodel +*.torchscript +*.tflite +*.h5 +*_saved_model/ +*_web_model/ +*_openvino_model/ +darknet53.conv.74 +yolov3-tiny.conv.15 +*.ptl +*.trt + +# GitHub Python GitIgnore ---------------------------------------------------------------------------------------------- +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +/wandb/ +.installed.cfg +*.egg + + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# SageMath parsed files +*.sage.py + +# dotenv +.env + +# virtualenv +.venv* +venv*/ +ENV*/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ + + +# https://github.com/github/gitignore/blob/master/Global/macOS.gitignore ----------------------------------------------- + +# General +.DS_Store +.AppleDouble +.LSOverride + +# Icon must end with two \r +Icon +Icon? + +# Thumbnails +._* + +# Files that might appear in the root of a volume +.DocumentRevisions-V100 +.fseventsd +.Spotlight-V100 +.TemporaryItems +.Trashes +.VolumeIcon.icns +.com.apple.timemachine.donotpresent + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + + +# https://github.com/github/gitignore/blob/master/Global/JetBrains.gitignore +# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm +# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 + +# User-specific stuff: +.idea/* +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/dictionaries +.html # Bokeh Plots +.pg # TensorFlow Frozen Graphs +.avi # videos + +# Sensitive or high-churn files: +.idea/**/dataSources/ +.idea/**/dataSources.ids +.idea/**/dataSources.local.xml +.idea/**/sqlDataSources.xml +.idea/**/dynamic.xml +.idea/**/uiDesigner.xml + +# Gradle: +.idea/**/gradle.xml +.idea/**/libraries + +# CMake +cmake-build-debug/ +cmake-build-release/ + +# Mongo Explorer plugin: +.idea/**/mongoSettings.xml + +## File-based project format: +*.iws + +## Plugin-specific files: + +# IntelliJ +out/ + +# mpeltonen/sbt-idea plugin +.idea_modules/ + +# JIRA plugin +atlassian-ide-plugin.xml + +# Cursive Clojure plugin +.idea/replstate.xml + +# Crashlytics plugin (for Android Studio and IntelliJ) +com_crashlytics_export_strings.xml +crashlytics.properties +crashlytics-build.properties +fabric.properties diff --git a/src/train_utils/train_models/models/yolov7/LICENSE.md b/src/train_utils/train_models/models/yolov7/LICENSE.md new file mode 100644 index 0000000..f288702 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/LICENSE.md @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. 
We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. 
The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. 
+ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. 
This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). 
+ + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". 
+ + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. 
+ + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/src/train_utils/train_models/models/yolov7/README.md b/src/train_utils/train_models/models/yolov7/README.md new file mode 100644 index 0000000..b108f43 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/README.md @@ -0,0 +1,288 @@
+# Official YOLOv7
+
+Implementation of paper - [YOLOv7: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2207.02696)
+
+[![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/yolov7-trainable-bag-of-freebies-sets-new/real-time-object-detection-on-coco)](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=yolov7-trainable-bag-of-freebies-sets-new)
+[![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/yolov7)
+[![arxiv.org](http://img.shields.io/badge/cs.CV-arXiv%3A2207.02696-B31B1B.svg)](https://arxiv.org/abs/2207.02696)
+
+## Web Demo
+
+- Integrated into [Huggingface Spaces 🤗](https://huggingface.co/spaces/akhaliq/yolov7) using Gradio. Try out the Web Demo [![Hugging Face Spaces](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/akhaliq/yolov7)
+
+## Performance
+
+MS COCO
+
+| Model | Test Size | AP<sup>test</sup> | AP<sub>50</sub><sup>test</sup> | AP<sub>75</sub><sup>test</sup> | batch 1 fps | batch 32 average time |
+| :-- | :-: | :-: | :-: | :-: | :-: | :-: |
+| [**YOLOv7**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) | 640 | **51.4%** | **69.7%** | **55.9%** | 161 *fps* | 2.8 *ms* |
+| [**YOLOv7-X**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) | 640 | **53.1%** | **71.2%** | **57.8%** | 114 *fps* | 4.3 *ms* |
+| | | | | | | |
+| [**YOLOv7-W6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) | 1280 | **54.9%** | **72.6%** | **60.1%** | 84 *fps* | 7.6 *ms* |
+| [**YOLOv7-E6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) | 1280 | **56.0%** | **73.5%** | **61.2%** | 56 *fps* | 12.3 *ms* |
+| [**YOLOv7-D6**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) | 1280 | **56.6%** | **74.0%** | **61.8%** | 44 *fps* | 15.0 *ms* |
+| [**YOLOv7-E6E**](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt) | 1280 | **56.8%** | **74.4%** | **62.1%** | 36 *fps* | 18.7 *ms* |
+
+## Installation
+
+Docker environment (recommended)
+``` shell
+# create the docker container, you can change the share memory size if you have more.
+nvidia-docker run --name yolov7 -it -v your_coco_path/:/coco/ -v your_code_path/:/yolov7 --shm-size=64g nvcr.io/nvidia/pytorch:21.08-py3
+
+# apt install required packages
+apt update
+apt install -y zip htop screen libgl1-mesa-glx
+
+# pip install required packages
+pip install seaborn thop
+
+# go to code folder
+cd /yolov7
+```
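+
+Once the container is running, a quick check that PyTorch can see the GPU avoids confusing failures later. This is an illustrative sketch (not part of the upstream repo), assuming the NVIDIA PyTorch image above:
+
+```python
+# Minimal environment check inside the container (illustrative only).
+import torch
+
+print("torch:", torch.__version__)
+print("CUDA available:", torch.cuda.is_available())
+if torch.cuda.is_available():
+    print("device:", torch.cuda.get_device_name(0))
+```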
+ +## Testing + +[`yolov7.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7.pt) [`yolov7x.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x.pt) [`yolov7-w6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6.pt) [`yolov7-e6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6.pt) [`yolov7-d6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6.pt) [`yolov7-e6e.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e.pt) + +``` shell +python test.py --data data/coco.yaml --img 640 --batch 32 --conf 0.001 --iou 0.65 --device 0 --weights yolov7.pt --name yolov7_640_val +``` + +You will get the results: + +``` + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.51206 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.69730 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.55521 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.35247 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.55937 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.66693 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.38453 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.63765 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.68772 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.53766 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.73549 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.83868 +``` + +To measure accuracy, download [COCO-annotations for Pycocotools](http://images.cocodataset.org/annotations/annotations_trainval2017.zip) to the `./coco/annotations/instances_val2017.json` + +## Training + +Data preparation + +``` shell +bash scripts/get_coco.sh +``` + +* Download MS COCO dataset images ([train](http://images.cocodataset.org/zips/train2017.zip), [val](http://images.cocodataset.org/zips/val2017.zip), [test](http://images.cocodataset.org/zips/test2017.zip)) and [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip). 
If you have previously used a different version of YOLO, we strongly recommend that you delete `train2017.cache` and `val2017.cache` files, and redownload [labels](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/coco2017labels-segments.zip) + +Single GPU training + +``` shell +# train p5 models +python train.py --workers 8 --device 0 --batch-size 32 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml + +# train p6 models +python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml +``` + +Multiple GPU training + +``` shell +# train p5 models +python -m torch.distributed.launch --nproc_per_node 4 --master_port 9527 train.py --workers 8 --device 0,1,2,3 --sync-bn --batch-size 128 --data data/coco.yaml --img 640 640 --cfg cfg/training/yolov7.yaml --weights '' --name yolov7 --hyp data/hyp.scratch.p5.yaml + +# train p6 models +python -m torch.distributed.launch --nproc_per_node 8 --master_port 9527 train_aux.py --workers 8 --device 0,1,2,3,4,5,6,7 --sync-bn --batch-size 128 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml +``` + +## Transfer learning + +[`yolov7_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7_training.pt) [`yolov7x_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7x_training.pt) [`yolov7-w6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6_training.pt) [`yolov7-e6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6_training.pt) [`yolov7-d6_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-d6_training.pt) [`yolov7-e6e_training.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-e6e_training.pt) + +Single GPU finetuning for custom dataset + +``` shell +# finetune p5 models +python train.py --workers 8 --device 0 --batch-size 32 --data data/custom.yaml --img 640 640 --cfg cfg/training/yolov7-custom.yaml --weights 'yolov7_training.pt' --name yolov7-custom --hyp data/hyp.scratch.custom.yaml + +# finetune p6 models +python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/custom.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6-custom.yaml --weights 'yolov7-w6_training.pt' --name yolov7-w6-custom --hyp data/hyp.scratch.custom.yaml +``` + +## Re-parameterization + +See [reparameterization.ipynb](tools/reparameterization.ipynb) + +## Inference + +On video: +``` shell +python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source yourvideo.mp4 +``` + +On image: +``` shell +python detect.py --weights yolov7.pt --conf 0.25 --img-size 640 --source inference/images/horses.jpg +``` + + + + +## Export + +**Pytorch to CoreML (and inference on MacOS/iOS)** Open In Colab + +**Pytorch to ONNX with NMS (and inference)** Open In Colab +```shell +python export.py --weights yolov7-tiny.pt --grid --end2end --simplify \ + --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 --max-wh 640 +``` + +**Pytorch to TensorRT with NMS (and inference)** Open In Colab + +```shell +wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt +python export.py --weights ./yolov7-tiny.pt --grid --end2end --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 
+git clone https://github.com/Linaom1214/tensorrt-python.git +python ./tensorrt-python/export.py -o yolov7-tiny.onnx -e yolov7-tiny-nms.trt -p fp16 +``` + +**Pytorch to TensorRT another way** Open In Colab
+```shell
+wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-tiny.pt
+python export.py --weights yolov7-tiny.pt --grid --include-nms
+git clone https://github.com/Linaom1214/tensorrt-python.git
+python ./tensorrt-python/export.py -o yolov7-tiny.onnx -e yolov7-tiny-nms.trt -p fp16
+
+# Or use trtexec to convert ONNX to TensorRT engine
+/usr/src/tensorrt/bin/trtexec --onnx=yolov7-tiny.onnx --saveEngine=yolov7-tiny-nms.trt --fp16
+```
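+
+Before building an engine, an ONNX file produced by the plain ONNX-with-NMS export shown earlier can be sanity-checked directly with onnxruntime (models exported via the `--include-nms` TensorRT plugin path generally will not run under onnxruntime). The sketch below is illustrative only and not part of the upstream repo; the file name, sample image, and naive 640x640 resize (no letterboxing) are assumptions based on the commands above, and the output layout depends on the export flags used:
+
+```python
+# Illustrative ONNX sanity check (assumes a yolov7-tiny.onnx exported as above).
+import cv2
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession("yolov7-tiny.onnx", providers=["CPUExecutionProvider"])
+input_name = session.get_inputs()[0].name
+
+img = cv2.imread("inference/images/horses.jpg")          # BGR, HWC
+img = cv2.resize(img, (640, 640))                        # naive resize, no letterbox
+blob = img[:, :, ::-1].transpose(2, 0, 1)[None] / 255.0  # RGB, NCHW, 0-1
+blob = np.ascontiguousarray(blob, dtype=np.float32)
+
+outputs = session.run(None, {input_name: blob})
+for out in outputs:
+    print(out.shape)  # detection tensor shapes; layout depends on the export flags
+```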
+ +Tested with: Python 3.7.13, Pytorch 1.12.0+cu113 + +## Pose estimation + +[`code`](https://github.com/WongKinYiu/yolov7/tree/pose) [`yolov7-w6-pose.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-w6-pose.pt) + +See [keypoint.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/keypoint.ipynb). + + + + +## Instance segmentation + +[`code`](https://github.com/WongKinYiu/yolov7/tree/mask) [`yolov7-mask.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-mask.pt) + +See [instance.ipynb](https://github.com/WongKinYiu/yolov7/blob/main/tools/instance.ipynb). + + + +## Instance segmentation + +[`code`](https://github.com/WongKinYiu/yolov7/tree/u7/seg) [`yolov7-seg.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-seg.pt) + +YOLOv7 for instance segmentation (YOLOR + YOLOv5 + YOLACT) + +| Model | Test Size | APbox | AP50box | AP75box | APmask | AP50mask | AP75mask | +| :-- | :-: | :-: | :-: | :-: | :-: | :-: | :-: | +| **YOLOv7-seg** | 640 | **51.4%** | **69.4%** | **55.8%** | **41.5%** | **65.5%** | **43.7%** | + +## Anchor free detection head + +[`code`](https://github.com/WongKinYiu/yolov7/tree/u6) [`yolov7-u6.pt`](https://github.com/WongKinYiu/yolov7/releases/download/v0.1/yolov7-u6.pt) + +YOLOv7 with decoupled TAL head (YOLOR + YOLOv5 + YOLOv6) + +| Model | Test Size | APval | AP50val | AP75val | +| :-- | :-: | :-: | :-: | :-: | +| **YOLOv7-u6** | 640 | **52.6%** | **69.7%** | **57.3%** | + + +## Citation + +``` +@article{wang2022yolov7, + title={{YOLOv7}: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors}, + author={Wang, Chien-Yao and Bochkovskiy, Alexey and Liao, Hong-Yuan Mark}, + journal={arXiv preprint arXiv:2207.02696}, + year={2022} +} +``` + +``` +@article{wang2022designing, + title={Designing Network Design Strategies Through Gradient Path Analysis}, + author={Wang, Chien-Yao and Liao, Hong-Yuan Mark and Yeh, I-Hau}, + journal={arXiv preprint arXiv:2211.04800}, + year={2022} +} +``` + + +## Teaser + +Yolov7-semantic & YOLOv7-panoptic & YOLOv7-caption + + + + +## Acknowledgements + +
+* [https://github.com/AlexeyAB/darknet](https://github.com/AlexeyAB/darknet)
+* [https://github.com/WongKinYiu/yolor](https://github.com/WongKinYiu/yolor)
+* [https://github.com/WongKinYiu/PyTorch_YOLOv4](https://github.com/WongKinYiu/PyTorch_YOLOv4)
+* [https://github.com/WongKinYiu/ScaledYOLOv4](https://github.com/WongKinYiu/ScaledYOLOv4)
+* [https://github.com/Megvii-BaseDetection/YOLOX](https://github.com/Megvii-BaseDetection/YOLOX)
+* [https://github.com/ultralytics/yolov3](https://github.com/ultralytics/yolov3)
+* [https://github.com/ultralytics/yolov5](https://github.com/ultralytics/yolov5)
+* [https://github.com/DingXiaoH/RepVGG](https://github.com/DingXiaoH/RepVGG)
+* [https://github.com/JUGGHM/OREPA_CVPR2022](https://github.com/JUGGHM/OREPA_CVPR2022)
+* [https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose](https://github.com/TexasInstruments/edgeai-yolov5/tree/yolo-pose)
diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/r50-csp.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/r50-csp.yaml new file mode 100644 index 0000000..94559f7 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/r50-csp.yaml @@ -0,0 +1,49 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# CSP-ResNet backbone +backbone: + # [from, number, module, args] + [[-1, 1, Stem, [128]], # 0-P1/2 + [-1, 3, ResCSPC, [128]], + [-1, 1, Conv, [256, 3, 2]], # 2-P3/8 + [-1, 4, ResCSPC, [256]], + [-1, 1, Conv, [512, 3, 2]], # 4-P3/8 + [-1, 6, ResCSPC, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 6-P3/8 + [-1, 3, ResCSPC, [1024]], # 7 + ] + +# CSP-Res-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 8 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [5, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 2, ResCSPB, [256]], # 13 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [3, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 2, ResCSPB, [128]], # 18 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat + [-1, 2, ResCSPB, [256]], # 22 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 8], 1, Concat, [1]], # cat + [-1, 2, ResCSPB, [512]], # 26 + [-1, 1, Conv, [1024, 3, 1]], + + [[19,23,27], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/x50-csp.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/x50-csp.yaml new file mode 100644 index 0000000..8de14f8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/x50-csp.yaml @@ -0,0 +1,49 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# CSP-ResNeXt backbone +backbone: + # [from, number, module, args] + [[-1, 1, Stem, [128]], # 0-P1/2 + [-1, 3, ResXCSPC, [128]], + [-1, 1, Conv, [256, 3, 2]], # 2-P3/8 + [-1, 4, ResXCSPC, [256]], + [-1, 1, Conv, [512, 3, 2]], # 4-P3/8 + [-1, 6, ResXCSPC, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 6-P3/8 + [-1, 3, ResXCSPC, [1024]], # 7 + ] + +# CSP-ResX-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 8 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [5, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 2, ResXCSPB, [256]], # 13 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [3, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 2, ResXCSPB, [128]], # 18 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 13], 1, Concat, [1]], # cat + [-1, 2, ResXCSPB, [256]], # 22 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 8], 1, Concat, [1]], # cat + [-1, 2, ResXCSPB, [512]], # 26 + [-1, 1, Conv, [1024, 3, 1]], + + [[19,23,27], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp-x.yaml 
b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp-x.yaml new file mode 100644 index 0000000..6e234c5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp-x.yaml @@ -0,0 +1,52 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.33 # model depth multiple +width_multiple: 1.25 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, BottleneckCSPC, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, BottleneckCSPC, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, BottleneckCSPC, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, BottleneckCSPC, [1024]], # 10 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 11 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [8, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 2, BottleneckCSPB, [256]], # 16 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [6, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 2, BottleneckCSPB, [128]], # 21 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, [256]], # 25 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 11], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, [512]], # 29 + [-1, 1, Conv, [1024, 3, 1]], + + [[22,26,30], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp.yaml new file mode 100644 index 0000000..3beecf3 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-csp.yaml @@ -0,0 +1,52 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, BottleneckCSPC, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, BottleneckCSPC, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, BottleneckCSPC, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, BottleneckCSPC, [1024]], # 10 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 11 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [8, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 2, BottleneckCSPB, [256]], # 16 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [6, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 2, BottleneckCSPB, [128]], # 21 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, [256]], # 25 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 11], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, 
[512]], # 29 + [-1, 1, Conv, [1024, 3, 1]], + + [[22,26,30], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-d6.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-d6.yaml new file mode 100644 index 0000000..297b0d1 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-d6.yaml @@ -0,0 +1,63 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # expand model depth +width_multiple: 1.25 # expand layer channels + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + [-1, 1, DownC, [128]], # 2-P2/4 + [-1, 3, BottleneckCSPA, [128]], + [-1, 1, DownC, [256]], # 4-P3/8 + [-1, 15, BottleneckCSPA, [256]], + [-1, 1, DownC, [512]], # 6-P4/16 + [-1, 15, BottleneckCSPA, [512]], + [-1, 1, DownC, [768]], # 8-P5/32 + [-1, 7, BottleneckCSPA, [768]], + [-1, 1, DownC, [1024]], # 10-P6/64 + [-1, 7, BottleneckCSPA, [1024]], # 11 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 12 + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-6, 1, Conv, [384, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [384]], # 17 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-13, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [256]], # 22 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-20, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [128]], # 27 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, DownC, [256]], + [[-1, 22], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [256]], # 31 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, DownC, [384]], + [[-1, 17], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [384]], # 35 + [-1, 1, Conv, [768, 3, 1]], + [-2, 1, DownC, [512]], + [[-1, 12], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [512]], # 39 + [-1, 1, Conv, [1024, 3, 1]], + + [[28,32,36,40], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-e6.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-e6.yaml new file mode 100644 index 0000000..58afc5b --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-e6.yaml @@ -0,0 +1,63 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # expand model depth +width_multiple: 1.25 # expand layer channels + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + [-1, 1, DownC, [128]], # 2-P2/4 + [-1, 3, BottleneckCSPA, [128]], + [-1, 1, DownC, [256]], # 4-P3/8 + [-1, 7, BottleneckCSPA, [256]], + [-1, 1, DownC, [512]], # 6-P4/16 + [-1, 7, BottleneckCSPA, [512]], + [-1, 1, DownC, [768]], # 8-P5/32 + [-1, 3, BottleneckCSPA, [768]], + [-1, 1, DownC, [1024]], # 10-P6/64 + [-1, 3, BottleneckCSPA, [1024]], # 11 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, 
SPPCSPC, [512]], # 12 + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-6, 1, Conv, [384, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [384]], # 17 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-13, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [256]], # 22 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-20, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [128]], # 27 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, DownC, [256]], + [[-1, 22], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [256]], # 31 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, DownC, [384]], + [[-1, 17], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [384]], # 35 + [-1, 1, Conv, [768, 3, 1]], + [-2, 1, DownC, [512]], + [[-1, 12], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [512]], # 39 + [-1, 1, Conv, [1024, 3, 1]], + + [[28,32,36,40], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-p6.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-p6.yaml new file mode 100644 index 0000000..924cf5c --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-p6.yaml @@ -0,0 +1,63 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # expand model depth +width_multiple: 1.0 # expand layer channels + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 2-P2/4 + [-1, 3, BottleneckCSPA, [128]], + [-1, 1, Conv, [256, 3, 2]], # 4-P3/8 + [-1, 7, BottleneckCSPA, [256]], + [-1, 1, Conv, [384, 3, 2]], # 6-P4/16 + [-1, 7, BottleneckCSPA, [384]], + [-1, 1, Conv, [512, 3, 2]], # 8-P5/32 + [-1, 3, BottleneckCSPA, [512]], + [-1, 1, Conv, [640, 3, 2]], # 10-P6/64 + [-1, 3, BottleneckCSPA, [640]], # 11 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [320]], # 12 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-6, 1, Conv, [256, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [256]], # 17 + [-1, 1, Conv, [192, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-13, 1, Conv, [192, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [192]], # 22 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-20, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [128]], # 27 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [192, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [192]], # 31 + [-1, 1, Conv, [384, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 17], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [256]], # 35 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [320, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [320]], # 39 + [-1, 1, Conv, [640, 3, 1]], + + [[28,32,36,40], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-w6.yaml 
b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-w6.yaml new file mode 100644 index 0000000..a2fc969 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolor-w6.yaml @@ -0,0 +1,63 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # expand model depth +width_multiple: 1.0 # expand layer channels + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + [-1, 1, Conv, [128, 3, 2]], # 2-P2/4 + [-1, 3, BottleneckCSPA, [128]], + [-1, 1, Conv, [256, 3, 2]], # 4-P3/8 + [-1, 7, BottleneckCSPA, [256]], + [-1, 1, Conv, [512, 3, 2]], # 6-P4/16 + [-1, 7, BottleneckCSPA, [512]], + [-1, 1, Conv, [768, 3, 2]], # 8-P5/32 + [-1, 3, BottleneckCSPA, [768]], + [-1, 1, Conv, [1024, 3, 2]], # 10-P6/64 + [-1, 3, BottleneckCSPA, [1024]], # 11 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 12 + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-6, 1, Conv, [384, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [384]], # 17 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-13, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [256]], # 22 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [-20, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 3, BottleneckCSPB, [128]], # 27 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 22], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [256]], # 31 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [384, 3, 2]], + [[-1, 17], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [384]], # 35 + [-1, 1, Conv, [768, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 12], 1, Concat, [1]], # cat + [-1, 3, BottleneckCSPB, [512]], # 39 + [-1, 1, Conv, [1024, 3, 1]], + + [[28,32,36,40], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3-spp.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3-spp.yaml new file mode 100644 index 0000000..38dcc44 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3-spp.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3-SPP head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, SPP, [512, [5, 9, 13]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 
'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3.yaml new file mode 100644 index 0000000..f2e7613 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov3.yaml @@ -0,0 +1,51 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# darknet53 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, Bottleneck, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, Bottleneck, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, Bottleneck, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, Bottleneck, [1024]], # 10 + ] + +# YOLOv3 head +head: + [[-1, 1, Bottleneck, [1024, False]], + [-1, 1, Conv, [512, [1, 1]]], + [-1, 1, Conv, [1024, 3, 1]], + [-1, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [1024, 3, 1]], # 15 (P5/32-large) + + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 8], 1, Concat, [1]], # cat backbone P4 + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Bottleneck, [512, False]], + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], # 22 (P4/16-medium) + + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [[-1, 6], 1, Concat, [1]], # cat backbone P3 + [-1, 1, Bottleneck, [256, False]], + [-1, 2, Bottleneck, [256, False]], # 27 (P3/8-small) + + [[27, 22, 15], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov4-csp.yaml b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov4-csp.yaml new file mode 100644 index 0000000..3c908c7 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/baseline/yolov4-csp.yaml @@ -0,0 +1,52 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# CSP-Darknet backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Bottleneck, [64]], + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 2, BottleneckCSPC, [128]], + [-1, 1, Conv, [256, 3, 2]], # 5-P3/8 + [-1, 8, BottleneckCSPC, [256]], + [-1, 1, Conv, [512, 3, 2]], # 7-P4/16 + [-1, 8, BottleneckCSPC, [512]], + [-1, 1, Conv, [1024, 3, 2]], # 9-P5/32 + [-1, 4, BottleneckCSPC, [1024]], # 10 + ] + +# CSP-Dark-PAN head +head: + [[-1, 1, SPPCSPC, [512]], # 11 + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [8, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, 
Concat, [1]], + [-1, 2, BottleneckCSPB, [256]], # 16 + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [6, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + [-1, 2, BottleneckCSPB, [128]], # 21 + [-1, 1, Conv, [256, 3, 1]], + [-2, 1, Conv, [256, 3, 2]], + [[-1, 16], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, [256]], # 25 + [-1, 1, Conv, [512, 3, 1]], + [-2, 1, Conv, [512, 3, 2]], + [[-1, 11], 1, Concat, [1]], # cat + [-1, 2, BottleneckCSPB, [512]], # 29 + [-1, 1, Conv, [1024, 3, 1]], + + [[22,26,30], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-d6.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-d6.yaml new file mode 100644 index 0000000..75a8cf5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-d6.yaml @@ -0,0 +1,202 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7-d6 backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [96, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [192]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [192, 1, 1]], # 14 + + [-1, 1, DownC, [384]], # 15-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 27 + + [-1, 1, DownC, [768]], # 28-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 40 + + [-1, 1, DownC, [1152]], # 41-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [1152, 1, 1]], # 53 + + [-1, 1, DownC, [1536]], # 54-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [1536, 1, 1]], # 66 + ] + +# yolov7-d6 head +head: + [[-1, 1, SPPCSPC, [768]], # 67 + + [-1, 1, Conv, [576, 1, 1]], 
+ [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [53, 1, Conv, [576, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [576, 1, 1]], # 83 + + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [40, 1, Conv, [384, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 99 + + [-1, 1, Conv, [192, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [27, 1, Conv, [192, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [192, 1, 1]], # 115 + + [-1, 1, DownC, [384]], + [[-1, 99], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 129 + + [-1, 1, DownC, [576]], + [[-1, 83], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [576, 1, 1]], # 143 + + [-1, 1, DownC, [768]], + [[-1, 67], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 157 + + [115, 1, Conv, [384, 3, 1]], + [129, 1, Conv, [768, 3, 1]], + [143, 1, Conv, [1152, 3, 1]], + [157, 1, Conv, [1536, 3, 1]], + + [[158,159,160,161], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6.yaml new file mode 100644 index 0000000..e680406 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6.yaml @@ -0,0 +1,180 @@ +# parameters +nc: 80 # number of 
classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7-e6 backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [80, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [160]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 12 + + [-1, 1, DownC, [320]], # 13-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 23 + + [-1, 1, DownC, [640]], # 24-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 34 + + [-1, 1, DownC, [960]], # 35-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 45 + + [-1, 1, DownC, [1280]], # 46-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 56 + ] + +# yolov7-e6 head +head: + [[-1, 1, SPPCSPC, [640]], # 57 + + [-1, 1, Conv, [480, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [45, 1, Conv, [480, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 71 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [34, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 85 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [23, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 
1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 99 + + [-1, 1, DownC, [320]], + [[-1, 85], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 111 + + [-1, 1, DownC, [480]], + [[-1, 71], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 123 + + [-1, 1, DownC, [640]], + [[-1, 57], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 135 + + [99, 1, Conv, [320, 3, 1]], + [111, 1, Conv, [640, 3, 1]], + [123, 1, Conv, [960, 3, 1]], + [135, 1, Conv, [1280, 3, 1]], + + [[136,137,138,139], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6e.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6e.yaml new file mode 100644 index 0000000..135990d --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-e6e.yaml @@ -0,0 +1,301 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7-e6e backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [80, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [160]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 12 + [-11, 1, Conv, [64, 1, 1]], + [-12, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 22 + [[-1, -11], 1, Shortcut, [1]], # 23 + + [-1, 1, DownC, [320]], # 24-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 34 + [-11, 1, Conv, [128, 1, 1]], + [-12, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, 
Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 44 + [[-1, -11], 1, Shortcut, [1]], # 45 + + [-1, 1, DownC, [640]], # 46-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 56 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 66 + [[-1, -11], 1, Shortcut, [1]], # 67 + + [-1, 1, DownC, [960]], # 68-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 78 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 88 + [[-1, -11], 1, Shortcut, [1]], # 89 + + [-1, 1, DownC, [1280]], # 90-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 100 + [-11, 1, Conv, [512, 1, 1]], + [-12, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 110 + [[-1, -11], 1, Shortcut, [1]], # 111 + ] + +# yolov7-e6e head +head: + [[-1, 1, SPPCSPC, [640]], # 112 + + [-1, 1, Conv, [480, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [89, 1, Conv, [480, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 126 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 136 + [[-1, -11], 1, Shortcut, [1]], # 137 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [67, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, 
Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 151 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 161 + [[-1, -11], 1, Shortcut, [1]], # 162 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [45, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 176 + [-11, 1, Conv, [128, 1, 1]], + [-12, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 186 + [[-1, -11], 1, Shortcut, [1]], # 187 + + [-1, 1, DownC, [320]], + [[-1, 162], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 199 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 209 + [[-1, -11], 1, Shortcut, [1]], # 210 + + [-1, 1, DownC, [480]], + [[-1, 137], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 222 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 232 + [[-1, -11], 1, Shortcut, [1]], # 233 + + [-1, 1, DownC, [640]], + [[-1, 112], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 245 + [-11, 1, Conv, [512, 1, 1]], + [-12, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, 
Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 255 + [[-1, -11], 1, Shortcut, [1]], # 256 + + [187, 1, Conv, [320, 3, 1]], + [210, 1, Conv, [640, 3, 1]], + [233, 1, Conv, [960, 3, 1]], + [256, 1, Conv, [1280, 3, 1]], + + [[257,258,259,260], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny-silu.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny-silu.yaml new file mode 100644 index 0000000..9250573 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny-silu.yaml @@ -0,0 +1,112 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# YOLOv7-tiny backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 2]], # 0-P1/2 + + [-1, 1, Conv, [64, 3, 2]], # 1-P2/4 + + [-1, 1, Conv, [32, 1, 1]], + [-2, 1, Conv, [32, 1, 1]], + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, Conv, [32, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1]], # 7 + + [-1, 1, MP, []], # 8-P3/8 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 14 + + [-1, 1, MP, []], # 15-P4/16 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 21 + + [-1, 1, MP, []], # 22-P5/32 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 28 + ] + +# YOLOv7-tiny head +head: + [[-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, SP, [5]], + [-2, 1, SP, [9]], + [-3, 1, SP, [13]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], + [[-1, -7], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 37 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [21, 1, Conv, [128, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 47 + + [-1, 1, Conv, [64, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [14, 1, Conv, [64, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [32, 1, 1]], + [-2, 1, Conv, [32, 1, 1]], + [-1, 1, Conv, [32, 3, 1]], + [-1, 1, Conv, [32, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1]], # 57 + + [-1, 1, Conv, [128, 3, 2]], + [[-1, 47], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 65 + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 37], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 73 + + [57, 1, Conv, [128, 3, 1]], + [65, 1, Conv, [256, 3, 1]], + [73, 1, Conv, [512, 3, 1]], + + [[74,75,76], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git 
a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny.yaml new file mode 100644 index 0000000..b09f130 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-tiny.yaml @@ -0,0 +1,112 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# yolov7-tiny backbone +backbone: + # [from, number, module, args] c2, k=1, s=1, p=None, g=1, act=True + [[-1, 1, Conv, [32, 3, 2, None, 1, nn.LeakyReLU(0.1)]], # 0-P1/2 + + [-1, 1, Conv, [64, 3, 2, None, 1, nn.LeakyReLU(0.1)]], # 1-P2/4 + + [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 7 + + [-1, 1, MP, []], # 8-P3/8 + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 14 + + [-1, 1, MP, []], # 15-P4/16 + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 21 + + [-1, 1, MP, []], # 22-P5/32 + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 28 + ] + +# yolov7-tiny head +head: + [[-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, SP, [5]], + [-2, 1, SP, [9]], + [-3, 1, SP, [13]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -7], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 37 + + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [21, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 47 + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [14, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + 
[-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 57 + + [-1, 1, Conv, [128, 3, 2, None, 1, nn.LeakyReLU(0.1)]], + [[-1, 47], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 65 + + [-1, 1, Conv, [256, 3, 2, None, 1, nn.LeakyReLU(0.1)]], + [[-1, 37], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 73 + + [57, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [65, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [73, 1, Conv, [512, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + + [[74,75,76], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-w6.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-w6.yaml new file mode 100644 index 0000000..5637a61 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7-w6.yaml @@ -0,0 +1,158 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7-w6 backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + + [-1, 1, Conv, [128, 3, 2]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 10 + + [-1, 1, Conv, [256, 3, 2]], # 11-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 19 + + [-1, 1, Conv, [512, 3, 2]], # 20-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 28 + + [-1, 1, Conv, [768, 3, 2]], # 29-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 37 + + [-1, 1, Conv, [1024, 3, 2]], # 38-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 46 + ] + +# yolov7-w6 head +head: + [[-1, 1, SPPCSPC, [512]], # 47 + + [-1, 1, Conv, 
[384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [37, 1, Conv, [384, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 59 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [28, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 71 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [19, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 83 + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 71], 1, Concat, [1]], # cat + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 93 + + [-1, 1, Conv, [384, 3, 2]], + [[-1, 59], 1, Concat, [1]], # cat + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 103 + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 47], 1, Concat, [1]], # cat + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 113 + + [83, 1, Conv, [256, 3, 1]], + [93, 1, Conv, [512, 3, 1]], + [103, 1, Conv, [768, 3, 1]], + [113, 1, Conv, [1024, 3, 1]], + + [[114,115,116,117], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7.yaml new file mode 100644 index 0000000..201f98d --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7.yaml @@ -0,0 +1,140 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# yolov7 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Conv, [64, 3, 1]], + + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 11 + + [-1, 1, MP, []], + [-1, 1, Conv, [128, 1, 1]], + [-3, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, 
[128, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 16-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 24 + + [-1, 1, MP, []], + [-1, 1, Conv, [256, 1, 1]], + [-3, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 29-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 37 + + [-1, 1, MP, []], + [-1, 1, Conv, [512, 1, 1]], + [-3, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 42-P5/32 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 50 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [512]], # 51 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [37, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 63 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [24, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 75 + + [-1, 1, MP, []], + [-1, 1, Conv, [128, 1, 1]], + [-3, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 2]], + [[-1, -3, 63], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 88 + + [-1, 1, MP, []], + [-1, 1, Conv, [256, 1, 1]], + [-3, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 2]], + [[-1, -3, 51], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 101 + + [75, 1, RepConv, [256, 3, 1]], + [88, 1, RepConv, [512, 3, 1]], + [101, 1, RepConv, [1024, 3, 1]], + + [[102,103,104], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7x.yaml b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7x.yaml new file mode 100644 index 0000000..c1b4acc --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/deploy/yolov7x.yaml @@ -0,0 +1,156 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 
459,401] # P5/32 + +# yolov7x backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [40, 3, 1]], # 0 + + [-1, 1, Conv, [80, 3, 2]], # 1-P1/2 + [-1, 1, Conv, [80, 3, 1]], + + [-1, 1, Conv, [160, 3, 2]], # 3-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 13 + + [-1, 1, MP, []], + [-1, 1, Conv, [160, 1, 1]], + [-3, 1, Conv, [160, 1, 1]], + [-1, 1, Conv, [160, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 18-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 28 + + [-1, 1, MP, []], + [-1, 1, Conv, [320, 1, 1]], + [-3, 1, Conv, [320, 1, 1]], + [-1, 1, Conv, [320, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 33-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 43 + + [-1, 1, MP, []], + [-1, 1, Conv, [640, 1, 1]], + [-3, 1, Conv, [640, 1, 1]], + [-1, 1, Conv, [640, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 48-P5/32 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 58 + ] + +# yolov7x head +head: + [[-1, 1, SPPCSPC, [640]], # 59 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [43, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 73 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [28, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 87 + + [-1, 1, MP, []], + [-1, 1, Conv, [160, 1, 1]], + [-3, 1, Conv, [160, 1, 1]], + [-1, 1, Conv, [160, 3, 2]], + [[-1, -3, 73], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 102 + + [-1, 1, MP, []], + [-1, 1, Conv, [320, 1, 1]], + [-3, 1, Conv, [320, 1, 1]], + [-1, 1, Conv, [320, 3, 2]], + [[-1, -3, 59], 
1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 117 + + [87, 1, Conv, [320, 3, 1]], + [102, 1, Conv, [640, 3, 1]], + [117, 1, Conv, [1280, 3, 1]], + + [[118,119,120], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-d6.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-d6.yaml new file mode 100644 index 0000000..4faedda --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-d6.yaml @@ -0,0 +1,207 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7 backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [96, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [192]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [192, 1, 1]], # 14 + + [-1, 1, DownC, [384]], # 15-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 27 + + [-1, 1, DownC, [768]], # 28-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 40 + + [-1, 1, DownC, [1152]], # 41-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [1152, 1, 1]], # 53 + + [-1, 1, DownC, [1536]], # 54-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [1536, 1, 1]], # 66 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [768]], # 67 + + [-1, 1, Conv, [576, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [53, 1, Conv, [576, 1, 1]], # route backbone P5 + [[-1, -2], 1, 
Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [576, 1, 1]], # 83 + + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [40, 1, Conv, [384, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 99 + + [-1, 1, Conv, [192, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [27, 1, Conv, [192, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [192, 1, 1]], # 115 + + [-1, 1, DownC, [384]], + [[-1, 99], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 129 + + [-1, 1, DownC, [576]], + [[-1, 83], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [576, 1, 1]], # 143 + + [-1, 1, DownC, [768]], + [[-1, 67], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8, -9, -10], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 157 + + [115, 1, Conv, [384, 3, 1]], + [129, 1, Conv, [768, 3, 1]], + [143, 1, Conv, [1152, 3, 1]], + [157, 1, Conv, [1536, 3, 1]], + + [115, 1, Conv, [384, 3, 1]], + [99, 1, Conv, [768, 3, 1]], + [83, 1, Conv, [1152, 3, 1]], + [67, 1, Conv, [1536, 3, 1]], + + [[158,159,160,161,162,163,164,165], 1, IAuxDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6.yaml new file mode 100644 index 0000000..58b27f0 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6.yaml @@ -0,0 
+1,185 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7 backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [80, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [160]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 12 + + [-1, 1, DownC, [320]], # 13-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 23 + + [-1, 1, DownC, [640]], # 24-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 34 + + [-1, 1, DownC, [960]], # 35-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 45 + + [-1, 1, DownC, [1280]], # 46-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 56 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [640]], # 57 + + [-1, 1, Conv, [480, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [45, 1, Conv, [480, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 71 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [34, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 85 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [23, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, 
[64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 99 + + [-1, 1, DownC, [320]], + [[-1, 85], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 111 + + [-1, 1, DownC, [480]], + [[-1, 71], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 123 + + [-1, 1, DownC, [640]], + [[-1, 57], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 135 + + [99, 1, Conv, [320, 3, 1]], + [111, 1, Conv, [640, 3, 1]], + [123, 1, Conv, [960, 3, 1]], + [135, 1, Conv, [1280, 3, 1]], + + [99, 1, Conv, [320, 3, 1]], + [85, 1, Conv, [640, 3, 1]], + [71, 1, Conv, [960, 3, 1]], + [57, 1, Conv, [1280, 3, 1]], + + [[136,137,138,139,140,141,142,143], 1, IAuxDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6e.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6e.yaml new file mode 100644 index 0000000..3c83661 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-e6e.yaml @@ -0,0 +1,306 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7 backbone +backbone: + # [from, number, module, args], + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [80, 3, 1]], # 1-P1/2 + + [-1, 1, DownC, [160]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 12 + [-11, 1, Conv, [64, 1, 1]], + [-12, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 22 + [[-1, -11], 1, Shortcut, [1]], # 23 + + [-1, 1, DownC, [320]], # 24-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 34 + [-11, 1, Conv, [128, 1, 1]], + [-12, 1, 
Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 44 + [[-1, -11], 1, Shortcut, [1]], # 45 + + [-1, 1, DownC, [640]], # 46-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 56 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 66 + [[-1, -11], 1, Shortcut, [1]], # 67 + + [-1, 1, DownC, [960]], # 68-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 78 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [960, 1, 1]], # 88 + [[-1, -11], 1, Shortcut, [1]], # 89 + + [-1, 1, DownC, [1280]], # 90-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 100 + [-11, 1, Conv, [512, 1, 1]], + [-12, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 110 + [[-1, -11], 1, Shortcut, [1]], # 111 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [640]], # 112 + + [-1, 1, Conv, [480, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [89, 1, Conv, [480, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 126 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 136 + [[-1, -11], 1, Shortcut, [1]], # 137 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [67, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, 
Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 151 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 161 + [[-1, -11], 1, Shortcut, [1]], # 162 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [45, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 176 + [-11, 1, Conv, [128, 1, 1]], + [-12, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 186 + [[-1, -11], 1, Shortcut, [1]], # 187 + + [-1, 1, DownC, [320]], + [[-1, 162], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 199 + [-11, 1, Conv, [256, 1, 1]], + [-12, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 209 + [[-1, -11], 1, Shortcut, [1]], # 210 + + [-1, 1, DownC, [480]], + [[-1, 137], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 222 + [-11, 1, Conv, [384, 1, 1]], + [-12, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [480, 1, 1]], # 232 + [[-1, -11], 1, Shortcut, [1]], # 233 + + [-1, 1, DownC, [640]], + [[-1, 112], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 245 + [-11, 1, Conv, [512, 1, 1]], + [-12, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, 
Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 255 + [[-1, -11], 1, Shortcut, [1]], # 256 + + [187, 1, Conv, [320, 3, 1]], + [210, 1, Conv, [640, 3, 1]], + [233, 1, Conv, [960, 3, 1]], + [256, 1, Conv, [1280, 3, 1]], + + [186, 1, Conv, [320, 3, 1]], + [161, 1, Conv, [640, 3, 1]], + [136, 1, Conv, [960, 3, 1]], + [112, 1, Conv, [1280, 3, 1]], + + [[257,258,259,260,261,262,263,264], 1, IAuxDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-tiny.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-tiny.yaml new file mode 100644 index 0000000..3679b0d --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-tiny.yaml @@ -0,0 +1,112 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [10,13, 16,30, 33,23] # P3/8 + - [30,61, 62,45, 59,119] # P4/16 + - [116,90, 156,198, 373,326] # P5/32 + +# yolov7-tiny backbone +backbone: + # [from, number, module, args] c2, k=1, s=1, p=None, g=1, act=True + [[-1, 1, Conv, [32, 3, 2, None, 1, nn.LeakyReLU(0.1)]], # 0-P1/2 + + [-1, 1, Conv, [64, 3, 2, None, 1, nn.LeakyReLU(0.1)]], # 1-P2/4 + + [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 7 + + [-1, 1, MP, []], # 8-P3/8 + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 14 + + [-1, 1, MP, []], # 15-P4/16 + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 21 + + [-1, 1, MP, []], # 22-P5/32 + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 28 + ] + +# yolov7-tiny head +head: + [[-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, SP, [5]], + [-2, 1, SP, [9]], + [-3, 1, SP, [13]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -7], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 37 + + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [21, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 
1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 47 + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [14, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [32, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [32, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 57 + + [-1, 1, Conv, [128, 3, 2, None, 1, nn.LeakyReLU(0.1)]], + [[-1, 47], 1, Concat, [1]], + + [-1, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [64, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [64, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 65 + + [-1, 1, Conv, [256, 3, 2, None, 1, nn.LeakyReLU(0.1)]], + [[-1, 37], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-2, 1, Conv, [128, 1, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [-1, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [[-1, -2, -3, -4], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1, None, 1, nn.LeakyReLU(0.1)]], # 73 + + [57, 1, Conv, [128, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [65, 1, Conv, [256, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + [73, 1, Conv, [512, 3, 1, None, 1, nn.LeakyReLU(0.1)]], + + [[74,75,76], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-w6.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-w6.yaml new file mode 100644 index 0000000..4b9c013 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7-w6.yaml @@ -0,0 +1,163 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [ 19,27, 44,40, 38,94 ] # P3/8 + - [ 96,68, 86,152, 180,137 ] # P4/16 + - [ 140,301, 303,264, 238,542 ] # P5/32 + - [ 436,615, 739,380, 925,792 ] # P6/64 + +# yolov7 backbone +backbone: + # [from, number, module, args] + [[-1, 1, ReOrg, []], # 0 + [-1, 1, Conv, [64, 3, 1]], # 1-P1/2 + + [-1, 1, Conv, [128, 3, 2]], # 2-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 10 + + [-1, 1, Conv, [256, 3, 2]], # 11-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 19 + + [-1, 1, Conv, [512, 3, 2]], # 20-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 28 + + [-1, 1, Conv, [768, 3, 2]], # 
29-P5/32 + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [-1, 1, Conv, [384, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [768, 1, 1]], # 37 + + [-1, 1, Conv, [1024, 3, 2]], # 38-P6/64 + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 46 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [512]], # 47 + + [-1, 1, Conv, [384, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [37, 1, Conv, [384, 1, 1]], # route backbone P5 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 59 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [28, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 71 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [19, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 83 + + [-1, 1, Conv, [256, 3, 2]], + [[-1, 71], 1, Concat, [1]], # cat + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 93 + + [-1, 1, Conv, [384, 3, 2]], + [[-1, 59], 1, Concat, [1]], # cat + + [-1, 1, Conv, [384, 1, 1]], + [-2, 1, Conv, [384, 1, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [-1, 1, Conv, [192, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [384, 1, 1]], # 103 + + [-1, 1, Conv, [512, 3, 2]], + [[-1, 47], 1, Concat, [1]], # cat + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 113 + + [83, 1, Conv, [256, 3, 1]], + [93, 1, Conv, [512, 3, 1]], + [103, 1, Conv, [768, 3, 1]], + [113, 1, Conv, [1024, 3, 1]], + + [83, 1, Conv, [320, 3, 1]], + [71, 1, Conv, [640, 3, 1]], + [59, 1, Conv, [960, 3, 1]], + [47, 1, Conv, [1280, 3, 1]], + + [[114,115,116,117,118,119,120,121], 1, IAuxDetect, [nc, anchors]], # Detect(P3, P4, P5, P6) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7.yaml new file mode 100644 index 0000000..9a807e5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7.yaml @@ -0,0 +1,140 @@ +# parameters +nc: 80 # number 
of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# yolov7 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [32, 3, 1]], # 0 + + [-1, 1, Conv, [64, 3, 2]], # 1-P1/2 + [-1, 1, Conv, [64, 3, 1]], + + [-1, 1, Conv, [128, 3, 2]], # 3-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 11 + + [-1, 1, MP, []], + [-1, 1, Conv, [128, 1, 1]], + [-3, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 16-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 24 + + [-1, 1, MP, []], + [-1, 1, Conv, [256, 1, 1]], + [-3, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 29-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 37 + + [-1, 1, MP, []], + [-1, 1, Conv, [512, 1, 1]], + [-3, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 42-P5/32 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [1024, 1, 1]], # 50 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [512]], # 51 + + [-1, 1, Conv, [256, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [37, 1, Conv, [256, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 63 + + [-1, 1, Conv, [128, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [24, 1, Conv, [128, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [128, 1, 1]], # 75 + + [-1, 1, MP, []], + [-1, 1, Conv, [128, 1, 1]], + [-3, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 2]], + [[-1, -3, 63], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -2, -3, -4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [256, 1, 1]], # 88 + + [-1, 1, MP, []], + [-1, 1, Conv, [256, 1, 1]], + [-3, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 2]], + [[-1, -3, 51], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -2, -3, 
-4, -5, -6], 1, Concat, [1]], + [-1, 1, Conv, [512, 1, 1]], # 101 + + [75, 1, RepConv, [256, 3, 1]], + [88, 1, RepConv, [512, 3, 1]], + [101, 1, RepConv, [1024, 3, 1]], + + [[102,103,104], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/cfg/training/yolov7x.yaml b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7x.yaml new file mode 100644 index 0000000..207be88 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/cfg/training/yolov7x.yaml @@ -0,0 +1,156 @@ +# parameters +nc: 80 # number of classes +depth_multiple: 1.0 # model depth multiple +width_multiple: 1.0 # layer channel multiple + +# anchors +anchors: + - [12,16, 19,36, 40,28] # P3/8 + - [36,75, 76,55, 72,146] # P4/16 + - [142,110, 192,243, 459,401] # P5/32 + +# yolov7 backbone +backbone: + # [from, number, module, args] + [[-1, 1, Conv, [40, 3, 1]], # 0 + + [-1, 1, Conv, [80, 3, 2]], # 1-P1/2 + [-1, 1, Conv, [80, 3, 1]], + + [-1, 1, Conv, [160, 3, 2]], # 3-P2/4 + [-1, 1, Conv, [64, 1, 1]], + [-2, 1, Conv, [64, 1, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [-1, 1, Conv, [64, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 13 + + [-1, 1, MP, []], + [-1, 1, Conv, [160, 1, 1]], + [-3, 1, Conv, [160, 1, 1]], + [-1, 1, Conv, [160, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 18-P3/8 + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 28 + + [-1, 1, MP, []], + [-1, 1, Conv, [320, 1, 1]], + [-3, 1, Conv, [320, 1, 1]], + [-1, 1, Conv, [320, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 33-P4/16 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 43 + + [-1, 1, MP, []], + [-1, 1, Conv, [640, 1, 1]], + [-3, 1, Conv, [640, 1, 1]], + [-1, 1, Conv, [640, 3, 2]], + [[-1, -3], 1, Concat, [1]], # 48-P5/32 + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [1280, 1, 1]], # 58 + ] + +# yolov7 head +head: + [[-1, 1, SPPCSPC, [640]], # 59 + + [-1, 1, Conv, [320, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [43, 1, Conv, [320, 1, 1]], # route backbone P4 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 73 + + [-1, 1, Conv, [160, 1, 1]], + [-1, 1, nn.Upsample, [None, 2, 'nearest']], + [28, 1, Conv, [160, 1, 1]], # route backbone P3 + [[-1, -2], 1, Concat, [1]], + + [-1, 1, Conv, [128, 1, 1]], + [-2, 1, Conv, [128, 1, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + 
[-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [-1, 1, Conv, [128, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [160, 1, 1]], # 87 + + [-1, 1, MP, []], + [-1, 1, Conv, [160, 1, 1]], + [-3, 1, Conv, [160, 1, 1]], + [-1, 1, Conv, [160, 3, 2]], + [[-1, -3, 73], 1, Concat, [1]], + + [-1, 1, Conv, [256, 1, 1]], + [-2, 1, Conv, [256, 1, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [-1, 1, Conv, [256, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [320, 1, 1]], # 102 + + [-1, 1, MP, []], + [-1, 1, Conv, [320, 1, 1]], + [-3, 1, Conv, [320, 1, 1]], + [-1, 1, Conv, [320, 3, 2]], + [[-1, -3, 59], 1, Concat, [1]], + + [-1, 1, Conv, [512, 1, 1]], + [-2, 1, Conv, [512, 1, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [-1, 1, Conv, [512, 3, 1]], + [[-1, -3, -5, -7, -8], 1, Concat, [1]], + [-1, 1, Conv, [640, 1, 1]], # 117 + + [87, 1, Conv, [320, 3, 1]], + [102, 1, Conv, [640, 3, 1]], + [117, 1, Conv, [1280, 3, 1]], + + [[118,119,120], 1, IDetect, [nc, anchors]], # Detect(P3, P4, P5) + ] diff --git a/src/train_utils/train_models/models/yolov7/data/coco.yaml b/src/train_utils/train_models/models/yolov7/data/coco.yaml new file mode 100644 index 0000000..a1d126c --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/data/coco.yaml @@ -0,0 +1,23 @@ +# COCO 2017 dataset http://cocodataset.org + +# download command/URL (optional) +download: bash ./scripts/get_coco.sh + +# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] +train: ./coco/train2017.txt # 118287 images +val: ./coco/val2017.txt # 5000 images +test: ./coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 + +# number of classes +nc: 80 + +# class names +names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', + 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', + 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', + 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush' ] diff --git a/src/train_utils/train_models/models/yolov7/data/hyp.scratch.custom.yaml b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.custom.yaml new file mode 100644 index 0000000..8570d73 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.custom.yaml @@ -0,0 +1,31 @@ +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial 
momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.2 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.0 # image mixup (probability) +copy_paste: 0.0 # image copy paste (probability) +paste_in: 0.0 # image copy paste (probability), use 0 for faster training +loss_ota: 1 # use ComputeLossOTA, use 0 for faster training \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p5.yaml b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p5.yaml new file mode 100644 index 0000000..a409bac --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p5.yaml @@ -0,0 +1,31 @@ +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.1 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.2 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.15 # image mixup (probability) +copy_paste: 0.0 # image copy paste (probability) +paste_in: 0.15 # image copy paste (probability), use 0 for faster training +loss_ota: 1 # use ComputeLossOTA, use 0 for faster training \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p6.yaml b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p6.yaml new file mode 100644 index 0000000..192d0d5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.p6.yaml @@ -0,0 +1,31 @@ +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # 
optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.3 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 0.7 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.2 # image translation (+/- fraction) +scale: 0.9 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.15 # image mixup (probability) +copy_paste: 0.0 # image copy paste (probability) +paste_in: 0.15 # image copy paste (probability), use 0 for faster training +loss_ota: 1 # use ComputeLossOTA, use 0 for faster training \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/data/hyp.scratch.tiny.yaml b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.tiny.yaml new file mode 100644 index 0000000..b0dc14a --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/data/hyp.scratch.tiny.yaml @@ -0,0 +1,31 @@ +lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3) +lrf: 0.01 # final OneCycleLR learning rate (lr0 * lrf) +momentum: 0.937 # SGD momentum/Adam beta1 +weight_decay: 0.0005 # optimizer weight decay 5e-4 +warmup_epochs: 3.0 # warmup epochs (fractions ok) +warmup_momentum: 0.8 # warmup initial momentum +warmup_bias_lr: 0.1 # warmup initial bias lr +box: 0.05 # box loss gain +cls: 0.5 # cls loss gain +cls_pw: 1.0 # cls BCELoss positive_weight +obj: 1.0 # obj loss gain (scale with pixels) +obj_pw: 1.0 # obj BCELoss positive_weight +iou_t: 0.20 # IoU training threshold +anchor_t: 4.0 # anchor-multiple threshold +# anchors: 3 # anchors per output layer (0 to ignore) +fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5) +hsv_h: 0.015 # image HSV-Hue augmentation (fraction) +hsv_s: 0.7 # image HSV-Saturation augmentation (fraction) +hsv_v: 0.4 # image HSV-Value augmentation (fraction) +degrees: 0.0 # image rotation (+/- deg) +translate: 0.1 # image translation (+/- fraction) +scale: 0.5 # image scale (+/- gain) +shear: 0.0 # image shear (+/- deg) +perspective: 0.0 # image perspective (+/- fraction), range 0-0.001 +flipud: 0.0 # image flip up-down (probability) +fliplr: 0.5 # image flip left-right (probability) +mosaic: 1.0 # image mosaic (probability) +mixup: 0.05 # image mixup (probability) +copy_paste: 0.0 # image copy paste (probability) +paste_in: 0.05 # image copy paste (probability), use 0 for faster training +loss_ota: 1 # use ComputeLossOTA, use 0 for faster training diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/README.md b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/README.md new file mode 100644 index 0000000..13af4da --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/README.md @@ -0,0 +1,164 @@ +# YOLOv7 on Triton Inference 
Server + +Instructions to deploy YOLOv7 as a TensorRT engine to [Triton Inference Server](https://github.com/NVIDIA/triton-inference-server). + +Triton Inference Server takes care of model deployment with many out-of-the-box benefits, like gRPC and HTTP interfaces, automatic scheduling on multiple GPUs, shared memory (even on GPU), dynamic server-side batching, health metrics and memory resource management. + +There are no additional dependencies needed to run this deployment, except a working docker daemon with GPU support. + +## Export TensorRT + +See https://github.com/WongKinYiu/yolov7#export for more info. + +```bash +# install onnx-simplifier, not listed in the general yolov7 requirements.txt +pip3 install onnx-simplifier + +# PyTorch YOLOv7 -> ONNX with grid, EfficientNMS plugin and dynamic batch size +python export.py --weights ./yolov7.pt --grid --end2end --dynamic-batch --simplify --topk-all 100 --iou-thres 0.65 --conf-thres 0.35 --img-size 640 640 +# ONNX -> TensorRT with trtexec and docker +docker run -it --rm --gpus=all nvcr.io/nvidia/tensorrt:22.06-py3 +# Copy onnx -> container: docker cp yolov7.onnx <container-id>:/workspace/ +# Export with FP16 precision, min batch 1, opt batch 8 and max batch 8 +./tensorrt/bin/trtexec --onnx=yolov7.onnx --minShapes=images:1x3x640x640 --optShapes=images:8x3x640x640 --maxShapes=images:8x3x640x640 --fp16 --workspace=4096 --saveEngine=yolov7-fp16-1x8x8.engine --timingCacheFile=timing.cache +# Test engine +./tensorrt/bin/trtexec --loadEngine=yolov7-fp16-1x8x8.engine +# Copy engine -> host: docker cp <container-id>:/workspace/yolov7-fp16-1x8x8.engine . +``` + +Example output of the test on an RTX 3090: + +``` +[I] === Performance summary === +[I] Throughput: 73.4985 qps +[I] Latency: min = 14.8578 ms, max = 15.8344 ms, mean = 15.07 ms, median = 15.0422 ms, percentile(99%) = 15.7443 ms +[I] End-to-End Host Latency: min = 25.8715 ms, max = 28.4102 ms, mean = 26.672 ms, median = 26.6082 ms, percentile(99%) = 27.8314 ms +[I] Enqueue Time: min = 0.793701 ms, max = 1.47144 ms, mean = 1.2008 ms, median = 1.28644 ms, percentile(99%) = 1.38965 ms +[I] H2D Latency: min = 1.50073 ms, max = 1.52454 ms, mean = 1.51225 ms, median = 1.51404 ms, percentile(99%) = 1.51941 ms +[I] GPU Compute Time: min = 13.3386 ms, max = 14.3186 ms, mean = 13.5448 ms, median = 13.5178 ms, percentile(99%) = 14.2151 ms +[I] D2H Latency: min = 0.00878906 ms, max = 0.0172729 ms, mean = 0.0128844 ms, median = 0.0125732 ms, percentile(99%) = 0.0166016 ms +[I] Total Host Walltime: 3.04768 s +[I] Total GPU Compute Time: 3.03404 s +[I] Explanations of the performance metrics are printed in the verbose logs. +``` +Note: 73.5 qps x batch 8 = 588 fps @ ~15 ms latency. + +## Model Repository + +See [Triton Model Repository Documentation](https://github.com/triton-inference-server/server/blob/main/docs/model_repository.md#model-repository) for more info. + +```bash +# Create folder structure +mkdir -p triton-deploy/models/yolov7/1/ +touch triton-deploy/models/yolov7/config.pbtxt +# Place model +mv yolov7-fp16-1x8x8.engine triton-deploy/models/yolov7/1/model.plan +``` + +## Model Configuration + +See [Triton Model Configuration Documentation](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#model-configuration) for more info.
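As a complement to the minimal configuration shown next (which relies on Triton auto-completing the model I/O from the TensorRT plan when the server is started with `--strict-model-config=false`, as below), here is a sketch of an explicit variant. It is an assumption-laden illustration rather than part of this repository: the tensor names mirror the `images` input and the `num_dets`/`det_boxes`/`det_scores`/`det_classes` outputs consumed by `client.py`, the 100-detection dimension follows from `--topk-all 100` in the export step, and the data types are assumed from the usual EfficientNMS plugin outputs; verify them against your own engine (for example via the model metadata that `client.py --model-info` prints).

```
# hypothetical explicit variant of triton-deploy/models/yolov7/config.pbtxt
name: "yolov7"
platform: "tensorrt_plan"
max_batch_size: 8
input [
  {
    name: "images"
    data_type: TYPE_FP32
    dims: [ 3, 640, 640 ]
  }
]
output [
  {
    name: "num_dets"
    data_type: TYPE_INT32
    dims: [ 1 ]
  },
  {
    name: "det_boxes"
    data_type: TYPE_FP32
    dims: [ 100, 4 ]
  },
  {
    name: "det_scores"
    data_type: TYPE_FP32
    dims: [ 100 ]
  },
  {
    name: "det_classes"
    data_type: TYPE_INT32
    dims: [ 100 ]
  }
]
dynamic_batching { }
```

Because `max_batch_size` is set, the leading batch dimension is implicit and therefore omitted from `dims`.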
+ +Minimal configuration for `triton-deploy/models/yolov7/config.pbtxt`: + +``` +name: "yolov7" +platform: "tensorrt_plan" +max_batch_size: 8 +dynamic_batching { } +``` + +Example repository: + +```bash +$ tree triton-deploy/ +triton-deploy/ +└── models + └── yolov7 + ├── 1 + │   └── model.plan + └── config.pbtxt + +3 directories, 2 files +``` + +## Start Triton Inference Server + +``` +docker run --gpus all --rm --ipc=host --shm-size=1g --ulimit memlock=-1 --ulimit stack=67108864 -p8000:8000 -p8001:8001 -p8002:8002 -v$(pwd)/triton-deploy/models:/models nvcr.io/nvidia/tritonserver:22.06-py3 tritonserver --model-repository=/models --strict-model-config=false --log-verbose 1 +``` + +In the log you should see: + +``` ++--------+---------+--------+ +| Model | Version | Status | ++--------+---------+--------+ +| yolov7 | 1 | READY | ++--------+---------+--------+ +``` + +## Performance with Model Analyzer + +See [Triton Model Analyzer Documentation](https://github.com/triton-inference-server/server/blob/main/docs/model_analyzer.md#model-analyzer) for more info. + +Performance numbers @ RTX 3090 + AMD Ryzen 9 5950X + +Example test with 16 concurrent clients using shared memory, each sending batch size 1 requests: + +```bash +docker run -it --ipc=host --net=host nvcr.io/nvidia/tritonserver:22.06-py3-sdk /bin/bash + +./install/bin/perf_analyzer -m yolov7 -u 127.0.0.1:8001 -i grpc --shared-memory system --concurrency-range 16 + +# Result (truncated) +Concurrency: 16, throughput: 590.119 infer/sec, latency 27080 usec +``` + +Throughput for 16 clients with batch size 1 matches a single thread running the engine locally at batch size 16, thanks to the Triton [Dynamic Batching Strategy](https://github.com/triton-inference-server/server/blob/main/docs/model_configuration.md#dynamic-batcher). The result without dynamic batching (disabled in the model configuration) is considerably worse: + +```bash +# Result (truncated) +Concurrency: 16, throughput: 335.587 infer/sec, latency 47616 usec +``` + +## How to run the model in your code + +An example client can be found in client.py. It can run dummy input, images and videos. + +```bash +pip3 install tritonclient[all] opencv-python +python3 client.py image data/dog.jpg +``` + +![example output result](data/dog_result.jpg) + +``` +$ python3 client.py --help +usage: client.py [-h] [-m MODEL] [--width WIDTH] [--height HEIGHT] [-u URL] [-o OUT] [-f FPS] [-i] [-v] [-t CLIENT_TIMEOUT] [-s] [-r ROOT_CERTIFICATES] [-p PRIVATE_KEY] [-x CERTIFICATE_CHAIN] {dummy,image,video} [input] + +positional arguments: + {dummy,image,video} Run mode. 'dummy' will send an emtpy buffer to the server to test if inference works. 'image' will process an image. 'video' will process a video.
+ input Input file to load from in image or video mode + +optional arguments: + -h, --help show this help message and exit + -m MODEL, --model MODEL + Inference model name, default yolov7 + --width WIDTH Inference model input width, default 640 + --height HEIGHT Inference model input height, default 640 + -u URL, --url URL Inference server URL, default localhost:8001 + -o OUT, --out OUT Write output into file instead of displaying it + -f FPS, --fps FPS Video output fps, default 24.0 FPS + -i, --model-info Print model status, configuration and statistics + -v, --verbose Enable verbose client output + -t CLIENT_TIMEOUT, --client-timeout CLIENT_TIMEOUT + Client timeout in seconds, default no timeout + -s, --ssl Enable SSL encrypted channel to the server + -r ROOT_CERTIFICATES, --root-certificates ROOT_CERTIFICATES + File holding PEM-encoded root certificates, default none + -p PRIVATE_KEY, --private-key PRIVATE_KEY + File holding PEM-encoded private key, default is none + -x CERTIFICATE_CHAIN, --certificate-chain CERTIFICATE_CHAIN + File holding PEM-encoded certicate chain default is none +``` diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/boundingbox.py b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/boundingbox.py new file mode 100644 index 0000000..8b95330 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/boundingbox.py @@ -0,0 +1,33 @@ +class BoundingBox: + def __init__(self, classID, confidence, x1, x2, y1, y2, image_width, image_height): + self.classID = classID + self.confidence = confidence + self.x1 = x1 + self.x2 = x2 + self.y1 = y1 + self.y2 = y2 + self.u1 = x1 / image_width + self.u2 = x2 / image_width + self.v1 = y1 / image_height + self.v2 = y2 / image_height + + def box(self): + return (self.x1, self.y1, self.x2, self.y2) + + def width(self): + return self.x2 - self.x1 + + def height(self): + return self.y2 - self.y1 + + def center_absolute(self): + return (0.5 * (self.x1 + self.x2), 0.5 * (self.y1 + self.y2)) + + def center_normalized(self): + return (0.5 * (self.u1 + self.u2), 0.5 * (self.v1 + self.v2)) + + def size_absolute(self): + return (self.x2 - self.x1, self.y2 - self.y1) + + def size_normalized(self): + return (self.u2 - self.u1, self.v2 - self.v1) diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/client.py b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/client.py new file mode 100644 index 0000000..aedca11 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/client.py @@ -0,0 +1,334 @@ +#!/usr/bin/env python + +import argparse +import numpy as np +import sys +import cv2 + +import tritonclient.grpc as grpcclient +from tritonclient.utils import InferenceServerException + +from processing import preprocess, postprocess +from render import render_box, render_filled_box, get_text_size, render_text, RAND_COLORS +from labels import COCOLabels + +INPUT_NAMES = ["images"] +OUTPUT_NAMES = ["num_dets", "det_boxes", "det_scores", "det_classes"] + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('mode', + choices=['dummy', 'image', 'video'], + default='dummy', + help='Run mode. \'dummy\' will send an emtpy buffer to the server to test if inference works. \'image\' will process an image. 
\'video\' will process a video.') + parser.add_argument('input', + type=str, + nargs='?', + help='Input file to load from in image or video mode') + parser.add_argument('-m', + '--model', + type=str, + required=False, + default='yolov7', + help='Inference model name, default yolov7') + parser.add_argument('--width', + type=int, + required=False, + default=640, + help='Inference model input width, default 640') + parser.add_argument('--height', + type=int, + required=False, + default=640, + help='Inference model input height, default 640') + parser.add_argument('-u', + '--url', + type=str, + required=False, + default='localhost:8001', + help='Inference server URL, default localhost:8001') + parser.add_argument('-o', + '--out', + type=str, + required=False, + default='', + help='Write output into file instead of displaying it') + parser.add_argument('-f', + '--fps', + type=float, + required=False, + default=24.0, + help='Video output fps, default 24.0 FPS') + parser.add_argument('-i', + '--model-info', + action="store_true", + required=False, + default=False, + help='Print model status, configuration and statistics') + parser.add_argument('-v', + '--verbose', + action="store_true", + required=False, + default=False, + help='Enable verbose client output') + parser.add_argument('-t', + '--client-timeout', + type=float, + required=False, + default=None, + help='Client timeout in seconds, default no timeout') + parser.add_argument('-s', + '--ssl', + action="store_true", + required=False, + default=False, + help='Enable SSL encrypted channel to the server') + parser.add_argument('-r', + '--root-certificates', + type=str, + required=False, + default=None, + help='File holding PEM-encoded root certificates, default none') + parser.add_argument('-p', + '--private-key', + type=str, + required=False, + default=None, + help='File holding PEM-encoded private key, default is none') + parser.add_argument('-x', + '--certificate-chain', + type=str, + required=False, + default=None, + help='File holding PEM-encoded certicate chain default is none') + + FLAGS = parser.parse_args() + + # Create server context + try: + triton_client = grpcclient.InferenceServerClient( + url=FLAGS.url, + verbose=FLAGS.verbose, + ssl=FLAGS.ssl, + root_certificates=FLAGS.root_certificates, + private_key=FLAGS.private_key, + certificate_chain=FLAGS.certificate_chain) + except Exception as e: + print("context creation failed: " + str(e)) + sys.exit() + + # Health check + if not triton_client.is_server_live(): + print("FAILED : is_server_live") + sys.exit(1) + + if not triton_client.is_server_ready(): + print("FAILED : is_server_ready") + sys.exit(1) + + if not triton_client.is_model_ready(FLAGS.model): + print("FAILED : is_model_ready") + sys.exit(1) + + if FLAGS.model_info: + # Model metadata + try: + metadata = triton_client.get_model_metadata(FLAGS.model) + print(metadata) + except InferenceServerException as ex: + if "Request for unknown model" not in ex.message(): + print("FAILED : get_model_metadata") + print("Got: {}".format(ex.message())) + sys.exit(1) + else: + print("FAILED : get_model_metadata") + sys.exit(1) + + # Model configuration + try: + config = triton_client.get_model_config(FLAGS.model) + if not (config.config.name == FLAGS.model): + print("FAILED: get_model_config") + sys.exit(1) + print(config) + except InferenceServerException as ex: + print("FAILED : get_model_config") + print("Got: {}".format(ex.message())) + sys.exit(1) + + # DUMMY MODE + if FLAGS.mode == 'dummy': + print("Running in 'dummy' mode") + 
print("Creating emtpy buffer filled with ones...") + inputs = [] + outputs = [] + inputs.append(grpcclient.InferInput(INPUT_NAMES[0], [1, 3, FLAGS.width, FLAGS.height], "FP32")) + inputs[0].set_data_from_numpy(np.ones(shape=(1, 3, FLAGS.width, FLAGS.height), dtype=np.float32)) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[0])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[1])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[2])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[3])) + + print("Invoking inference...") + results = triton_client.infer(model_name=FLAGS.model, + inputs=inputs, + outputs=outputs, + client_timeout=FLAGS.client_timeout) + if FLAGS.model_info: + statistics = triton_client.get_inference_statistics(model_name=FLAGS.model) + if len(statistics.model_stats) != 1: + print("FAILED: get_inference_statistics") + sys.exit(1) + print(statistics) + print("Done") + + for output in OUTPUT_NAMES: + result = results.as_numpy(output) + print(f"Received result buffer \"{output}\" of size {result.shape}") + print(f"Naive buffer sum: {np.sum(result)}") + + # IMAGE MODE + if FLAGS.mode == 'image': + print("Running in 'image' mode") + if not FLAGS.input: + print("FAILED: no input image") + sys.exit(1) + + inputs = [] + outputs = [] + inputs.append(grpcclient.InferInput(INPUT_NAMES[0], [1, 3, FLAGS.width, FLAGS.height], "FP32")) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[0])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[1])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[2])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[3])) + + print("Creating buffer from image file...") + input_image = cv2.imread(str(FLAGS.input)) + if input_image is None: + print(f"FAILED: could not load input image {str(FLAGS.input)}") + sys.exit(1) + input_image_buffer = preprocess(input_image, [FLAGS.width, FLAGS.height]) + input_image_buffer = np.expand_dims(input_image_buffer, axis=0) + + inputs[0].set_data_from_numpy(input_image_buffer) + + print("Invoking inference...") + results = triton_client.infer(model_name=FLAGS.model, + inputs=inputs, + outputs=outputs, + client_timeout=FLAGS.client_timeout) + if FLAGS.model_info: + statistics = triton_client.get_inference_statistics(model_name=FLAGS.model) + if len(statistics.model_stats) != 1: + print("FAILED: get_inference_statistics") + sys.exit(1) + print(statistics) + print("Done") + + for output in OUTPUT_NAMES: + result = results.as_numpy(output) + print(f"Received result buffer \"{output}\" of size {result.shape}") + print(f"Naive buffer sum: {np.sum(result)}") + + num_dets = results.as_numpy(OUTPUT_NAMES[0]) + det_boxes = results.as_numpy(OUTPUT_NAMES[1]) + det_scores = results.as_numpy(OUTPUT_NAMES[2]) + det_classes = results.as_numpy(OUTPUT_NAMES[3]) + detected_objects = postprocess(num_dets, det_boxes, det_scores, det_classes, input_image.shape[1], input_image.shape[0], [FLAGS.width, FLAGS.height]) + print(f"Detected objects: {len(detected_objects)}") + + for box in detected_objects: + print(f"{COCOLabels(box.classID).name}: {box.confidence}") + input_image = render_box(input_image, box.box(), color=tuple(RAND_COLORS[box.classID % 64].tolist())) + size = get_text_size(input_image, f"{COCOLabels(box.classID).name}: {box.confidence:.2f}", normalised_scaling=0.6) + input_image = render_filled_box(input_image, (box.x1 - 3, box.y1 - 3, box.x1 + size[0], box.y1 + size[1]), color=(220, 220, 220)) + input_image = render_text(input_image, 
f"{COCOLabels(box.classID).name}: {box.confidence:.2f}", (box.x1, box.y1), color=(30, 30, 30), normalised_scaling=0.5) + + if FLAGS.out: + cv2.imwrite(FLAGS.out, input_image) + print(f"Saved result to {FLAGS.out}") + else: + cv2.imshow('image', input_image) + cv2.waitKey(0) + cv2.destroyAllWindows() + + # VIDEO MODE + if FLAGS.mode == 'video': + print("Running in 'video' mode") + if not FLAGS.input: + print("FAILED: no input video") + sys.exit(1) + + inputs = [] + outputs = [] + inputs.append(grpcclient.InferInput(INPUT_NAMES[0], [1, 3, FLAGS.width, FLAGS.height], "FP32")) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[0])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[1])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[2])) + outputs.append(grpcclient.InferRequestedOutput(OUTPUT_NAMES[3])) + + print("Opening input video stream...") + cap = cv2.VideoCapture(FLAGS.input) + if not cap.isOpened(): + print(f"FAILED: cannot open video {FLAGS.input}") + sys.exit(1) + + counter = 0 + out = None + print("Invoking inference...") + while True: + ret, frame = cap.read() + if not ret: + print("failed to fetch next frame") + break + + if counter == 0 and FLAGS.out: + print("Opening output video stream...") + fourcc = cv2.VideoWriter_fourcc('M', 'P', '4', 'V') + out = cv2.VideoWriter(FLAGS.out, fourcc, FLAGS.fps, (frame.shape[1], frame.shape[0])) + + input_image_buffer = preprocess(frame, [FLAGS.width, FLAGS.height]) + input_image_buffer = np.expand_dims(input_image_buffer, axis=0) + + inputs[0].set_data_from_numpy(input_image_buffer) + + results = triton_client.infer(model_name=FLAGS.model, + inputs=inputs, + outputs=outputs, + client_timeout=FLAGS.client_timeout) + + num_dets = results.as_numpy("num_dets") + det_boxes = results.as_numpy("det_boxes") + det_scores = results.as_numpy("det_scores") + det_classes = results.as_numpy("det_classes") + detected_objects = postprocess(num_dets, det_boxes, det_scores, det_classes, frame.shape[1], frame.shape[0], [FLAGS.width, FLAGS.height]) + print(f"Frame {counter}: {len(detected_objects)} objects") + counter += 1 + + for box in detected_objects: + print(f"{COCOLabels(box.classID).name}: {box.confidence}") + frame = render_box(frame, box.box(), color=tuple(RAND_COLORS[box.classID % 64].tolist())) + size = get_text_size(frame, f"{COCOLabels(box.classID).name}: {box.confidence:.2f}", normalised_scaling=0.6) + frame = render_filled_box(frame, (box.x1 - 3, box.y1 - 3, box.x1 + size[0], box.y1 + size[1]), color=(220, 220, 220)) + frame = render_text(frame, f"{COCOLabels(box.classID).name}: {box.confidence:.2f}", (box.x1, box.y1), color=(30, 30, 30), normalised_scaling=0.5) + + if FLAGS.out: + out.write(frame) + else: + cv2.imshow('image', frame) + if cv2.waitKey(1) == ord('q'): + break + + if FLAGS.model_info: + statistics = triton_client.get_inference_statistics(model_name=FLAGS.model) + if len(statistics.model_stats) != 1: + print("FAILED: get_inference_statistics") + sys.exit(1) + print(statistics) + print("Done") + + cap.release() + if FLAGS.out: + out.release() + else: + cv2.destroyAllWindows() diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/labels.py b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/labels.py new file mode 100644 index 0000000..ba6c5c5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/labels.py @@ -0,0 +1,83 @@ +from enum import Enum + +class COCOLabels(Enum): + PERSON = 0 + BICYCLE = 1 + CAR 
= 2 + MOTORBIKE = 3 + AEROPLANE = 4 + BUS = 5 + TRAIN = 6 + TRUCK = 7 + BOAT = 8 + TRAFFIC_LIGHT = 9 + FIRE_HYDRANT = 10 + STOP_SIGN = 11 + PARKING_METER = 12 + BENCH = 13 + BIRD = 14 + CAT = 15 + DOG = 16 + HORSE = 17 + SHEEP = 18 + COW = 19 + ELEPHANT = 20 + BEAR = 21 + ZEBRA = 22 + GIRAFFE = 23 + BACKPACK = 24 + UMBRELLA = 25 + HANDBAG = 26 + TIE = 27 + SUITCASE = 28 + FRISBEE = 29 + SKIS = 30 + SNOWBOARD = 31 + SPORTS_BALL = 32 + KITE = 33 + BASEBALL_BAT = 34 + BASEBALL_GLOVE = 35 + SKATEBOARD = 36 + SURFBOARD = 37 + TENNIS_RACKET = 38 + BOTTLE = 39 + WINE_GLASS = 40 + CUP = 41 + FORK = 42 + KNIFE = 43 + SPOON = 44 + BOWL = 45 + BANANA = 46 + APPLE = 47 + SANDWICH = 48 + ORANGE = 49 + BROCCOLI = 50 + CARROT = 51 + HOT_DOG = 52 + PIZZA = 53 + DONUT = 54 + CAKE = 55 + CHAIR = 56 + SOFA = 57 + POTTEDPLANT = 58 + BED = 59 + DININGTABLE = 60 + TOILET = 61 + TVMONITOR = 62 + LAPTOP = 63 + MOUSE = 64 + REMOTE = 65 + KEYBOARD = 66 + CELL_PHONE = 67 + MICROWAVE = 68 + OVEN = 69 + TOASTER = 70 + SINK = 71 + REFRIGERATOR = 72 + BOOK = 73 + CLOCK = 74 + VASE = 75 + SCISSORS = 76 + TEDDY_BEAR = 77 + HAIR_DRIER = 78 + TOOTHBRUSH = 79 diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/processing.py b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/processing.py new file mode 100644 index 0000000..3d51c50 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/processing.py @@ -0,0 +1,51 @@ +from boundingbox import BoundingBox + +import cv2 +import numpy as np + +def preprocess(img, input_shape, letter_box=True): + if letter_box: + img_h, img_w, _ = img.shape + new_h, new_w = input_shape[0], input_shape[1] + offset_h, offset_w = 0, 0 + if (new_w / img_w) <= (new_h / img_h): + new_h = int(img_h * new_w / img_w) + offset_h = (input_shape[0] - new_h) // 2 + else: + new_w = int(img_w * new_h / img_h) + offset_w = (input_shape[1] - new_w) // 2 + resized = cv2.resize(img, (new_w, new_h)) + img = np.full((input_shape[0], input_shape[1], 3), 127, dtype=np.uint8) + img[offset_h:(offset_h + new_h), offset_w:(offset_w + new_w), :] = resized + else: + img = cv2.resize(img, (input_shape[1], input_shape[0])) + + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = img.transpose((2, 0, 1)).astype(np.float32) + img /= 255.0 + return img + +def postprocess(num_dets, det_boxes, det_scores, det_classes, img_w, img_h, input_shape, letter_box=True): + boxes = det_boxes[0, :num_dets[0][0]] / np.array([input_shape[0], input_shape[1], input_shape[0], input_shape[1]], dtype=np.float32) + scores = det_scores[0, :num_dets[0][0]] + classes = det_classes[0, :num_dets[0][0]].astype(np.int) + + old_h, old_w = img_h, img_w + offset_h, offset_w = 0, 0 + if letter_box: + if (img_w / input_shape[1]) >= (img_h / input_shape[0]): + old_h = int(input_shape[0] * img_w / input_shape[1]) + offset_h = (old_h - img_h) // 2 + else: + old_w = int(input_shape[1] * img_h / input_shape[0]) + offset_w = (old_w - img_w) // 2 + + boxes = boxes * np.array([old_w, old_h, old_w, old_h], dtype=np.float32) + if letter_box: + boxes -= np.array([offset_w, offset_h, offset_w, offset_h], dtype=np.float32) + boxes = boxes.astype(np.int) + + detected_objects = [] + for box, score, label in zip(boxes, scores, classes): + detected_objects.append(BoundingBox(label, score, box[0], box[2], box[1], box[3], img_w, img_h)) + return detected_objects diff --git a/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/render.py 
b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/render.py new file mode 100644 index 0000000..dea0401 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/deploy/triton-inference-server/render.py @@ -0,0 +1,110 @@ +import numpy as np + +import cv2 + +from math import sqrt + +_LINE_THICKNESS_SCALING = 500.0 + +np.random.seed(0) +RAND_COLORS = np.random.randint(50, 255, (64, 3), "int") # used for class visu +RAND_COLORS[0] = [220, 220, 220] + +def render_box(img, box, color=(200, 200, 200)): + """ + Render a box. Calculates scaling and thickness automatically. + :param img: image to render into + :param box: (x1, y1, x2, y2) - box coordinates + :param color: (b, g, r) - box color + :return: updated image + """ + x1, y1, x2, y2 = box + thickness = int( + round( + (img.shape[0] * img.shape[1]) + / (_LINE_THICKNESS_SCALING * _LINE_THICKNESS_SCALING) + ) + ) + thickness = max(1, thickness) + img = cv2.rectangle( + img, + (int(x1), int(y1)), + (int(x2), int(y2)), + color, + thickness=thickness + ) + return img + +def render_filled_box(img, box, color=(200, 200, 200)): + """ + Render a box. Calculates scaling and thickness automatically. + :param img: image to render into + :param box: (x1, y1, x2, y2) - box coordinates + :param color: (b, g, r) - box color + :return: updated image + """ + x1, y1, x2, y2 = box + img = cv2.rectangle( + img, + (int(x1), int(y1)), + (int(x2), int(y2)), + color, + thickness=cv2.FILLED + ) + return img + +_TEXT_THICKNESS_SCALING = 700.0 +_TEXT_SCALING = 520.0 + + +def get_text_size(img, text, normalised_scaling=1.0): + """ + Get calculated text size (as box width and height) + :param img: image reference, used to determine appropriate text scaling + :param text: text to display + :param normalised_scaling: additional normalised scaling. Default 1.0. + :return: (width, height) - width and height of text box + """ + thickness = int( + round( + (img.shape[0] * img.shape[1]) + / (_TEXT_THICKNESS_SCALING * _TEXT_THICKNESS_SCALING) + ) + * normalised_scaling + ) + thickness = max(1, thickness) + scaling = img.shape[0] / _TEXT_SCALING * normalised_scaling + return cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, scaling, thickness)[0] + + +def render_text(img, text, pos, color=(200, 200, 200), normalised_scaling=1.0): + """ + Render a text into the image. Calculates scaling and thickness automatically. + :param img: image to render into + :param text: text to display + :param pos: (x, y) - upper left coordinates of render position + :param color: (b, g, r) - text color + :param normalised_scaling: additional normalised scaling. Default 1.0. 
+ :return: updated image + """ + x, y = pos + thickness = int( + round( + (img.shape[0] * img.shape[1]) + / (_TEXT_THICKNESS_SCALING * _TEXT_THICKNESS_SCALING) + ) + * normalised_scaling + ) + thickness = max(1, thickness) + scaling = img.shape[0] / _TEXT_SCALING * normalised_scaling + size = get_text_size(img, text, normalised_scaling) + cv2.putText( + img, + text, + (int(x), int(y + size[1])), + cv2.FONT_HERSHEY_SIMPLEX, + scaling, + color, + thickness=thickness, + ) + return img diff --git a/src/train_utils/train_models/models/yolov7/detect.py b/src/train_utils/train_models/models/yolov7/detect.py new file mode 100644 index 0000000..5e0c441 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/detect.py @@ -0,0 +1,196 @@ +import argparse +import time +from pathlib import Path + +import cv2 +import torch +import torch.backends.cudnn as cudnn +from numpy import random + +from models.experimental import attempt_load +from utils.datasets import LoadStreams, LoadImages +from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \ + scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path +from utils.plots import plot_one_box +from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel + + +def detect(save_img=False): + source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace + save_img = not opt.nosave and not source.endswith('.txt') # save inference images + webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( + ('rtsp://', 'rtmp://', 'http://', 'https://')) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Initialize + set_logging() + device = select_device(opt.device) + half = device.type != 'cpu' # half precision only supported on CUDA + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + stride = int(model.stride.max()) # model stride + imgsz = check_img_size(imgsz, s=stride) # check img_size + + if trace: + model = TracedModel(model, device, opt.img_size) + + if half: + model.half() # to FP16 + + # Second-stage classifier + classify = False + if classify: + modelc = load_classifier(name='resnet101', n=2) # initialize + modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model']).to(device).eval() + + # Set Dataloader + vid_path, vid_writer = None, None + if webcam: + view_img = check_imshow() + cudnn.benchmark = True # set True to speed up constant image size inference + dataset = LoadStreams(source, img_size=imgsz, stride=stride) + else: + dataset = LoadImages(source, img_size=imgsz, stride=stride) + + # Get names and colors + names = model.module.names if hasattr(model, 'module') else model.names + colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] + + # Run inference + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + old_img_w = old_img_h = imgsz + old_img_b = 1 + + t0 = time.time() + for path, img, im0s, vid_cap in dataset: + img = torch.from_numpy(img).to(device) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + if img.ndimension() == 3: + img = img.unsqueeze(0) + + # Warmup + if 
device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]): + old_img_b = img.shape[0] + old_img_h = img.shape[2] + old_img_w = img.shape[3] + for i in range(3): + model(img, augment=opt.augment)[0] + + # Inference + t1 = time_synchronized() + with torch.no_grad(): # Calculating gradients would cause a GPU memory leak + pred = model(img, augment=opt.augment)[0] + t2 = time_synchronized() + + # Apply NMS + pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms) + t3 = time_synchronized() + + # Apply Classifier + if classify: + pred = apply_classifier(pred, modelc, img, im0s) + + # Process detections + for i, det in enumerate(pred): # detections per image + if webcam: # batch_size >= 1 + p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count + else: + p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0) + + p = Path(p) # to Path + save_path = str(save_dir / p.name) # img.jpg + txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt + gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh + if len(det): + # Rescale boxes from img_size to im0 size + det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() + + # Print results + for c in det[:, -1].unique(): + n = (det[:, -1] == c).sum() # detections per class + s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string + + # Write results + for *xyxy, conf, cls in reversed(det): + if save_txt: # Write to file + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh) # label format + with open(txt_path + '.txt', 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + if save_img or view_img: # Add bbox to image + label = f'{names[int(cls)]} {conf:.2f}' + plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1) + + # Print time (inference + NMS) + print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS') + + # Stream results + if view_img: + cv2.imshow(str(p), im0) + cv2.waitKey(1) # 1 millisecond + + # Save results (image with detections) + if save_img: + if dataset.mode == 'image': + cv2.imwrite(save_path, im0) + print(f" The image with the result is saved in: {save_path}") + else: # 'video' or 'stream' + if vid_path != save_path: # new video + vid_path = save_path + if isinstance(vid_writer, cv2.VideoWriter): + vid_writer.release() # release previous video writer + if vid_cap: # video + fps = vid_cap.get(cv2.CAP_PROP_FPS) + w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + else: # stream + fps, w, h = 30, im0.shape[1], im0.shape[0] + save_path += '.mp4' + vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) + vid_writer.write(im0) + + if save_txt or save_img: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + #print(f"Results saved to {save_dir}{s}") + + print(f'Done. 
({time.time() - t0:.3f}s)') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') + parser.add_argument('--source', type=str, default='inference/images', help='source') # file/folder, 0 for webcam + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--view-img', action='store_true', help='display results') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--nosave', action='store_true', help='do not save images/videos') + parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') + parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--update', action='store_true', help='update all models') + parser.add_argument('--project', default='runs/detect', help='save results to project/name') + parser.add_argument('--name', default='exp', help='save results to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--no-trace', action='store_true', help='don`t trace model') + opt = parser.parse_args() + print(opt) + #check_requirements(exclude=('pycocotools', 'thop')) + + with torch.no_grad(): + if opt.update: # update all models (to fix SourceChangeWarning) + for opt.weights in ['yolov7.pt']: + detect() + strip_optimizer(opt.weights) + else: + detect() diff --git a/src/train_utils/train_models/models/yolov7/export.py b/src/train_utils/train_models/models/yolov7/export.py new file mode 100644 index 0000000..cf918aa --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/export.py @@ -0,0 +1,205 @@ +import argparse +import sys +import time +import warnings + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +import torch +import torch.nn as nn +from torch.utils.mobile_optimizer import optimize_for_mobile + +import models +from models.experimental import attempt_load, End2End +from utils.activations import Hardswish, SiLU +from utils.general import set_logging, check_img_size +from utils.torch_utils import select_device +from utils.add_nms import RegisterNMS + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='./yolor-csp-c.pt', help='weights path') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='image size') # height, width + parser.add_argument('--batch-size', type=int, default=1, help='batch size') + parser.add_argument('--dynamic', action='store_true', help='dynamic ONNX axes') + parser.add_argument('--dynamic-batch', action='store_true', help='dynamic batch onnx for tensorrt and onnx-runtime') + parser.add_argument('--grid', action='store_true', help='export Detect() layer grid') + parser.add_argument('--end2end', action='store_true', help='export end2end onnx') + 
parser.add_argument('--max-wh', type=int, default=None, help='None for tensorrt nms, int value for onnx-runtime nms') + parser.add_argument('--topk-all', type=int, default=100, help='topk objects for every images') + parser.add_argument('--iou-thres', type=float, default=0.45, help='iou threshold for NMS') + parser.add_argument('--conf-thres', type=float, default=0.25, help='conf threshold for NMS') + parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--simplify', action='store_true', help='simplify onnx model') + parser.add_argument('--include-nms', action='store_true', help='export end2end onnx') + parser.add_argument('--fp16', action='store_true', help='CoreML FP16 half-precision export') + parser.add_argument('--int8', action='store_true', help='CoreML INT8 quantization') + opt = parser.parse_args() + opt.img_size *= 2 if len(opt.img_size) == 1 else 1 # expand + opt.dynamic = opt.dynamic and not opt.end2end + opt.dynamic = False if opt.dynamic_batch else opt.dynamic + print(opt) + set_logging() + t = time.time() + + # Load PyTorch model + device = select_device(opt.device) + model = attempt_load(opt.weights, map_location=device) # load FP32 model + labels = model.names + + # Checks + gs = int(max(model.stride)) # grid size (max stride) + opt.img_size = [check_img_size(x, gs) for x in opt.img_size] # verify img_size are gs-multiples + + # Input + img = torch.zeros(opt.batch_size, 3, *opt.img_size).to(device) # image size(1,3,320,192) iDetection + + # Update model + for k, m in model.named_modules(): + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + if isinstance(m, models.common.Conv): # assign export-friendly activations + if isinstance(m.act, nn.Hardswish): + m.act = Hardswish() + elif isinstance(m.act, nn.SiLU): + m.act = SiLU() + # elif isinstance(m, models.yolo.Detect): + # m.forward = m.forward_export # assign forward (optional) + model.model[-1].export = not opt.grid # set Detect() layer grid export + y = model(img) # dry run + if opt.include_nms: + model.model[-1].include_nms = True + y = None + + # TorchScript export + try: + print('\nStarting TorchScript export with torch %s...' % torch.__version__) + f = opt.weights.replace('.pt', '.torchscript.pt') # filename + ts = torch.jit.trace(model, img, strict=False) + ts.save(f) + print('TorchScript export success, saved as %s' % f) + except Exception as e: + print('TorchScript export failure: %s' % e) + + # CoreML export + try: + import coremltools as ct + + print('\nStarting CoreML export with coremltools %s...' 
% ct.__version__) + # convert model from torchscript and apply pixel scaling as per detect.py + ct_model = ct.convert(ts, inputs=[ct.ImageType('image', shape=img.shape, scale=1 / 255.0, bias=[0, 0, 0])]) + bits, mode = (8, 'kmeans_lut') if opt.int8 else (16, 'linear') if opt.fp16 else (32, None) + if bits < 32: + if sys.platform.lower() == 'darwin': # quantization only supported on macOS + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) # suppress numpy==1.20 float warning + ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode) + else: + print('quantization only supported on macOS, skipping...') + + f = opt.weights.replace('.pt', '.mlmodel') # filename + ct_model.save(f) + print('CoreML export success, saved as %s' % f) + except Exception as e: + print('CoreML export failure: %s' % e) + + # TorchScript-Lite export + try: + print('\nStarting TorchScript-Lite export with torch %s...' % torch.__version__) + f = opt.weights.replace('.pt', '.torchscript.ptl') # filename + tsl = torch.jit.trace(model, img, strict=False) + tsl = optimize_for_mobile(tsl) + tsl._save_for_lite_interpreter(f) + print('TorchScript-Lite export success, saved as %s' % f) + except Exception as e: + print('TorchScript-Lite export failure: %s' % e) + + # ONNX export + try: + import onnx + + print('\nStarting ONNX export with onnx %s...' % onnx.__version__) + f = opt.weights.replace('.pt', '.onnx') # filename + model.eval() + output_names = ['classes', 'boxes'] if y is None else ['output'] + dynamic_axes = None + if opt.dynamic: + dynamic_axes = {'images': {0: 'batch', 2: 'height', 3: 'width'}, # size(1,3,640,640) + 'output': {0: 'batch', 2: 'y', 3: 'x'}} + if opt.dynamic_batch: + opt.batch_size = 'batch' + dynamic_axes = { + 'images': { + 0: 'batch', + }, } + if opt.end2end and opt.max_wh is None: + output_axes = { + 'num_dets': {0: 'batch'}, + 'det_boxes': {0: 'batch'}, + 'det_scores': {0: 'batch'}, + 'det_classes': {0: 'batch'}, + } + else: + output_axes = { + 'output': {0: 'batch'}, + } + dynamic_axes.update(output_axes) + if opt.grid: + if opt.end2end: + print('\nStarting export end2end onnx model for %s...' 
% 'TensorRT' if opt.max_wh is None else 'onnxruntime') + model = End2End(model,opt.topk_all,opt.iou_thres,opt.conf_thres,opt.max_wh,device,len(labels)) + if opt.end2end and opt.max_wh is None: + output_names = ['num_dets', 'det_boxes', 'det_scores', 'det_classes'] + shapes = [opt.batch_size, 1, opt.batch_size, opt.topk_all, 4, + opt.batch_size, opt.topk_all, opt.batch_size, opt.topk_all] + else: + output_names = ['output'] + else: + model.model[-1].concat = True + + torch.onnx.export(model, img, f, verbose=False, opset_version=12, input_names=['images'], + output_names=output_names, + dynamic_axes=dynamic_axes) + + # Checks + onnx_model = onnx.load(f) # load onnx model + onnx.checker.check_model(onnx_model) # check onnx model + + if opt.end2end and opt.max_wh is None: + for i in onnx_model.graph.output: + for j in i.type.tensor_type.shape.dim: + j.dim_param = str(shapes.pop(0)) + + # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model + + # # Metadata + # d = {'stride': int(max(model.stride))} + # for k, v in d.items(): + # meta = onnx_model.metadata_props.add() + # meta.key, meta.value = k, str(v) + # onnx.save(onnx_model, f) + + if opt.simplify: + try: + import onnxsim + + print('\nStarting to simplify ONNX...') + onnx_model, check = onnxsim.simplify(onnx_model) + assert check, 'assert check failed' + except Exception as e: + print(f'Simplifier failure: {e}') + + # print(onnx.helper.printable_graph(onnx_model.graph)) # print a human readable model + onnx.save(onnx_model,f) + print('ONNX export success, saved as %s' % f) + + if opt.include_nms: + print('Registering NMS plugin for ONNX...') + mo = RegisterNMS(f) + mo.register_nms() + mo.save(f) + + except Exception as e: + print('ONNX export failure: %s' % e) + + # Finish + print('\nExport complete (%.2fs). Visualize with https://github.com/lutzroeder/netron.' % (time.time() - t)) diff --git a/src/train_utils/train_models/models/yolov7/hubconf.py b/src/train_utils/train_models/models/yolov7/hubconf.py new file mode 100644 index 0000000..50ff257 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/hubconf.py @@ -0,0 +1,97 @@ +"""PyTorch Hub models + +Usage: + import torch + model = torch.hub.load('repo', 'model') +""" + +from pathlib import Path + +import torch + +from models.yolo import Model +from utils.general import check_requirements, set_logging +from utils.google_utils import attempt_download +from utils.torch_utils import select_device + +dependencies = ['torch', 'yaml'] +check_requirements(Path(__file__).parent / 'requirements.txt', exclude=('pycocotools', 'thop')) +set_logging() + + +def create(name, pretrained, channels, classes, autoshape): + """Creates a specified model + + Arguments: + name (str): name of model, i.e. 
'yolov7' + pretrained (bool): load pretrained weights into the model + channels (int): number of input channels + classes (int): number of model classes + + Returns: + pytorch model + """ + try: + cfg = list((Path(__file__).parent / 'cfg').rglob(f'{name}.yaml'))[0] # model.yaml path + model = Model(cfg, channels, classes) + if pretrained: + fname = f'{name}.pt' # checkpoint filename + attempt_download(fname) # download if not found locally + ckpt = torch.load(fname, map_location=torch.device('cpu')) # load + msd = model.state_dict() # model state_dict + csd = ckpt['model'].float().state_dict() # checkpoint state_dict as FP32 + csd = {k: v for k, v in csd.items() if msd[k].shape == v.shape} # filter + model.load_state_dict(csd, strict=False) # load + if len(ckpt['model'].names) == classes: + model.names = ckpt['model'].names # set class names attribute + if autoshape: + model = model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return model.to(device) + + except Exception as e: + s = 'Cache maybe be out of date, try force_reload=True.' + raise Exception(s) from e + + +def custom(path_or_model='path/to/model.pt', autoshape=True): + """custom mode + + Arguments (3 options): + path_or_model (str): 'path/to/model.pt' + path_or_model (dict): torch.load('path/to/model.pt') + path_or_model (nn.Module): torch.load('path/to/model.pt')['model'] + + Returns: + pytorch model + """ + model = torch.load(path_or_model, map_location=torch.device('cpu')) if isinstance(path_or_model, str) else path_or_model # load checkpoint + if isinstance(model, dict): + model = model['ema' if model.get('ema') else 'model'] # load model + + hub_model = Model(model.yaml).to(next(model.parameters()).device) # create + hub_model.load_state_dict(model.float().state_dict()) # load state_dict + hub_model.names = model.names # class names + if autoshape: + hub_model = hub_model.autoshape() # for file/URI/PIL/cv2/np inputs and NMS + device = select_device('0' if torch.cuda.is_available() else 'cpu') # default to GPU if available + return hub_model.to(device) + + +def yolov7(pretrained=True, channels=3, classes=80, autoshape=True): + return create('yolov7', pretrained, channels, classes, autoshape) + + +if __name__ == '__main__': + model = custom(path_or_model='yolov7.pt') # custom example + # model = create(name='yolov7', pretrained=True, channels=3, classes=80, autoshape=True) # pretrained example + + # Verify inference + import numpy as np + from PIL import Image + + imgs = [np.zeros((640, 480, 3))] + + results = model(imgs) # batched inference + results.print() + results.save() diff --git a/src/train_utils/train_models/models/yolov7/models/__init__.py b/src/train_utils/train_models/models/yolov7/models/__init__.py new file mode 100644 index 0000000..84952a8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/models/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/models/common.py b/src/train_utils/train_models/models/yolov7/models/common.py new file mode 100644 index 0000000..edb5edc --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/models/common.py @@ -0,0 +1,2019 @@ +import math +from copy import copy +from pathlib import Path + +import numpy as np +import pandas as pd +import requests +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.ops import DeformConv2d +from PIL import Image +from 
torch.cuda import amp + +from utils.datasets import letterbox +from utils.general import non_max_suppression, make_divisible, scale_coords, increment_path, xyxy2xywh +from utils.plots import color_list, plot_one_box +from utils.torch_utils import time_synchronized + + +##### basic #### + +def autopad(k, p=None): # kernel, padding + # Pad to 'same' + if p is None: + p = k // 2 if isinstance(k, int) else [x // 2 for x in k] # auto-pad + return p + + +class MP(nn.Module): + def __init__(self, k=2): + super(MP, self).__init__() + self.m = nn.MaxPool2d(kernel_size=k, stride=k) + + def forward(self, x): + return self.m(x) + + +class SP(nn.Module): + def __init__(self, k=3, s=1): + super(SP, self).__init__() + self.m = nn.MaxPool2d(kernel_size=k, stride=s, padding=k // 2) + + def forward(self, x): + return self.m(x) + + +class ReOrg(nn.Module): + def __init__(self): + super(ReOrg, self).__init__() + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1) + + +class Concat(nn.Module): + def __init__(self, dimension=1): + super(Concat, self).__init__() + self.d = dimension + + def forward(self, x): + return torch.cat(x, self.d) + + +class Chuncat(nn.Module): + def __init__(self, dimension=1): + super(Chuncat, self).__init__() + self.d = dimension + + def forward(self, x): + x1 = [] + x2 = [] + for xi in x: + xi1, xi2 = xi.chunk(2, self.d) + x1.append(xi1) + x2.append(xi2) + return torch.cat(x1+x2, self.d) + + +class Shortcut(nn.Module): + def __init__(self, dimension=0): + super(Shortcut, self).__init__() + self.d = dimension + + def forward(self, x): + return x[0]+x[1] + + +class Foldcut(nn.Module): + def __init__(self, dimension=0): + super(Foldcut, self).__init__() + self.d = dimension + + def forward(self, x): + x1, x2 = x.chunk(2, self.d) + return x1+x2 + + +class Conv(nn.Module): + # Standard convolution + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Conv, self).__init__() + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + def forward(self, x): + return self.act(self.bn(self.conv(x))) + + def fuseforward(self, x): + return self.act(self.conv(x)) + + +class RobustConv(nn.Module): + # Robust convolution (use high kernel size 7-11 for: downsampling and other layers). Train for 300 - 450 epochs. + def __init__(self, c1, c2, k=7, s=1, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups + super(RobustConv, self).__init__() + self.conv_dw = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) + self.conv1x1 = nn.Conv2d(c1, c2, 1, 1, 0, groups=1, bias=True) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None + + def forward(self, x): + x = x.to(memory_format=torch.channels_last) + x = self.conv1x1(self.conv_dw(x)) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + return x + + +class RobustConv2(nn.Module): + # Robust convolution 2 (use [32, 5, 2] or [32, 7, 4] or [32, 11, 8] for one of the paths in CSP). 
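+    # Descriptive note: the strided depthwise conv (kernel k, stride s) downsamples the
+    # feature map, and the transposed conv (kernel_size=s, stride=s) restores the original
+    # spatial size, so the block keeps resolution while enlarging the receptive field;
+    # gamma applies an optional LayerScale-style per-channel scaling.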
+ def __init__(self, c1, c2, k=7, s=4, p=None, g=1, act=True, layer_scale_init_value=1e-6): # ch_in, ch_out, kernel, stride, padding, groups + super(RobustConv2, self).__init__() + self.conv_strided = Conv(c1, c1, k=k, s=s, p=p, g=c1, act=act) + self.conv_deconv = nn.ConvTranspose2d(in_channels=c1, out_channels=c2, kernel_size=s, stride=s, + padding=0, bias=True, dilation=1, groups=1 + ) + self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(c2)) if layer_scale_init_value > 0 else None + + def forward(self, x): + x = self.conv_deconv(self.conv_strided(x)) + if self.gamma is not None: + x = x.mul(self.gamma.reshape(1, -1, 1, 1)) + return x + + +def DWConv(c1, c2, k=1, s=1, act=True): + # Depthwise convolution + return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act) + + +class GhostConv(nn.Module): + # Ghost Convolution https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=1, s=1, g=1, act=True): # ch_in, ch_out, kernel, stride, groups + super(GhostConv, self).__init__() + c_ = c2 // 2 # hidden channels + self.cv1 = Conv(c1, c_, k, s, None, g, act) + self.cv2 = Conv(c_, c_, 5, 1, None, c_, act) + + def forward(self, x): + y = self.cv1(x) + return torch.cat([y, self.cv2(y)], 1) + + +class Stem(nn.Module): + # Stem + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Stem, self).__init__() + c_ = int(c2/2) # hidden channels + self.cv1 = Conv(c1, c_, 3, 2) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(c_, c_, 3, 2) + self.pool = torch.nn.MaxPool2d(2, stride=2) + self.cv4 = Conv(2 * c_, c2, 1, 1) + + def forward(self, x): + x = self.cv1(x) + return self.cv4(torch.cat((self.cv3(self.cv2(x)), self.pool(x)), dim=1)) + + +class DownC(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, n=1, k=2): + super(DownC, self).__init__() + c_ = int(c1) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2//2, 3, k) + self.cv3 = Conv(c1, c2//2, 1, 1) + self.mp = nn.MaxPool2d(kernel_size=k, stride=k) + + def forward(self, x): + return torch.cat((self.cv2(self.cv1(x)), self.cv3(self.mp(x))), dim=1) + + +class SPP(nn.Module): + # Spatial pyramid pooling layer used in YOLOv3-SPP + def __init__(self, c1, c2, k=(5, 9, 13)): + super(SPP, self).__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + + def forward(self, x): + x = self.cv1(x) + return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1)) + + +class Bottleneck(nn.Module): + # Darknet bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Bottleneck, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c2, 3, 1, g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Res(nn.Module): + # ResNet bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super(Res, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 3, 1, g=g) + self.cv3 = Conv(c_, c2, 1, 1) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x))) + + 
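+# Illustrative usage sketch for the bottleneck blocks above (hypothetical tensor shapes,
+# kept as a comment so nothing runs at import time): the residual add is only applied
+# when the input and output channel counts match (self.add = shortcut and c1 == c2).
+#
+#   x = torch.randn(1, 64, 32, 32)
+#   Res(64, 64)(x).shape    # torch.Size([1, 64, 32, 32])  -> shortcut added
+#   Res(64, 128)(x).shape   # torch.Size([1, 128, 32, 32]) -> no shortcut
+
+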
+class ResX(Res): + # ResNet bottleneck + def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + + +class Ghost(nn.Module): + # Ghost Bottleneck https://github.com/huawei-noah/ghostnet + def __init__(self, c1, c2, k=3, s=1): # ch_in, ch_out, kernel, stride + super(Ghost, self).__init__() + c_ = c2 // 2 + self.conv = nn.Sequential(GhostConv(c1, c_, 1, 1), # pw + DWConv(c_, c_, k, s, act=False) if s == 2 else nn.Identity(), # dw + GhostConv(c_, c2, 1, 1, act=False)) # pw-linear + self.shortcut = nn.Sequential(DWConv(c1, c1, k, s, act=False), + Conv(c1, c2, 1, 1, act=False)) if s == 2 else nn.Identity() + + def forward(self, x): + return self.conv(x) + self.shortcut(x) + +##### end of basic ##### + + +##### cspnet ##### + +class SPPCSPC(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): + super(SPPCSPC, self).__init__() + c_ = int(2 * c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 3, 1) + self.cv4 = Conv(c_, c_, 1, 1) + self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k]) + self.cv5 = Conv(4 * c_, c_, 1, 1) + self.cv6 = Conv(c_, c_, 3, 1) + self.cv7 = Conv(2 * c_, c2, 1, 1) + + def forward(self, x): + x1 = self.cv4(self.cv3(self.cv1(x))) + y1 = self.cv6(self.cv5(torch.cat([x1] + [m(x1) for m in self.m], 1))) + y2 = self.cv2(x) + return self.cv7(torch.cat((y1, y2), dim=1)) + +class GhostSPPCSPC(SPPCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5, k=(5, 9, 13)): + super().__init__(c1, c2, n, shortcut, g, e, k) + c_ = int(2 * c2 * e) # hidden channels + self.cv1 = GhostConv(c1, c_, 1, 1) + self.cv2 = GhostConv(c1, c_, 1, 1) + self.cv3 = GhostConv(c_, c_, 3, 1) + self.cv4 = GhostConv(c_, c_, 1, 1) + self.cv5 = GhostConv(4 * c_, c_, 1, 1) + self.cv6 = GhostConv(c_, c_, 3, 1) + self.cv7 = GhostConv(2 * c_, c2, 1, 1) + + +class GhostStem(Stem): + # Stem + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super().__init__(c1, c2, k, s, p, g, act) + c_ = int(c2/2) # hidden channels + self.cv1 = GhostConv(c1, c_, 3, 2) + self.cv2 = GhostConv(c_, c_, 1, 1) + self.cv3 = GhostConv(c_, c_, 3, 2) + self.cv4 = GhostConv(2 * c_, c2, 1, 1) + + +class BottleneckCSPA(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class BottleneckCSPB(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + self.m = 
nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class BottleneckCSPC(nn.Module): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(BottleneckCSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + + +class ResCSPA(BottleneckCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResCSPB(BottleneckCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResCSPC(BottleneckCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class ResXCSPA(ResCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class ResXCSPB(ResCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class ResXCSPC(ResCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Res(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class GhostCSPA(BottleneckCSPA): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + + +class GhostCSPB(BottleneckCSPB): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def 
__init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + + +class GhostCSPC(BottleneckCSPC): + # CSP https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[Ghost(c_, c_) for _ in range(n)]) + +##### end of cspnet ##### + + +##### yolor ##### + +class ImplicitA(nn.Module): + def __init__(self, channel, mean=0., std=.02): + super(ImplicitA, self).__init__() + self.channel = channel + self.mean = mean + self.std = std + self.implicit = nn.Parameter(torch.zeros(1, channel, 1, 1)) + nn.init.normal_(self.implicit, mean=self.mean, std=self.std) + + def forward(self, x): + return self.implicit + x + + +class ImplicitM(nn.Module): + def __init__(self, channel, mean=1., std=.02): + super(ImplicitM, self).__init__() + self.channel = channel + self.mean = mean + self.std = std + self.implicit = nn.Parameter(torch.ones(1, channel, 1, 1)) + nn.init.normal_(self.implicit, mean=self.mean, std=self.std) + + def forward(self, x): + return self.implicit * x + +##### end of yolor ##### + + +##### repvgg ##### + +class RepConv(nn.Module): + # Represented convolution + # https://arxiv.org/abs/2101.03697 + + def __init__(self, c1, c2, k=3, s=1, p=None, g=1, act=True, deploy=False): + super(RepConv, self).__init__() + + self.deploy = deploy + self.groups = g + self.in_channels = c1 + self.out_channels = c2 + + assert k == 3 + assert autopad(k, p) == 1 + + padding_11 = autopad(k, p) - k // 2 + + self.act = nn.SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity()) + + if deploy: + self.rbr_reparam = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=True) + + else: + self.rbr_identity = (nn.BatchNorm2d(num_features=c1) if c2 == c1 and s == 1 else None) + + self.rbr_dense = nn.Sequential( + nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False), + nn.BatchNorm2d(num_features=c2), + ) + + self.rbr_1x1 = nn.Sequential( + nn.Conv2d( c1, c2, 1, s, padding_11, groups=g, bias=False), + nn.BatchNorm2d(num_features=c2), + ) + + def forward(self, inputs): + if hasattr(self, "rbr_reparam"): + return self.act(self.rbr_reparam(inputs)) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + + return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out) + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + return ( + kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, + bias3x3 + bias1x1 + biasid, + ) + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return nn.functional.pad(kernel1x1, [1, 1, 1, 1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if isinstance(branch, nn.Sequential): + kernel = branch[0].weight + running_mean = branch[1].running_mean + running_var = branch[1].running_var + gamma = branch[1].weight + beta = branch[1].bias + eps = branch[1].eps + else: + assert isinstance(branch, nn.BatchNorm2d) + if not hasattr(self, "id_tensor"): + input_dim = self.in_channels // 
self.groups + kernel_value = np.zeros( + (self.in_channels, input_dim, 3, 3), dtype=np.float32 + ) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def repvgg_convert(self): + kernel, bias = self.get_equivalent_kernel_bias() + return ( + kernel.detach().cpu().numpy(), + bias.detach().cpu().numpy(), + ) + + def fuse_conv_bn(self, conv, bn): + + std = (bn.running_var + bn.eps).sqrt() + bias = bn.bias - bn.running_mean * bn.weight / std + + t = (bn.weight / std).reshape(-1, 1, 1, 1) + weights = conv.weight * t + + bn = nn.Identity() + conv = nn.Conv2d(in_channels = conv.in_channels, + out_channels = conv.out_channels, + kernel_size = conv.kernel_size, + stride=conv.stride, + padding = conv.padding, + dilation = conv.dilation, + groups = conv.groups, + bias = True, + padding_mode = conv.padding_mode) + + conv.weight = torch.nn.Parameter(weights) + conv.bias = torch.nn.Parameter(bias) + return conv + + def fuse_repvgg_block(self): + if self.deploy: + return + print(f"RepConv.fuse_repvgg_block") + + self.rbr_dense = self.fuse_conv_bn(self.rbr_dense[0], self.rbr_dense[1]) + + self.rbr_1x1 = self.fuse_conv_bn(self.rbr_1x1[0], self.rbr_1x1[1]) + rbr_1x1_bias = self.rbr_1x1.bias + weight_1x1_expanded = torch.nn.functional.pad(self.rbr_1x1.weight, [1, 1, 1, 1]) + + # Fuse self.rbr_identity + if (isinstance(self.rbr_identity, nn.BatchNorm2d) or isinstance(self.rbr_identity, nn.modules.batchnorm.SyncBatchNorm)): + # print(f"fuse: rbr_identity == BatchNorm2d or SyncBatchNorm") + identity_conv_1x1 = nn.Conv2d( + in_channels=self.in_channels, + out_channels=self.out_channels, + kernel_size=1, + stride=1, + padding=0, + groups=self.groups, + bias=False) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.to(self.rbr_1x1.weight.data.device) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.squeeze().squeeze() + # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") + identity_conv_1x1.weight.data.fill_(0.0) + identity_conv_1x1.weight.data.fill_diagonal_(1.0) + identity_conv_1x1.weight.data = identity_conv_1x1.weight.data.unsqueeze(2).unsqueeze(3) + # print(f" identity_conv_1x1.weight = {identity_conv_1x1.weight.shape}") + + identity_conv_1x1 = self.fuse_conv_bn(identity_conv_1x1, self.rbr_identity) + bias_identity_expanded = identity_conv_1x1.bias + weight_identity_expanded = torch.nn.functional.pad(identity_conv_1x1.weight, [1, 1, 1, 1]) + else: + # print(f"fuse: rbr_identity != BatchNorm2d, rbr_identity = {self.rbr_identity}") + bias_identity_expanded = torch.nn.Parameter( torch.zeros_like(rbr_1x1_bias) ) + weight_identity_expanded = torch.nn.Parameter( torch.zeros_like(weight_1x1_expanded) ) + + + #print(f"self.rbr_1x1.weight = {self.rbr_1x1.weight.shape}, ") + #print(f"weight_1x1_expanded = {weight_1x1_expanded.shape}, ") + #print(f"self.rbr_dense.weight = {self.rbr_dense.weight.shape}, ") + + self.rbr_dense.weight = torch.nn.Parameter(self.rbr_dense.weight + weight_1x1_expanded + weight_identity_expanded) + self.rbr_dense.bias = torch.nn.Parameter(self.rbr_dense.bias + rbr_1x1_bias + bias_identity_expanded) + + self.rbr_reparam = self.rbr_dense + self.deploy = True 
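+        # Descriptive note: at this point the BN-fused 3x3 branch, the zero-padded 1x1 branch
+        # and the identity branch have been folded into a single convolution, exposed as
+        # rbr_reparam; the original training-time branches are removed below so only the
+        # merged conv is used at inference.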
+ + if self.rbr_identity is not None: + del self.rbr_identity + self.rbr_identity = None + + if self.rbr_1x1 is not None: + del self.rbr_1x1 + self.rbr_1x1 = None + + if self.rbr_dense is not None: + del self.rbr_dense + self.rbr_dense = None + + +class RepBottleneck(Bottleneck): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut=True, g=1, e=0.5) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c2, 3, 1, g=g) + + +class RepBottleneckCSPA(BottleneckCSPA): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepBottleneckCSPB(BottleneckCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepBottleneckCSPC(BottleneckCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepBottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + +class RepRes(Res): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c_, 3, 1, g=g) + + +class RepResCSPA(ResCSPA): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResCSPB(ResCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResCSPC(ResCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepRes(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResX(ResX): + # Standard bottleneck + def __init__(self, c1, c2, shortcut=True, g=32, e=0.5): # ch_in, ch_out, shortcut, groups, expansion + super().__init__(c1, c2, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.cv2 = RepConv(c_, c_, 3, 1, g=g) + + +class RepResXCSPA(ResXCSPA): + # CSP Bottleneck 
https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResXCSPB(ResXCSPB): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + + +class RepResXCSPC(ResXCSPC): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=32, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super().__init__(c1, c2, n, shortcut, g, e) + c_ = int(c2 * e) # hidden channels + self.m = nn.Sequential(*[RepResX(c_, c_, shortcut, g, e=0.5) for _ in range(n)]) + +##### end of repvgg ##### + + +##### transformer ##### + +class TransformerLayer(nn.Module): + # Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance) + def __init__(self, c, num_heads): + super().__init__() + self.q = nn.Linear(c, c, bias=False) + self.k = nn.Linear(c, c, bias=False) + self.v = nn.Linear(c, c, bias=False) + self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads) + self.fc1 = nn.Linear(c, c, bias=False) + self.fc2 = nn.Linear(c, c, bias=False) + + def forward(self, x): + x = self.ma(self.q(x), self.k(x), self.v(x))[0] + x + x = self.fc2(self.fc1(x)) + x + return x + + +class TransformerBlock(nn.Module): + # Vision Transformer https://arxiv.org/abs/2010.11929 + def __init__(self, c1, c2, num_heads, num_layers): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + self.linear = nn.Linear(c2, c2) # learnable position embedding + self.tr = nn.Sequential(*[TransformerLayer(c2, num_heads) for _ in range(num_layers)]) + self.c2 = c2 + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + b, _, w, h = x.shape + p = x.flatten(2) + p = p.unsqueeze(0) + p = p.transpose(0, 3) + p = p.squeeze(3) + e = self.linear(p) + x = p + e + + x = self.tr(x) + x = x.unsqueeze(3) + x = x.transpose(0, 3) + x = x.reshape(b, self.c2, w, h) + return x + +##### end of transformer ##### + + +##### yolov5 ##### + +class Focus(nn.Module): + # Focus wh information into c-space + def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups + super(Focus, self).__init__() + self.conv = Conv(c1 * 4, c2, k, s, p, g, act) + # self.contract = Contract(gain=2) + + def forward(self, x): # x(b,c,w,h) -> y(b,4c,w/2,h/2) + return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)) + # return self.conv(self.contract(x)) + + +class SPPF(nn.Module): + # Spatial Pyramid Pooling - Fast (SPPF) layer for YOLOv5 by Glenn Jocher + def __init__(self, c1, c2, k=5): # equivalent to SPP(k=(5, 9, 13)) + super().__init__() + c_ = c1 // 2 # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_ * 4, c2, 1, 1) + self.m = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2) + + def forward(self, x): + x = self.cv1(x) + y1 = self.m(x) + y2 = self.m(y1) + return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1)) + + +class 
Contract(nn.Module): + # Contract width-height into channels, i.e. x(1,64,80,80) to x(1,256,40,40) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert (H / s == 0) and (W / s == 0), 'Indivisible gain' + s = self.gain + x = x.view(N, C, H // s, s, W // s, s) # x(1,64,40,2,40,2) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # x(1,2,2,64,40,40) + return x.view(N, C * s * s, H // s, W // s) # x(1,256,40,40) + + +class Expand(nn.Module): + # Expand channels into width-height, i.e. x(1,64,80,80) to x(1,16,160,160) + def __init__(self, gain=2): + super().__init__() + self.gain = gain + + def forward(self, x): + N, C, H, W = x.size() # assert C / s ** 2 == 0, 'Indivisible gain' + s = self.gain + x = x.view(N, s, s, C // s ** 2, H, W) # x(1,2,2,16,80,80) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # x(1,16,80,2,80,2) + return x.view(N, C // s ** 2, H * s, W * s) # x(1,16,160,160) + + +class NMS(nn.Module): + # Non-Maximum Suppression (NMS) module + conf = 0.25 # confidence threshold + iou = 0.45 # IoU threshold + classes = None # (optional list) filter by class + + def __init__(self): + super(NMS, self).__init__() + + def forward(self, x): + return non_max_suppression(x[0], conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) + + +class autoShape(nn.Module): + # input-robust model wrapper for passing cv2/np/PIL/torch inputs. Includes preprocessing, inference and NMS + conf = 0.25 # NMS confidence threshold + iou = 0.45 # NMS IoU threshold + classes = None # (optional list) filter by class + + def __init__(self, model): + super(autoShape, self).__init__() + self.model = model.eval() + + def autoshape(self): + print('autoShape already enabled, skipping... ') # model already converted to model.autoshape() + return self + + @torch.no_grad() + def forward(self, imgs, size=640, augment=False, profile=False): + # Inference from various sources. For height=640, width=1280, RGB images example inputs are: + # filename: imgs = 'data/samples/zidane.jpg' + # URI: = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/zidane.jpg' + # OpenCV: = cv2.imread('image.jpg')[:,:,::-1] # HWC BGR to RGB x(640,1280,3) + # PIL: = Image.open('image.jpg') # HWC x(640,1280,3) + # numpy: = np.zeros((640,1280,3)) # HWC + # torch: = torch.zeros(16,3,320,640) # BCHW (scaled to size=640, 0-1 values) + # multiple: = [Image.open('image1.jpg'), Image.open('image2.jpg'), ...] 
# list of images + + t = [time_synchronized()] + p = next(self.model.parameters()) # for device and type + if isinstance(imgs, torch.Tensor): # torch + with amp.autocast(enabled=p.device.type != 'cpu'): + return self.model(imgs.to(p.device).type_as(p), augment, profile) # inference + + # Pre-process + n, imgs = (len(imgs), imgs) if isinstance(imgs, list) else (1, [imgs]) # number of images, list of images + shape0, shape1, files = [], [], [] # image and inference shapes, filenames + for i, im in enumerate(imgs): + f = f'image{i}' # filename + if isinstance(im, str): # filename or uri + im, f = np.asarray(Image.open(requests.get(im, stream=True).raw if im.startswith('http') else im)), im + elif isinstance(im, Image.Image): # PIL Image + im, f = np.asarray(im), getattr(im, 'filename', f) or f + files.append(Path(f).with_suffix('.jpg').name) + if im.shape[0] < 5: # image in CHW + im = im.transpose((1, 2, 0)) # reverse dataloader .transpose(2, 0, 1) + im = im[:, :, :3] if im.ndim == 3 else np.tile(im[:, :, None], 3) # enforce 3ch input + s = im.shape[:2] # HWC + shape0.append(s) # image shape + g = (size / max(s)) # gain + shape1.append([y * g for y in s]) + imgs[i] = im # update + shape1 = [make_divisible(x, int(self.stride.max())) for x in np.stack(shape1, 0).max(0)] # inference shape + x = [letterbox(im, new_shape=shape1, auto=False)[0] for im in imgs] # pad + x = np.stack(x, 0) if n > 1 else x[0][None] # stack + x = np.ascontiguousarray(x.transpose((0, 3, 1, 2))) # BHWC to BCHW + x = torch.from_numpy(x).to(p.device).type_as(p) / 255. # uint8 to fp16/32 + t.append(time_synchronized()) + + with amp.autocast(enabled=p.device.type != 'cpu'): + # Inference + y = self.model(x, augment, profile)[0] # forward + t.append(time_synchronized()) + + # Post-process + y = non_max_suppression(y, conf_thres=self.conf, iou_thres=self.iou, classes=self.classes) # NMS + for i in range(n): + scale_coords(shape1, y[i][:, :4], shape0[i]) + + t.append(time_synchronized()) + return Detections(imgs, y, files, t, self.names, x.shape) + + +class Detections: + # detections class for YOLOv5 inference results + def __init__(self, imgs, pred, files, times=None, names=None, shape=None): + super(Detections, self).__init__() + d = pred[0].device # device + gn = [torch.tensor([*[im.shape[i] for i in [1, 0, 1, 0]], 1., 1.], device=d) for im in imgs] # normalizations + self.imgs = imgs # list of images as numpy arrays + self.pred = pred # list of tensors pred[0] = (xyxy, conf, cls) + self.names = names # class names + self.files = files # image filenames + self.xyxy = pred # xyxy pixels + self.xywh = [xyxy2xywh(x) for x in pred] # xywh pixels + self.xyxyn = [x / g for x, g in zip(self.xyxy, gn)] # xyxy normalized + self.xywhn = [x / g for x, g in zip(self.xywh, gn)] # xywh normalized + self.n = len(self.pred) # number of images (batch size) + self.t = tuple((times[i + 1] - times[i]) * 1000 / self.n for i in range(3)) # timestamps (ms) + self.s = shape # inference BCHW shape + + def display(self, pprint=False, show=False, save=False, render=False, save_dir=''): + colors = color_list() + for i, (img, pred) in enumerate(zip(self.imgs, self.pred)): + str = f'image {i + 1}/{len(self.pred)}: {img.shape[0]}x{img.shape[1]} ' + if pred is not None: + for c in pred[:, -1].unique(): + n = (pred[:, -1] == c).sum() # detections per class + str += f"{n} {self.names[int(c)]}{'s' * (n > 1)}, " # add to string + if show or save or render: + for *box, conf, cls in pred: # xyxy, confidence, class + label = f'{self.names[int(cls)]} {conf:.2f}' + 
plot_one_box(box, img, label=label, color=colors[int(cls) % 10]) + img = Image.fromarray(img.astype(np.uint8)) if isinstance(img, np.ndarray) else img # from np + if pprint: + print(str.rstrip(', ')) + if show: + img.show(self.files[i]) # show + if save: + f = self.files[i] + img.save(Path(save_dir) / f) # save + print(f"{'Saved' * (i == 0)} {f}", end=',' if i < self.n - 1 else f' to {save_dir}\n') + if render: + self.imgs[i] = np.asarray(img) + + def print(self): + self.display(pprint=True) # print results + print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {tuple(self.s)}' % self.t) + + def show(self): + self.display(show=True) # show results + + def save(self, save_dir='runs/hub/exp'): + save_dir = increment_path(save_dir, exist_ok=save_dir != 'runs/hub/exp') # increment save_dir + Path(save_dir).mkdir(parents=True, exist_ok=True) + self.display(save=True, save_dir=save_dir) # save results + + def render(self): + self.display(render=True) # render results + return self.imgs + + def pandas(self): + # return detections as pandas DataFrames, i.e. print(results.pandas().xyxy[0]) + new = copy(self) # return copy + ca = 'xmin', 'ymin', 'xmax', 'ymax', 'confidence', 'class', 'name' # xyxy columns + cb = 'xcenter', 'ycenter', 'width', 'height', 'confidence', 'class', 'name' # xywh columns + for k, c in zip(['xyxy', 'xyxyn', 'xywh', 'xywhn'], [ca, ca, cb, cb]): + a = [[x[:5] + [int(x[5]), self.names[int(x[5])]] for x in x.tolist()] for x in getattr(self, k)] # update + setattr(new, k, [pd.DataFrame(x, columns=c) for x in a]) + return new + + def tolist(self): + # return a list of Detections objects, i.e. 'for result in results.tolist():' + x = [Detections([self.imgs[i]], [self.pred[i]], self.names, self.s) for i in range(self.n)] + for d in x: + for k in ['imgs', 'pred', 'xyxy', 'xyxyn', 'xywh', 'xywhn']: + setattr(d, k, getattr(d, k)[0]) # pop out of list + return x + + def __len__(self): + return self.n + + +class Classify(nn.Module): + # Classification head, i.e. 
x(b,c1,20,20) to x(b,c2) + def __init__(self, c1, c2, k=1, s=1, p=None, g=1): # ch_in, ch_out, kernel, stride, padding, groups + super(Classify, self).__init__() + self.aap = nn.AdaptiveAvgPool2d(1) # to x(b,c1,1,1) + self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g) # to x(b,c2,1,1) + self.flat = nn.Flatten() + + def forward(self, x): + z = torch.cat([self.aap(y) for y in (x if isinstance(x, list) else [x])], 1) # cat if list + return self.flat(self.conv(z)) # flatten to x(b,c2) + +##### end of yolov5 ###### + + +##### orepa ##### + +def transI_fusebn(kernel, bn): + gamma = bn.weight + std = (bn.running_var + bn.eps).sqrt() + return kernel * ((gamma / std).reshape(-1, 1, 1, 1)), bn.bias - bn.running_mean * gamma / std + + +class ConvBN(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, deploy=False, nonlinear=None): + super().__init__() + if nonlinear is None: + self.nonlinear = nn.Identity() + else: + self.nonlinear = nonlinear + if deploy: + self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=True) + else: + self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, + stride=stride, padding=padding, dilation=dilation, groups=groups, bias=False) + self.bn = nn.BatchNorm2d(num_features=out_channels) + + def forward(self, x): + if hasattr(self, 'bn'): + return self.nonlinear(self.bn(self.conv(x))) + else: + return self.nonlinear(self.conv(x)) + + def switch_to_deploy(self): + kernel, bias = transI_fusebn(self.conv.weight, self.bn) + conv = nn.Conv2d(in_channels=self.conv.in_channels, out_channels=self.conv.out_channels, kernel_size=self.conv.kernel_size, + stride=self.conv.stride, padding=self.conv.padding, dilation=self.conv.dilation, groups=self.conv.groups, bias=True) + conv.weight.data = kernel + conv.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('conv') + self.__delattr__('bn') + self.conv = conv + +class OREPA_3x3_RepConv(nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size, + stride=1, padding=0, dilation=1, groups=1, + internal_channels_1x1_3x3=None, + deploy=False, nonlinear=None, single_init=False): + super(OREPA_3x3_RepConv, self).__init__() + self.deploy = deploy + + if nonlinear is None: + self.nonlinear = nn.Identity() + else: + self.nonlinear = nonlinear + + self.kernel_size = kernel_size + self.in_channels = in_channels + self.out_channels = out_channels + self.groups = groups + assert padding == kernel_size // 2 + + self.stride = stride + self.padding = padding + self.dilation = dilation + + self.branch_counter = 0 + + self.weight_rbr_origin = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), kernel_size, kernel_size)) + nn.init.kaiming_uniform_(self.weight_rbr_origin, a=math.sqrt(1.0)) + self.branch_counter += 1 + + + if groups < out_channels: + self.weight_rbr_avg_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) + self.weight_rbr_pfir_conv = nn.Parameter(torch.Tensor(out_channels, int(in_channels/self.groups), 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_avg_conv, a=1.0) + nn.init.kaiming_uniform_(self.weight_rbr_pfir_conv, a=1.0) + self.weight_rbr_avg_conv.data + self.weight_rbr_pfir_conv.data + self.register_buffer('weight_rbr_avg_avg', torch.ones(kernel_size, kernel_size).mul(1.0/kernel_size/kernel_size)) + 
self.branch_counter += 1 + + else: + raise NotImplementedError + self.branch_counter += 1 + + if internal_channels_1x1_3x3 is None: + internal_channels_1x1_3x3 = in_channels if groups < out_channels else 2 * in_channels # For mobilenet, it is better to have 2X internal channels + + if internal_channels_1x1_3x3 == in_channels: + self.weight_rbr_1x1_kxk_idconv1 = nn.Parameter(torch.zeros(in_channels, int(in_channels/self.groups), 1, 1)) + id_value = np.zeros((in_channels, int(in_channels/self.groups), 1, 1)) + for i in range(in_channels): + id_value[i, i % int(in_channels/self.groups), 0, 0] = 1 + id_tensor = torch.from_numpy(id_value).type_as(self.weight_rbr_1x1_kxk_idconv1) + self.register_buffer('id_tensor', id_tensor) + + else: + self.weight_rbr_1x1_kxk_conv1 = nn.Parameter(torch.Tensor(internal_channels_1x1_3x3, int(in_channels/self.groups), 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv1, a=math.sqrt(1.0)) + self.weight_rbr_1x1_kxk_conv2 = nn.Parameter(torch.Tensor(out_channels, int(internal_channels_1x1_3x3/self.groups), kernel_size, kernel_size)) + nn.init.kaiming_uniform_(self.weight_rbr_1x1_kxk_conv2, a=math.sqrt(1.0)) + self.branch_counter += 1 + + expand_ratio = 8 + self.weight_rbr_gconv_dw = nn.Parameter(torch.Tensor(in_channels*expand_ratio, 1, kernel_size, kernel_size)) + self.weight_rbr_gconv_pw = nn.Parameter(torch.Tensor(out_channels, in_channels*expand_ratio, 1, 1)) + nn.init.kaiming_uniform_(self.weight_rbr_gconv_dw, a=math.sqrt(1.0)) + nn.init.kaiming_uniform_(self.weight_rbr_gconv_pw, a=math.sqrt(1.0)) + self.branch_counter += 1 + + if out_channels == in_channels and stride == 1: + self.branch_counter += 1 + + self.vector = nn.Parameter(torch.Tensor(self.branch_counter, self.out_channels)) + self.bn = nn.BatchNorm2d(out_channels) + + self.fre_init() + + nn.init.constant_(self.vector[0, :], 0.25) #origin + nn.init.constant_(self.vector[1, :], 0.25) #avg + nn.init.constant_(self.vector[2, :], 0.0) #prior + nn.init.constant_(self.vector[3, :], 0.5) #1x1_kxk + nn.init.constant_(self.vector[4, :], 0.5) #dws_conv + + + def fre_init(self): + prior_tensor = torch.Tensor(self.out_channels, self.kernel_size, self.kernel_size) + half_fg = self.out_channels/2 + for i in range(self.out_channels): + for h in range(3): + for w in range(3): + if i < half_fg: + prior_tensor[i, h, w] = math.cos(math.pi*(h+0.5)*(i+1)/3) + else: + prior_tensor[i, h, w] = math.cos(math.pi*(w+0.5)*(i+1-half_fg)/3) + + self.register_buffer('weight_rbr_prior', prior_tensor) + + def weight_gen(self): + + weight_rbr_origin = torch.einsum('oihw,o->oihw', self.weight_rbr_origin, self.vector[0, :]) + + weight_rbr_avg = torch.einsum('oihw,o->oihw', torch.einsum('oihw,hw->oihw', self.weight_rbr_avg_conv, self.weight_rbr_avg_avg), self.vector[1, :]) + + weight_rbr_pfir = torch.einsum('oihw,o->oihw', torch.einsum('oihw,ohw->oihw', self.weight_rbr_pfir_conv, self.weight_rbr_prior), self.vector[2, :]) + + weight_rbr_1x1_kxk_conv1 = None + if hasattr(self, 'weight_rbr_1x1_kxk_idconv1'): + weight_rbr_1x1_kxk_conv1 = (self.weight_rbr_1x1_kxk_idconv1 + self.id_tensor).squeeze() + elif hasattr(self, 'weight_rbr_1x1_kxk_conv1'): + weight_rbr_1x1_kxk_conv1 = self.weight_rbr_1x1_kxk_conv1.squeeze() + else: + raise NotImplementedError + weight_rbr_1x1_kxk_conv2 = self.weight_rbr_1x1_kxk_conv2 + + if self.groups > 1: + g = self.groups + t, ig = weight_rbr_1x1_kxk_conv1.size() + o, tg, h, w = weight_rbr_1x1_kxk_conv2.size() + weight_rbr_1x1_kxk_conv1 = weight_rbr_1x1_kxk_conv1.view(g, int(t/g), ig) + 
weight_rbr_1x1_kxk_conv2 = weight_rbr_1x1_kxk_conv2.view(g, int(o/g), tg, h, w) + weight_rbr_1x1_kxk = torch.einsum('gti,gothw->goihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2).view(o, ig, h, w) + else: + weight_rbr_1x1_kxk = torch.einsum('ti,othw->oihw', weight_rbr_1x1_kxk_conv1, weight_rbr_1x1_kxk_conv2) + + weight_rbr_1x1_kxk = torch.einsum('oihw,o->oihw', weight_rbr_1x1_kxk, self.vector[3, :]) + + weight_rbr_gconv = self.dwsc2full(self.weight_rbr_gconv_dw, self.weight_rbr_gconv_pw, self.in_channels) + weight_rbr_gconv = torch.einsum('oihw,o->oihw', weight_rbr_gconv, self.vector[4, :]) + + weight = weight_rbr_origin + weight_rbr_avg + weight_rbr_1x1_kxk + weight_rbr_pfir + weight_rbr_gconv + + return weight + + def dwsc2full(self, weight_dw, weight_pw, groups): + + t, ig, h, w = weight_dw.size() + o, _, _, _ = weight_pw.size() + tg = int(t/groups) + i = int(ig*groups) + weight_dw = weight_dw.view(groups, tg, ig, h, w) + weight_pw = weight_pw.squeeze().view(o, groups, tg) + + weight_dsc = torch.einsum('gtihw,ogt->ogihw', weight_dw, weight_pw) + return weight_dsc.view(o, i, h, w) + + def forward(self, inputs): + weight = self.weight_gen() + out = F.conv2d(inputs, weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups) + + return self.nonlinear(self.bn(out)) + +class RepConv_OREPA(nn.Module): + + def __init__(self, c1, c2, k=3, s=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, nonlinear=nn.SiLU()): + super(RepConv_OREPA, self).__init__() + self.deploy = deploy + self.groups = groups + self.in_channels = c1 + self.out_channels = c2 + + self.padding = padding + self.dilation = dilation + self.groups = groups + + assert k == 3 + assert padding == 1 + + padding_11 = padding - k // 2 + + if nonlinear is None: + self.nonlinearity = nn.Identity() + else: + self.nonlinearity = nonlinear + + if use_se: + self.se = SEBlock(self.out_channels, internal_neurons=self.out_channels // 16) + else: + self.se = nn.Identity() + + if deploy: + self.rbr_reparam = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, + padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode) + + else: + self.rbr_identity = nn.BatchNorm2d(num_features=self.in_channels) if self.out_channels == self.in_channels and s == 1 else None + self.rbr_dense = OREPA_3x3_RepConv(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=k, stride=s, padding=padding, groups=groups, dilation=1) + self.rbr_1x1 = ConvBN(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=1, stride=s, padding=padding_11, groups=groups, dilation=1) + print('RepVGG Block, identity = ', self.rbr_identity) + + + def forward(self, inputs): + if hasattr(self, 'rbr_reparam'): + return self.nonlinearity(self.se(self.rbr_reparam(inputs))) + + if self.rbr_identity is None: + id_out = 0 + else: + id_out = self.rbr_identity(inputs) + + out1 = self.rbr_dense(inputs) + out2 = self.rbr_1x1(inputs) + out3 = id_out + out = out1 + out2 + out3 + + return self.nonlinearity(self.se(out)) + + + # Optional. This improves the accuracy and facilitates quantization. + # 1. Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight. + # 2. Use like this. + # loss = criterion(....) 
+ # for every RepVGGBlock blk: + # loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2() + # optimizer.zero_grad() + # loss.backward() + + # Not used for OREPA + def get_custom_L2(self): + K3 = self.rbr_dense.weight_gen() + K1 = self.rbr_1x1.conv.weight + t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() + t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach() + + l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum() # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them. + eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1 # The equivalent resultant central point of 3x3 kernel. + l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum() # Normalize for an L2 coefficient comparable to regular L2. + return l2_loss_eq_kernel + l2_loss_circle + + def get_equivalent_kernel_bias(self): + kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense) + kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1) + kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity) + return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid + + def _pad_1x1_to_3x3_tensor(self, kernel1x1): + if kernel1x1 is None: + return 0 + else: + return torch.nn.functional.pad(kernel1x1, [1,1,1,1]) + + def _fuse_bn_tensor(self, branch): + if branch is None: + return 0, 0 + if not isinstance(branch, nn.BatchNorm2d): + if isinstance(branch, OREPA_3x3_RepConv): + kernel = branch.weight_gen() + elif isinstance(branch, ConvBN): + kernel = branch.conv.weight + else: + raise NotImplementedError + running_mean = branch.bn.running_mean + running_var = branch.bn.running_var + gamma = branch.bn.weight + beta = branch.bn.bias + eps = branch.bn.eps + else: + if not hasattr(self, 'id_tensor'): + input_dim = self.in_channels // self.groups + kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32) + for i in range(self.in_channels): + kernel_value[i, i % input_dim, 1, 1] = 1 + self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device) + kernel = self.id_tensor + running_mean = branch.running_mean + running_var = branch.running_var + gamma = branch.weight + beta = branch.bias + eps = branch.eps + std = (running_var + eps).sqrt() + t = (gamma / std).reshape(-1, 1, 1, 1) + return kernel * t, beta - running_mean * gamma / std + + def switch_to_deploy(self): + if hasattr(self, 'rbr_reparam'): + return + print(f"RepConv_OREPA.switch_to_deploy") + kernel, bias = self.get_equivalent_kernel_bias() + self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.in_channels, out_channels=self.rbr_dense.out_channels, + kernel_size=self.rbr_dense.kernel_size, stride=self.rbr_dense.stride, + padding=self.rbr_dense.padding, dilation=self.rbr_dense.dilation, groups=self.rbr_dense.groups, bias=True) + self.rbr_reparam.weight.data = kernel + self.rbr_reparam.bias.data = bias + for para in self.parameters(): + para.detach_() + self.__delattr__('rbr_dense') + self.__delattr__('rbr_1x1') + if hasattr(self, 'rbr_identity'): + self.__delattr__('rbr_identity') + +##### end of orepa ##### + + +##### swin transformer ##### + +class WindowAttention(nn.Module): + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + 
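+ # default scale is 1/sqrt(head_dim), the standard scaled dot-product factor; the relative
+ # position bias table below holds one learnable value per (relative offset, head) pair,
+ # i.e. (2*Wh-1)*(2*Ww-1) x num_heads entries (e.g. window_size=(7, 7), num_heads=3 -> shape (169, 3))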
self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + nn.init.normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + # print(attn.dtype, v.dtype) + try: + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + except: + #print(attn.dtype, v.dtype) + x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + +class Mlp(nn.Module): + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + +def window_partition(x, window_size): + + B, H, W, C = x.shape + assert H % window_size == 0, 'feature map h and w can not divide by window size' + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + +def window_reverse(windows, window_size, H, W): + + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // 
window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class SwinTransformerLayer(nn.Module): + + def __init__(self, dim, num_heads, window_size=8, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.SiLU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + # if min(self.input_resolution) <= self.window_size: + # # if window size is larger than input resolution, we don't partition windows + # self.shift_size = 0 + # self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=(self.window_size, self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def create_mask(self, H, W): + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x): + # reshape x[b c h w] to x[b l c] + _, _, H_, W_ = x.shape + + Padding = False + if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: + Padding = True + # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') + pad_r = (self.window_size - W_ % self.window_size) % self.window_size + pad_b = (self.window_size - H_ % self.window_size) % self.window_size + x = F.pad(x, (0, pad_r, 0, pad_b)) + + # print('2', x.shape) + B, C, H, W = x.shape + L = H * W + x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c + + # create mask from init to forward + if self.shift_size > 0: + attn_mask = self.create_mask(H, W).to(x.device) + else: + attn_mask = None + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) 
+ shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w + + if Padding: + x = x[:, :, :H_, :W_] # reverse padding + + return x + + +class SwinTransformerBlock(nn.Module): + def __init__(self, c1, c2, num_heads, num_layers, window_size=8): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + + # remove input_resolution + self.blocks = nn.Sequential(*[SwinTransformerLayer(dim=c2, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + x = self.blocks(x) + return x + + +class STCSPA(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class STCSPB(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class STCSPC(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(STCSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformerBlock(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + +##### end of swin transformer ##### + + +##### swin transformer v2 ##### + +class WindowAttention_v2(nn.Module): + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., + pretrained_window_size=[0, 0]): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.pretrained_window_size = pretrained_window_size + self.num_heads = num_heads + + self.logit_scale = nn.Parameter(torch.log(10 * 
torch.ones((num_heads, 1, 1))), requires_grad=True) + + # mlp to generate continuous relative position bias + self.cpb_mlp = nn.Sequential(nn.Linear(2, 512, bias=True), + nn.ReLU(inplace=True), + nn.Linear(512, num_heads, bias=False)) + + # get relative_coords_table + relative_coords_h = torch.arange(-(self.window_size[0] - 1), self.window_size[0], dtype=torch.float32) + relative_coords_w = torch.arange(-(self.window_size[1] - 1), self.window_size[1], dtype=torch.float32) + relative_coords_table = torch.stack( + torch.meshgrid([relative_coords_h, + relative_coords_w])).permute(1, 2, 0).contiguous().unsqueeze(0) # 1, 2*Wh-1, 2*Ww-1, 2 + if pretrained_window_size[0] > 0: + relative_coords_table[:, :, :, 0] /= (pretrained_window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (pretrained_window_size[1] - 1) + else: + relative_coords_table[:, :, :, 0] /= (self.window_size[0] - 1) + relative_coords_table[:, :, :, 1] /= (self.window_size[1] - 1) + relative_coords_table *= 8 # normalize to -8, 8 + relative_coords_table = torch.sign(relative_coords_table) * torch.log2( + torch.abs(relative_coords_table) + 1.0) / np.log2(8) + + self.register_buffer("relative_coords_table", relative_coords_table) + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=False) + if qkv_bias: + self.q_bias = nn.Parameter(torch.zeros(dim)) + self.v_bias = nn.Parameter(torch.zeros(dim)) + else: + self.q_bias = None + self.v_bias = None + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + + B_, N, C = x.shape + qkv_bias = None + if self.q_bias is not None: + qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + qkv = qkv.reshape(B_, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + # cosine attention + attn = (F.normalize(q, dim=-1) @ F.normalize(k, dim=-1).transpose(-2, -1)) + logit_scale = torch.clamp(self.logit_scale, max=torch.log(torch.tensor(1. 
/ 0.01))).exp() + attn = attn * logit_scale + + relative_position_bias_table = self.cpb_mlp(self.relative_coords_table).view(-1, self.num_heads) + relative_position_bias = relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + relative_position_bias = 16 * torch.sigmoid(relative_position_bias) + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + try: + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + except: + x = (attn.half() @ v).transpose(1, 2).reshape(B_, N, C) + + x = self.proj(x) + x = self.proj_drop(x) + return x + + def extra_repr(self) -> str: + return f'dim={self.dim}, window_size={self.window_size}, ' \ + f'pretrained_window_size={self.pretrained_window_size}, num_heads={self.num_heads}' + + def flops(self, N): + # calculate flops for 1 window with token length of N + flops = 0 + # qkv = self.qkv(x) + flops += N * self.dim * 3 * self.dim + # attn = (q @ k.transpose(-2, -1)) + flops += self.num_heads * N * (self.dim // self.num_heads) * N + # x = (attn @ v) + flops += self.num_heads * N * N * (self.dim // self.num_heads) + # x = self.proj(x) + flops += N * self.dim * self.dim + return flops + +class Mlp_v2(nn.Module): + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition_v2(x, window_size): + + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse_v2(windows, window_size, H, W): + + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class SwinTransformerLayer_v2(nn.Module): + + def __init__(self, dim, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.SiLU, norm_layer=nn.LayerNorm, pretrained_window_size=0): + super().__init__() + self.dim = dim + #self.input_resolution = input_resolution + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + #if min(self.input_resolution) <= self.window_size: + # # if window size is larger than input resolution, we don't partition windows + # self.shift_size = 0 + # self.window_size = min(self.input_resolution) + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention_v2( + dim, 
window_size=(self.window_size, self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop, + pretrained_window_size=(pretrained_window_size, pretrained_window_size)) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp_v2(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + def create_mask(self, H, W): + # calculate attention mask for SW-MSA + img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + return attn_mask + + def forward(self, x): + # reshape x[b c h w] to x[b l c] + _, _, H_, W_ = x.shape + + Padding = False + if min(H_, W_) < self.window_size or H_ % self.window_size!=0 or W_ % self.window_size!=0: + Padding = True + # print(f'img_size {min(H_, W_)} is less than (or not divided by) window_size {self.window_size}, Padding.') + pad_r = (self.window_size - W_ % self.window_size) % self.window_size + pad_b = (self.window_size - H_ % self.window_size) % self.window_size + x = F.pad(x, (0, pad_r, 0, pad_b)) + + # print('2', x.shape) + B, C, H, W = x.shape + L = H * W + x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C) # b, L, c + + # create mask from init to forward + if self.shift_size > 0: + attn_mask = self.create_mask(H, W).to(x.device) + else: + attn_mask = None + + shortcut = x + x = x.view(B, H, W, C) + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + else: + shifted_x = x + + # partition windows + x_windows = window_partition_v2(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse_v2(attn_windows, self.window_size, H, W) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + x = x.view(B, H * W, C) + x = shortcut + self.drop_path(self.norm1(x)) + + # FFN + x = x + self.drop_path(self.norm2(self.mlp(x))) + x = x.permute(0, 2, 1).contiguous().view(-1, C, H, W) # b c h w + + if Padding: + x = x[:, :, :H_, :W_] # reverse padding + + return x + + def extra_repr(self) -> str: + return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \ + f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}" + + def flops(self): + flops = 0 + H, W = self.input_resolution + # norm1 + flops += self.dim * H * W + # W-MSA/SW-MSA + 
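+ # note: extra_repr() and flops() still read self.input_resolution, which this trimmed layer no
+ # longer sets in __init__ (the assignment is commented out), so they only work if that attribute
+ # is assigned externally; the per-window cost here is self.attn.flops(N) with N = window_size**2,
+ # multiplied by the window count computed below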
nW = H * W / self.window_size / self.window_size + flops += nW * self.attn.flops(self.window_size * self.window_size) + # mlp + flops += 2 * H * W * self.dim * self.dim * self.mlp_ratio + # norm2 + flops += self.dim * H * W + return flops + + +class SwinTransformer2Block(nn.Module): + def __init__(self, c1, c2, num_heads, num_layers, window_size=7): + super().__init__() + self.conv = None + if c1 != c2: + self.conv = Conv(c1, c2) + + # remove input_resolution + self.blocks = nn.Sequential(*[SwinTransformerLayer_v2(dim=c2, num_heads=num_heads, window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2) for i in range(num_layers)]) + + def forward(self, x): + if self.conv is not None: + x = self.conv(x) + x = self.blocks(x) + return x + + +class ST2CSPA(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPA, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.m(self.cv1(x)) + y2 = self.cv2(x) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class ST2CSPB(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPB, self).__init__() + c_ = int(c2) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c_, c_, 1, 1) + self.cv3 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + x1 = self.cv1(x) + y1 = self.m(x1) + y2 = self.cv2(x1) + return self.cv3(torch.cat((y1, y2), dim=1)) + + +class ST2CSPC(nn.Module): + # CSP Bottleneck https://github.com/WongKinYiu/CrossStagePartialNetworks + def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion + super(ST2CSPC, self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, 1, 1) + self.cv2 = Conv(c1, c_, 1, 1) + self.cv3 = Conv(c_, c_, 1, 1) + self.cv4 = Conv(2 * c_, c2, 1, 1) + num_heads = c_ // 32 + self.m = SwinTransformer2Block(c_, c_, num_heads, n) + #self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)]) + + def forward(self, x): + y1 = self.cv3(self.m(self.cv1(x))) + y2 = self.cv2(x) + return self.cv4(torch.cat((y1, y2), dim=1)) + +##### end of swin transformer v2 ##### diff --git a/src/train_utils/train_models/models/yolov7/models/experimental.py b/src/train_utils/train_models/models/yolov7/models/experimental.py new file mode 100644 index 0000000..735d7aa --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/models/experimental.py @@ -0,0 +1,272 @@ +import numpy as np +import random +import torch +import torch.nn as nn + +from models.common import Conv, DWConv +from utils.google_utils import attempt_download + + +class CrossConv(nn.Module): + # Cross Convolution Downsample + def __init__(self, c1, c2, k=3, s=1, g=1, e=1.0, shortcut=False): + # ch_in, ch_out, kernel, stride, groups, expansion, shortcut + super(CrossConv, 
self).__init__() + c_ = int(c2 * e) # hidden channels + self.cv1 = Conv(c1, c_, (1, k), (1, s)) + self.cv2 = Conv(c_, c2, (k, 1), (s, 1), g=g) + self.add = shortcut and c1 == c2 + + def forward(self, x): + return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x)) + + +class Sum(nn.Module): + # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070 + def __init__(self, n, weight=False): # n: number of inputs + super(Sum, self).__init__() + self.weight = weight # apply weights boolean + self.iter = range(n - 1) # iter object + if weight: + self.w = nn.Parameter(-torch.arange(1., n) / 2, requires_grad=True) # layer weights + + def forward(self, x): + y = x[0] # no weight + if self.weight: + w = torch.sigmoid(self.w) * 2 + for i in self.iter: + y = y + x[i + 1] * w[i] + else: + for i in self.iter: + y = y + x[i + 1] + return y + + +class MixConv2d(nn.Module): + # Mixed Depthwise Conv https://arxiv.org/abs/1907.09595 + def __init__(self, c1, c2, k=(1, 3), s=1, equal_ch=True): + super(MixConv2d, self).__init__() + groups = len(k) + if equal_ch: # equal c_ per group + i = torch.linspace(0, groups - 1E-6, c2).floor() # c2 indices + c_ = [(i == g).sum() for g in range(groups)] # intermediate channels + else: # equal weight.numel() per group + b = [c2] + [0] * groups + a = np.eye(groups + 1, groups, k=-1) + a -= np.roll(a, 1, axis=1) + a *= np.array(k) ** 2 + a[0] = 1 + c_ = np.linalg.lstsq(a, b, rcond=None)[0].round() # solve for equal weight indices, ax = b + + self.m = nn.ModuleList([nn.Conv2d(c1, int(c_[g]), k[g], s, k[g] // 2, bias=False) for g in range(groups)]) + self.bn = nn.BatchNorm2d(c2) + self.act = nn.LeakyReLU(0.1, inplace=True) + + def forward(self, x): + return x + self.act(self.bn(torch.cat([m(x) for m in self.m], 1))) + + +class Ensemble(nn.ModuleList): + # Ensemble of models + def __init__(self): + super(Ensemble, self).__init__() + + def forward(self, x, augment=False): + y = [] + for module in self: + y.append(module(x, augment)[0]) + # y = torch.stack(y).max(0)[0] # max ensemble + # y = torch.stack(y).mean(0) # mean ensemble + y = torch.cat(y, 1) # nms ensemble + return y, None # inference, train output + + + + + +class ORT_NMS(torch.autograd.Function): + '''ONNX-Runtime NMS operation''' + @staticmethod + def forward(ctx, + boxes, + scores, + max_output_boxes_per_class=torch.tensor([100]), + iou_threshold=torch.tensor([0.45]), + score_threshold=torch.tensor([0.25])): + device = boxes.device + batch = scores.shape[0] + num_det = random.randint(0, 100) + batches = torch.randint(0, batch, (num_det,)).sort()[0].to(device) + idxs = torch.arange(100, 100 + num_det).to(device) + zeros = torch.zeros((num_det,), dtype=torch.int64).to(device) + selected_indices = torch.cat([batches[None], zeros[None], idxs[None]], 0).T.contiguous() + selected_indices = selected_indices.to(torch.int64) + return selected_indices + + @staticmethod + def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): + return g.op("NonMaxSuppression", boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) + + +class TRT_NMS(torch.autograd.Function): + '''TensorRT NMS operation''' + @staticmethod + def forward( + ctx, + boxes, + scores, + background_class=-1, + box_coding=1, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version="1", + score_activation=0, + score_threshold=0.25, + ): + batch_size, num_boxes, num_classes = scores.shape + num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32) + det_boxes = 
torch.randn(batch_size, max_output_boxes, 4) + det_scores = torch.randn(batch_size, max_output_boxes) + det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32) + return num_det, det_boxes, det_scores, det_classes + + @staticmethod + def symbolic(g, + boxes, + scores, + background_class=-1, + box_coding=1, + iou_threshold=0.45, + max_output_boxes=100, + plugin_version="1", + score_activation=0, + score_threshold=0.25): + out = g.op("TRT::EfficientNMS_TRT", + boxes, + scores, + background_class_i=background_class, + box_coding_i=box_coding, + iou_threshold_f=iou_threshold, + max_output_boxes_i=max_output_boxes, + plugin_version_s=plugin_version, + score_activation_i=score_activation, + score_threshold_f=score_threshold, + outputs=4) + nums, boxes, scores, classes = out + return nums, boxes, scores, classes + + +class ONNX_ORT(nn.Module): + '''onnx module with ONNX-Runtime NMS operation.''' + def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=640, device=None, n_classes=80): + super().__init__() + self.device = device if device else torch.device("cpu") + self.max_obj = torch.tensor([max_obj]).to(device) + self.iou_threshold = torch.tensor([iou_thres]).to(device) + self.score_threshold = torch.tensor([score_thres]).to(device) + self.max_wh = max_wh # if max_wh != 0 : non-agnostic else : agnostic + self.convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=self.device) + self.n_classes=n_classes + + def forward(self, x): + boxes = x[:, :, :4] + conf = x[:, :, 4:5] + scores = x[:, :, 5:] + if self.n_classes == 1: + scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. + else: + scores *= conf # conf = obj_conf * cls_conf + boxes @= self.convert_matrix + max_score, category_id = scores.max(2, keepdim=True) + dis = category_id.float() * self.max_wh + nmsbox = boxes + dis + max_score_tp = max_score.transpose(1, 2).contiguous() + selected_indices = ORT_NMS.apply(nmsbox, max_score_tp, self.max_obj, self.iou_threshold, self.score_threshold) + X, Y = selected_indices[:, 0], selected_indices[:, 2] + selected_boxes = boxes[X, Y, :] + selected_categories = category_id[X, Y, :].float() + selected_scores = max_score[X, Y, :] + X = X.unsqueeze(1).float() + return torch.cat([X, selected_boxes, selected_categories, selected_scores], 1) + +class ONNX_TRT(nn.Module): + '''onnx module with TensorRT NMS operation.''' + def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None, n_classes=80): + super().__init__() + assert max_wh is None + self.device = device if device else torch.device('cpu') + self.background_class = -1, + self.box_coding = 1, + self.iou_threshold = iou_thres + self.max_obj = max_obj + self.plugin_version = '1' + self.score_activation = 0 + self.score_threshold = score_thres + self.n_classes=n_classes + + def forward(self, x): + boxes = x[:, :, :4] + conf = x[:, :, 4:5] + scores = x[:, :, 5:] + if self.n_classes == 1: + scores = conf # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. 
+ else: + scores *= conf # conf = obj_conf * cls_conf + num_det, det_boxes, det_scores, det_classes = TRT_NMS.apply(boxes, scores, self.background_class, self.box_coding, + self.iou_threshold, self.max_obj, + self.plugin_version, self.score_activation, + self.score_threshold) + return num_det, det_boxes, det_scores, det_classes + + +class End2End(nn.Module): + '''export onnx or tensorrt model with NMS operation.''' + def __init__(self, model, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None, n_classes=80): + super().__init__() + device = device if device else torch.device('cpu') + assert isinstance(max_wh,(int)) or max_wh is None + self.model = model.to(device) + self.model.model[-1].end2end = True + self.patch_model = ONNX_TRT if max_wh is None else ONNX_ORT + self.end2end = self.patch_model(max_obj, iou_thres, score_thres, max_wh, device, n_classes) + self.end2end.eval() + + def forward(self, x): + x = self.model(x) + x = self.end2end(x) + return x + + + + + +def attempt_load(weights, map_location=None): + # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a + model = Ensemble() + for w in weights if isinstance(weights, list) else [weights]: + attempt_download(w) + ckpt = torch.load(w, map_location=map_location) # load + model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model + + # Compatibility updates + for m in model.modules(): + if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]: + m.inplace = True # pytorch 1.7.0 compatibility + elif type(m) is nn.Upsample: + m.recompute_scale_factor = None # torch 1.11.0 compatibility + elif type(m) is Conv: + m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility + + if len(model) == 1: + return model[-1] # return model + else: + print('Ensemble created with %s\n' % weights) + for k in ['names', 'stride']: + setattr(model, k, getattr(model[-1], k)) + return model # return ensemble + + diff --git a/src/train_utils/train_models/models/yolov7/models/yolo.py b/src/train_utils/train_models/models/yolov7/models/yolo.py new file mode 100644 index 0000000..95a019c --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/models/yolo.py @@ -0,0 +1,843 @@ +import argparse +import logging +import sys +from copy import deepcopy + +sys.path.append('./') # to run '$ python *.py' files in subdirectories +logger = logging.getLogger(__name__) +import torch +from models.common import * +from models.experimental import * +from utils.autoanchor import check_anchor_order +from utils.general import make_divisible, check_file, set_logging +from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \ + select_device, copy_attr +from utils.loss import SigmoidBin + +try: + import thop # for FLOPS computation +except ImportError: + thop = None + + +class Detect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(Detect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', 
a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IDetect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(IDetect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + def fuseforward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + def fuse(self): + print("IDetect.fuse") + # fuse ImplicitA and Convolution + for i in range(len(self.m)): + c1,c2,_,_ = self.m[i].weight.shape + c1_,c2_, _,_ = self.ia[i].implicit.shape + self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) + + # fuse ImplicitM and Convolution + for i in range(len(self.m)): + c1,c2, _,_ = self.im[i].implicit.shape + self.m[i].bias *= self.im[i].implicit.reshape(c2) + self.m[i].weight *= self.im[i].implicit.transpose(0,1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IKeypoint(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), nkpt=17, ch=(), inplace=True, dw_conv_kpt=False): # detection layer + super(IKeypoint, self).__init__() + self.nc = nc # number of classes + self.nkpt = nkpt + self.dw_conv_kpt = dw_conv_kpt + self.no_det=(nc + 5) # number of outputs per anchor for box and class + self.no_kpt = 3*self.nkpt ## number of outputs per anchor for keypoints + self.no = self.no_det+self.no_kpt + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + self.flip_test = False + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no_det * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = 
nn.ModuleList(ImplicitM(self.no_det * self.na) for _ in ch) + + if self.nkpt is not None: + if self.dw_conv_kpt: #keypoint head is slightly more complex + self.m_kpt = nn.ModuleList( + nn.Sequential(DWConv(x, x, k=3), Conv(x,x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), Conv(x,x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), Conv(x, x), + DWConv(x, x, k=3), nn.Conv2d(x, self.no_kpt * self.na, 1)) for x in ch) + else: #keypoint head is a single convolution + self.m_kpt = nn.ModuleList(nn.Conv2d(x, self.no_kpt * self.na, 1) for x in ch) + + self.inplace = inplace # use in-place ops (e.g. slice assignment) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + if self.nkpt is None or self.nkpt==0: + x[i] = self.im[i](self.m[i](self.ia[i](x[i]))) # conv + else : + x[i] = torch.cat((self.im[i](self.m[i](self.ia[i](x[i]))), self.m_kpt[i](x[i])), axis=1) + + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + x_det = x[i][..., :6] + x_kpt = x[i][..., 6:] + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + kpt_grid_x = self.grid[i][..., 0:1] + kpt_grid_y = self.grid[i][..., 1:2] + + if self.nkpt == 0: + y = x[i].sigmoid() + else: + y = x_det.sigmoid() + + if self.inplace: + xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].view(1, self.na, 1, 1, 2) # wh + if self.nkpt != 0: + x_kpt[..., 0::3] = (x_kpt[..., ::3] * 2. - 0.5 + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy + x_kpt[..., 1::3] = (x_kpt[..., 1::3] * 2. - 0.5 + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy + #x_kpt[..., 0::3] = (x_kpt[..., ::3] + kpt_grid_x.repeat(1,1,1,1,17)) * self.stride[i] # xy + #x_kpt[..., 1::3] = (x_kpt[..., 1::3] + kpt_grid_y.repeat(1,1,1,1,17)) * self.stride[i] # xy + #print('=============') + #print(self.anchor_grid[i].shape) + #print(self.anchor_grid[i][...,0].unsqueeze(4).shape) + #print(x_kpt[..., 0::3].shape) + #x_kpt[..., 0::3] = ((x_kpt[..., 0::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 1::3] = ((x_kpt[..., 1::3].tanh() * 2.) ** 3 * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 0::3] = (((x_kpt[..., 0::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,0].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_x.repeat(1,1,1,1,17) * self.stride[i] # xy + #x_kpt[..., 1::3] = (((x_kpt[..., 1::3].sigmoid() * 4.) ** 2 - 8.) * self.anchor_grid[i][...,1].unsqueeze(4).repeat(1,1,1,1,self.nkpt)) + kpt_grid_y.repeat(1,1,1,1,17) * self.stride[i] # xy + x_kpt[..., 2::3] = x_kpt[..., 2::3].sigmoid() + + y = torch.cat((xy, wh, y[..., 4:], x_kpt), dim = -1) + + else: # for YOLOv5 on AWS Inferentia https://github.com/ultralytics/yolov5/pull/2953 + xy = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + if self.nkpt != 0: + y[..., 6:] = (y[..., 6:] * 2. 
- 0.5 + self.grid[i].repeat((1,1,1,1,self.nkpt))) * self.stride[i] # xy + y = torch.cat((xy, wh, y[..., 4:]), -1) + + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class IAuxDetect(nn.Module): + stride = None # strides computed during build + export = False # onnx export + end2end = False + include_nms = False + concat = False + + def __init__(self, nc=80, anchors=(), ch=()): # detection layer + super(IAuxDetect, self).__init__() + self.nc = nc # number of classes + self.no = nc + 5 # number of outputs per anchor + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[:self.nl]) # output conv + self.m2 = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch[self.nl:]) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch[:self.nl]) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch[:self.nl]) + + def forward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + x[i+self.nl] = self.m2[i](x[i+self.nl]) + x[i+self.nl] = x[i+self.nl].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy, wh, conf = y.split((2, 2, self.nc + 1), 4) # y.tensor_split((2, 4, 5), 4) # torch 1.8.0 + xy = xy * (2. * self.stride[i]) + (self.stride[i] * (self.grid[i] - 0.5)) # new xy + wh = wh ** 2 * (4 * self.anchor_grid[i].data) # new wh + y = torch.cat((xy, wh, conf), 4) + z.append(y.view(bs, -1, self.no)) + + return x if self.training else (torch.cat(z, 1), x[:self.nl]) + + def fuseforward(self, x): + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](x[i]) # conv + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + if not torch.onnx.is_in_onnx_export(): + y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy + y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + else: + xy = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + wh = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i].data # wh + y = torch.cat((xy, wh, y[..., 4:]), -1) + z.append(y.view(bs, -1, self.no)) + + if self.training: + out = x + elif self.end2end: + out = torch.cat(z, 1) + elif self.include_nms: + z = self.convert(z) + out = (z, ) + elif self.concat: + out = torch.cat(z, 1) + else: + out = (torch.cat(z, 1), x) + + return out + + def fuse(self): + print("IAuxDetect.fuse") + # fuse ImplicitA and Convolution + for i in range(len(self.m)): + c1,c2,_,_ = self.m[i].weight.shape + c1_,c2_, _,_ = self.ia[i].implicit.shape + self.m[i].bias += torch.matmul(self.m[i].weight.reshape(c1,c2),self.ia[i].implicit.reshape(c2_,c1_)).squeeze(1) + + # fuse ImplicitM and Convolution + for i in range(len(self.m)): + c1,c2, _,_ = self.im[i].implicit.shape + self.m[i].bias *= self.im[i].implicit.reshape(c2) + self.m[i].weight *= self.im[i].implicit.transpose(0,1) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + def convert(self, z): + z = torch.cat(z, 1) + box = z[:, :, :4] + conf = z[:, :, 4:5] + score = z[:, :, 5:] + score *= conf + convert_matrix = torch.tensor([[1, 0, 1, 0], [0, 1, 0, 1], [-0.5, 0, 0.5, 0], [0, -0.5, 0, 0.5]], + dtype=torch.float32, + device=z.device) + box @= convert_matrix + return (box, score) + + +class IBin(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, nc=80, anchors=(), ch=(), bin_count=21): # detection layer + super(IBin, self).__init__() + self.nc = nc # number of classes + self.bin_count = bin_count + + self.w_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) + self.h_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0) + # classes, x,y,obj + self.no = nc + 3 + \ + self.w_bin_sigmoid.get_length() + self.h_bin_sigmoid.get_length() # w-bce, h-bce + # + self.x_bin_sigmoid.get_length() + self.y_bin_sigmoid.get_length() + + self.nl = len(anchors) # number of detection layers + self.na = len(anchors[0]) // 2 # number of anchors + self.grid = [torch.zeros(1)] * self.nl # init grid + a = torch.tensor(anchors).float().view(self.nl, -1, 2) + self.register_buffer('anchors', a) # shape(nl,na,2) + self.register_buffer('anchor_grid', a.clone().view(self.nl, 1, -1, 1, 1, 2)) # shape(nl,1,na,1,1,2) + self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv + + self.ia = nn.ModuleList(ImplicitA(x) for x in ch) + self.im = nn.ModuleList(ImplicitM(self.no * self.na) for _ in ch) + + def forward(self, x): + + #self.x_bin_sigmoid.use_fw_regression = True + #self.y_bin_sigmoid.use_fw_regression = True + self.w_bin_sigmoid.use_fw_regression = True + self.h_bin_sigmoid.use_fw_regression = True + + # x = x.copy() # for profiling + z = [] # inference output + self.training |= self.export + for i in range(self.nl): + x[i] = self.m[i](self.ia[i](x[i])) # conv + x[i] = self.im[i](x[i]) + bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) + x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() + + if not self.training: # inference + if self.grid[i].shape[2:4] != x[i].shape[2:4]: + self.grid[i] = self._make_grid(nx, ny).to(x[i].device) + + y = x[i].sigmoid() + y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy + #y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh + + + #px = (self.x_bin_sigmoid.forward(y[..., 0:12]) + self.grid[i][..., 0]) * self.stride[i] + #py = (self.y_bin_sigmoid.forward(y[..., 12:24]) + self.grid[i][..., 1]) * self.stride[i] + + pw = self.w_bin_sigmoid.forward(y[..., 2:24]) * self.anchor_grid[i][..., 0] + ph = self.h_bin_sigmoid.forward(y[..., 24:46]) * self.anchor_grid[i][..., 1] + + #y[..., 0] = px + #y[..., 1] = py + y[..., 2] = pw + y[..., 3] = ph + + y = torch.cat((y[..., 0:4], y[..., 46:]), dim=-1) + + z.append(y.view(bs, -1, y.shape[-1])) + + return x if self.training else (torch.cat(z, 1), x) + + @staticmethod + def _make_grid(nx=20, ny=20): + yv, xv = torch.meshgrid([torch.arange(ny), torch.arange(nx)]) + return torch.stack((xv, yv), 2).view((1, 1, ny, nx, 2)).float() + + +class Model(nn.Module): + def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes + super(Model, self).__init__() + self.traced = False + if isinstance(cfg, dict): + self.yaml = cfg # model dict + else: # is *.yaml + import yaml # for torch hub + self.yaml_file = Path(cfg).name + with open(cfg) as f: + self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict + + # Define model + ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels + if nc and nc != self.yaml['nc']: + logger.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}") + self.yaml['nc'] = nc # override yaml value + if anchors: + logger.info(f'Overriding model.yaml anchors with anchors={anchors}') + self.yaml['anchors'] = round(anchors) # override yaml value + self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist + self.names = [str(i) for i in range(self.yaml['nc'])] # default names + # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))]) + + # Build strides, anchors + m = self.model[-1] # Detect() + if isinstance(m, Detect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IDetect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IAuxDetect): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward + #print(m.stride) + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_aux_biases() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IBin): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 1, 1) + self.stride = m.stride + self._initialize_biases_bin() # only run once + # print('Strides: %s' % m.stride.tolist()) + if isinstance(m, IKeypoint): + s = 256 # 2x min stride + m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward + check_anchor_order(m) + m.anchors /= m.stride.view(-1, 
1, 1) + self.stride = m.stride + self._initialize_biases_kpt() # only run once + # print('Strides: %s' % m.stride.tolist()) + + # Init weights, biases + initialize_weights(self) + self.info() + logger.info('') + + def forward(self, x, augment=False, profile=False): + if augment: + img_size = x.shape[-2:] # height, width + s = [1, 0.83, 0.67] # scales + f = [None, 3, None] # flips (2-ud, 3-lr) + y = [] # outputs + for si, fi in zip(s, f): + xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max())) + yi = self.forward_once(xi)[0] # forward + # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save + yi[..., :4] /= si # de-scale + if fi == 2: + yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud + elif fi == 3: + yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr + y.append(yi) + return torch.cat(y, 1), None # augmented inference, train + else: + return self.forward_once(x, profile) # single-scale inference, train + + def forward_once(self, x, profile=False): + y, dt = [], [] # outputs + for m in self.model: + if m.f != -1: # if not from previous layer + x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers + + if not hasattr(self, 'traced'): + self.traced=False + + if self.traced: + if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint): + break + + if profile: + c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin)) + o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS + for _ in range(10): + m(x.copy() if c else x) + t = time_synchronized() + for _ in range(10): + m(x.copy() if c else x) + dt.append((time_synchronized() - t) * 100) + print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) + + x = m(x) # run + + y.append(x if m.i in self.save else None) # save output + + if profile: + print('%.1fms total' % sum(dt)) + return x + + def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. 
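+ # Both the lead head convs (m.m) and the auxiliary head convs (m.m2) below receive the same prior-based
+ # initialization as _initialize_biases: the objectness logit is shifted so that its sigmoid is roughly
+ # 8 / (640 / s) ** 2, i.e. about 8 objects expected per 640x640 image at stride s, and each class logit
+ # is shifted towards a prior of about 0.6 / nc (or log(cf / cf.sum()) when class frequencies cf are given).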
+ m = self.model[-1] # Detect() module + for mi, mi2, s in zip(m.m, m.m2, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True) + + def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Bin() module + bc = m.bin_count + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + old = b[:, (0,1,2,bc+3)].data + obj_idx = 2*bc+4 + b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99)) + b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + b[:, (0,1,2,bc+3)].data = old + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency + # https://arxiv.org/abs/1708.02002 section 3.3 + # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1. + m = self.model[-1] # Detect() module + for mi, s in zip(m.m, m.stride): # from + b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85) + b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image) + b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls + mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True) + + def _print_biases(self): + m = self.model[-1] # Detect() module + for mi in m.m: # from + b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85) + print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean())) + + # def _print_weights(self): + # for m in self.model.modules(): + # if type(m) is Bottleneck: + # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights + + def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers + print('Fusing layers... ') + for m in self.model.modules(): + if isinstance(m, RepConv): + #print(f" fuse_repvgg_block") + m.fuse_repvgg_block() + elif isinstance(m, RepConv_OREPA): + #print(f" switch_to_deploy") + m.switch_to_deploy() + elif type(m) is Conv and hasattr(m, 'bn'): + m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv + delattr(m, 'bn') # remove batchnorm + m.forward = m.fuseforward # update forward + elif isinstance(m, (IDetect, IAuxDetect)): + m.fuse() + m.forward = m.fuseforward + self.info() + return self + + def nms(self, mode=True): # add or remove NMS module + present = type(self.model[-1]) is NMS # last layer is NMS + if mode and not present: + print('Adding NMS... ') + m = NMS() # module + m.f = -1 # from + m.i = self.model[-1].i + 1 # index + self.model.add_module(name='%s' % m.i, module=m) # add + self.eval() + elif not mode and present: + print('Removing NMS... 
') + self.model = self.model[:-1] # remove + return self + + def autoshape(self): # add autoShape module + print('Adding autoShape... ') + m = autoShape(self) # wrap model + copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes + return m + + def info(self, verbose=False, img_size=640): # print model information + model_info(self, verbose, img_size) + + +def parse_model(d, ch): # model_dict, input_channels(3) + logger.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments')) + anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple'] + na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors # number of anchors + no = na * (nc + 5) # number of outputs = anchors * (classes + 5) + + layers, save, c2 = [], [], ch[-1] # layers, savelist, ch out + for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']): # from, number, module, args + m = eval(m) if isinstance(m, str) else m # eval strings + for j, a in enumerate(args): + try: + args[j] = eval(a) if isinstance(a, str) else a # eval strings + except: + pass + + n = max(round(n * gd), 1) if n > 1 else n # depth gain + if m in [nn.Conv2d, Conv, RobustConv, RobustConv2, DWConv, GhostConv, RepConv, RepConv_OREPA, DownC, + SPP, SPPF, SPPCSPC, GhostSPPCSPC, MixConv2d, Focus, Stem, GhostStem, CrossConv, + Bottleneck, BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, + RepBottleneck, RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, + Res, ResCSPA, ResCSPB, ResCSPC, + RepRes, RepResCSPA, RepResCSPB, RepResCSPC, + ResX, ResXCSPA, ResXCSPB, ResXCSPC, + RepResX, RepResXCSPA, RepResXCSPB, RepResXCSPC, + Ghost, GhostCSPA, GhostCSPB, GhostCSPC, + SwinTransformerBlock, STCSPA, STCSPB, STCSPC, + SwinTransformer2Block, ST2CSPA, ST2CSPB, ST2CSPC]: + c1, c2 = ch[f], args[0] + if c2 != no: # if not output + c2 = make_divisible(c2 * gw, 8) + + args = [c1, c2, *args[1:]] + if m in [DownC, SPPCSPC, GhostSPPCSPC, + BottleneckCSPA, BottleneckCSPB, BottleneckCSPC, + RepBottleneckCSPA, RepBottleneckCSPB, RepBottleneckCSPC, + ResCSPA, ResCSPB, ResCSPC, + RepResCSPA, RepResCSPB, RepResCSPC, + ResXCSPA, ResXCSPB, ResXCSPC, + RepResXCSPA, RepResXCSPB, RepResXCSPC, + GhostCSPA, GhostCSPB, GhostCSPC, + STCSPA, STCSPB, STCSPC, + ST2CSPA, ST2CSPB, ST2CSPC]: + args.insert(2, n) # number of repeats + n = 1 + elif m is nn.BatchNorm2d: + args = [ch[f]] + elif m is Concat: + c2 = sum([ch[x] for x in f]) + elif m is Chuncat: + c2 = sum([ch[x] for x in f]) + elif m is Shortcut: + c2 = ch[f[0]] + elif m is Foldcut: + c2 = ch[f] // 2 + elif m in [Detect, IDetect, IAuxDetect, IBin, IKeypoint]: + args.append([ch[x] for x in f]) + if isinstance(args[1], int): # number of anchors + args[1] = [list(range(args[1] * 2))] * len(f) + elif m is ReOrg: + c2 = ch[f] * 4 + elif m is Contract: + c2 = ch[f] * args[0] ** 2 + elif m is Expand: + c2 = ch[f] // args[0] ** 2 + else: + c2 = ch[f] + + m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args) # module + t = str(m)[8:-2].replace('__main__.', '') # module type + np = sum([x.numel() for x in m_.parameters()]) # number params + m_.i, m_.f, m_.type, m_.np = i, f, t, np # attach index, 'from' index, type, number params + logger.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n, np, t, args)) # print + save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist + layers.append(m_) + if i == 0: + ch = [] + ch.append(c2) + return nn.Sequential(*layers), sorted(save) + + +if __name__ 
== '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--cfg', type=str, default='yolor-csp-c.yaml', help='model.yaml') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--profile', action='store_true', help='profile model speed') + opt = parser.parse_args() + opt.cfg = check_file(opt.cfg) # check file + set_logging() + device = select_device(opt.device) + + # Create model + model = Model(opt.cfg).to(device) + model.train() + + if opt.profile: + img = torch.rand(1, 3, 640, 640).to(device) + y = model(img, profile=True) + + # Profile + # img = torch.rand(8 if torch.cuda.is_available() else 1, 3, 640, 640).to(device) + # y = model(img, profile=True) + + # Tensorboard + # from torch.utils.tensorboard import SummaryWriter + # tb_writer = SummaryWriter() + # print("Run 'tensorboard --logdir=models/runs' to view tensorboard at http://localhost:6006/") + # tb_writer.add_graph(model.model, img) # add model to tensorboard + # tb_writer.add_image('test', img[0], dataformats='CWH') # add model to tensorboard diff --git a/src/train_utils/train_models/models/yolov7/paper/yolov7.pdf b/src/train_utils/train_models/models/yolov7/paper/yolov7.pdf new file mode 100644 index 0000000..e60c30b Binary files /dev/null and b/src/train_utils/train_models/models/yolov7/paper/yolov7.pdf differ diff --git a/src/train_utils/train_models/models/yolov7/requirements.txt b/src/train_utils/train_models/models/yolov7/requirements.txt new file mode 100644 index 0000000..f4d2182 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/requirements.txt @@ -0,0 +1,39 @@ +# Usage: pip install -r requirements.txt + +# Base ---------------------------------------- +matplotlib>=3.2.2 +numpy>=1.18.5,<1.24.0 +opencv-python>=4.1.1 +Pillow>=7.1.2 +PyYAML>=5.3.1 +requests>=2.23.0 +scipy>=1.4.1 +torch>=1.7.0,!=1.12.0 +torchvision>=0.8.1,!=0.13.0 +tqdm>=4.41.0 +protobuf<4.21.3 + +# Logging ------------------------------------- +tensorboard>=2.4.1 +# wandb + +# Plotting ------------------------------------ +pandas>=1.1.4 +seaborn>=0.11.0 + +# Export -------------------------------------- +# coremltools>=4.1 # CoreML export +# onnx>=1.9.0 # ONNX export +# onnx-simplifier>=0.3.6 # ONNX simplifier +# scikit-learn==0.19.2 # CoreML quantization +# tensorflow>=2.4.1 # TFLite export +# tensorflowjs>=3.9.0 # TF.js export +# openvino-dev # OpenVINO export + +# Extras -------------------------------------- +ipython # interactive notebook +psutil # system utilization +thop # FLOPs computation +# albumentations>=1.0.3 +# pycocotools>=2.0 # COCO mAP +# roboflow diff --git a/src/train_utils/train_models/models/yolov7/scripts/get_coco.sh b/src/train_utils/train_models/models/yolov7/scripts/get_coco.sh new file mode 100644 index 0000000..524f8dd --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/scripts/get_coco.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# COCO 2017 dataset http://cocodataset.org +# Download command: bash ./scripts/get_coco.sh + +# Download/unzip labels +d='./' # unzip directory +url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ +f='coco2017labels-segments.zip' # or 'coco2017labels.zip', 68 MB +echo 'Downloading' $url$f ' ...' 
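+# The label archive below is downloaded, unzipped and removed in a background job (trailing '&');
+# the 'wait' at the end of this script blocks until all background downloads have completed.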
+curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background + +# Download/unzip images +d='./coco/images' # unzip directory +url=http://images.cocodataset.org/zips/ +f1='train2017.zip' # 19G, 118k images +f2='val2017.zip' # 1G, 5k images +f3='test2017.zip' # 7G, 41k images (optional) +for f in $f1 $f2 $f3; do + echo 'Downloading' $url$f '...' + curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background +done +wait # finish background tasks diff --git a/src/train_utils/train_models/models/yolov7/test.py b/src/train_utils/train_models/models/yolov7/test.py new file mode 100644 index 0000000..17b4806 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/test.py @@ -0,0 +1,353 @@ +import argparse +import json +import os +from pathlib import Path +from threading import Thread + +import numpy as np +import torch +import yaml +from tqdm import tqdm + +from models.experimental import attempt_load +from utils.datasets import create_dataloader +from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ + box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr +from utils.metrics import ap_per_class, ConfusionMatrix +from utils.plots import plot_images, output_to_target, plot_study_txt +from utils.torch_utils import select_device, time_synchronized, TracedModel + + +def test(data, + weights=None, + batch_size=32, + imgsz=640, + conf_thres=0.001, + iou_thres=0.6, # for NMS + save_json=False, + single_cls=False, + augment=False, + verbose=False, + model=None, + dataloader=None, + save_dir=Path(''), # for saving images + save_txt=False, # for auto-labelling + save_hybrid=False, # for hybrid auto-labelling + save_conf=False, # save auto-label confidences + plots=True, + wandb_logger=None, + compute_loss=None, + half_precision=True, + trace=False, + is_coco=False, + v5_metric=False): + # Initialize/load model and set device + training = model is not None + if training: # called by train.py + device = next(model.parameters()).device # get model device + + else: # called directly + set_logging() + device = select_device(opt.device, batch_size=batch_size) + + # Directories + save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run + (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir + + # Load model + model = attempt_load(weights, map_location=device) # load FP32 model + gs = max(int(model.stride.max()), 32) # grid size (max stride) + imgsz = check_img_size(imgsz, s=gs) # check img_size + + if trace: + model = TracedModel(model, device, imgsz) + + # Half + half = device.type != 'cpu' and half_precision # half precision only supported on CUDA + if half: + model.half() + + # Configure + model.eval() + if isinstance(data, str): + is_coco = data.endswith('coco.yaml') + with open(data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) + check_dataset(data) # check + nc = 1 if single_cls else int(data['nc']) # number of classes + iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 + niou = iouv.numel() + + # Logging + log_imgs = 0 + if wandb_logger and wandb_logger.wandb: + log_imgs = min(wandb_logger.log_imgs, 100) + # Dataloader + if not training: + if device.type != 'cpu': + model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once + task = opt.task if opt.task in ('train', 'val', 
'test') else 'val' # path to train/val/test images + dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, + prefix=colorstr(f'{task}: '))[0] + + if v5_metric: + print("Testing with YOLOv5 AP metric...") + + seen = 0 + confusion_matrix = ConfusionMatrix(nc=nc) + names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} + coco91class = coco80_to_coco91_class() + s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') + p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. + loss = torch.zeros(3, device=device) + jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] + for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): + img = img.to(device, non_blocking=True) + img = img.half() if half else img.float() # uint8 to fp16/32 + img /= 255.0 # 0 - 255 to 0.0 - 1.0 + targets = targets.to(device) + nb, _, height, width = img.shape # batch size, channels, height, width + + with torch.no_grad(): + # Run model + t = time_synchronized() + out, train_out = model(img, augment=augment) # inference and training outputs + t0 += time_synchronized() - t + + # Compute loss + if compute_loss: + loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls + + # Run NMS + targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels + lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling + t = time_synchronized() + out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) + t1 += time_synchronized() - t + + # Statistics per image + for si, pred in enumerate(out): + labels = targets[targets[:, 0] == si, 1:] + nl = len(labels) + tcls = labels[:, 0].tolist() if nl else [] # target class + path = Path(paths[si]) + seen += 1 + + if len(pred) == 0: + if nl: + stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) + continue + + # Predictions + predn = pred.clone() + scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred + + # Append to text file + if save_txt: + gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh + for *xyxy, conf, cls in predn.tolist(): + xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh + line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format + with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: + f.write(('%g ' * len(line)).rstrip() % line + '\n') + + # W&B logging - Media Panel Plots + if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation + if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: + box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) + wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None + + # Append to pycocotools JSON dictionary + if save_json: + # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 
243.78], "score": 0.236}, ... + image_id = int(path.stem) if path.stem.isnumeric() else path.stem + box = xyxy2xywh(predn[:, :4]) # xywh + box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner + for p, b in zip(pred.tolist(), box.tolist()): + jdict.append({'image_id': image_id, + 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5)}) + + # Assign all predictions as incorrect + correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) + if nl: + detected = [] # target indices + tcls_tensor = labels[:, 0] + + # target boxes + tbox = xywh2xyxy(labels[:, 1:5]) + scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels + if plots: + confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) + + # Per target class + for cls in torch.unique(tcls_tensor): + ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices + pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices + + # Search for detections + if pi.shape[0]: + # Prediction to target ious + ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices + + # Append detections + detected_set = set() + for j in (ious > iouv[0]).nonzero(as_tuple=False): + d = ti[i[j]] # detected target + if d.item() not in detected_set: + detected_set.add(d.item()) + detected.append(d) + correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn + if len(detected) == nl: # all targets already located in image + break + + # Append statistics (correct, conf, pcls, tcls) + stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) + + # Plot images + if plots and batch_i < 3: + f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels + Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() + f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions + Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start() + + # Compute statistics + stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy + if len(stats) and stats[0].any(): + p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, v5_metric=v5_metric, save_dir=save_dir, names=names) + ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95 + mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() + nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class + else: + nt = torch.zeros(1) + + # Print results + pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format + print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) + + # Print results per class + if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): + for i, c in enumerate(ap_class): + print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) + + # Print speeds + t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple + if not training: + print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) + + # Plots + if plots: + confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) + if wandb_logger and wandb_logger.wandb: + val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))] + wandb_logger.log({"Validation": val_batches}) + if wandb_images: + wandb_logger.log({"Bounding Box Debugger/Images": wandb_images}) + + # Save JSON + if save_json and len(jdict): + w = Path(weights[0] if 
isinstance(weights, list) else weights).stem if weights is not None else '' # weights + anno_json = './coco/annotations/instances_val2017.json' # annotations json + pred_json = str(save_dir / f"{w}_predictions.json") # predictions json + print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) + with open(pred_json, 'w') as f: + json.dump(jdict, f) + + try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb + from pycocotools.coco import COCO + from pycocotools.cocoeval import COCOeval + + anno = COCO(anno_json) # init annotations api + pred = anno.loadRes(pred_json) # init predictions api + eval = COCOeval(anno, pred, 'bbox') + if is_coco: + eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate + eval.evaluate() + eval.accumulate() + eval.summarize() + map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) + except Exception as e: + print(f'pycocotools unable to run: {e}') + + # Return results + model.float() # for training + if not training: + s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' + print(f"Results saved to {save_dir}{s}") + maps = np.zeros(nc) + map + for i, c in enumerate(ap_class): + maps[c] = ap[i] + return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(prog='test.py') + parser.add_argument('--weights', nargs='+', type=str, default='yolov7.pt', help='model.pt path(s)') + parser.add_argument('--data', type=str, default='data/coco.yaml', help='*.data path') + parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') + parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') + parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') + parser.add_argument('--iou-thres', type=float, default=0.65, help='IOU threshold for NMS') + parser.add_argument('--task', default='val', help='train, val, test, speed or study') + parser.add_argument('--device', default='', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') + parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') + parser.add_argument('--augment', action='store_true', help='augmented inference') + parser.add_argument('--verbose', action='store_true', help='report mAP by class') + parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') + parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') + parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') + parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') + parser.add_argument('--project', default='runs/test', help='save to project/name') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--no-trace', action='store_true', help='don`t trace model') + parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation') + opt = parser.parse_args() + opt.save_json |= opt.data.endswith('coco.yaml') + opt.data = check_file(opt.data) # check file + print(opt) + #check_requirements() + + if opt.task in ('train', 'val', 'test'): # run normally + test(opt.data, + opt.weights, + opt.batch_size, + opt.img_size, + opt.conf_thres, + opt.iou_thres, + opt.save_json, + opt.single_cls, + opt.augment, + opt.verbose, + save_txt=opt.save_txt | opt.save_hybrid, + save_hybrid=opt.save_hybrid, + save_conf=opt.save_conf, + trace=not opt.no_trace, + v5_metric=opt.v5_metric + ) + + elif opt.task == 'speed': # speed benchmarks + for w in opt.weights: + test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False, v5_metric=opt.v5_metric) + + elif opt.task == 'study': # run over a range of settings and save/plot + # python test.py --task study --data coco.yaml --iou 0.65 --weights yolov7.pt + x = list(range(256, 1536 + 128, 128)) # x axis (image sizes) + for w in opt.weights: + f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to + y = [] # y axis + for i in x: # img-size + print(f'\nRunning {f} point {i}...') + r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, + plots=False, v5_metric=opt.v5_metric) + y.append(r + t) # results and times + np.savetxt(f, y, fmt='%10.4g') # save + os.system('zip -r study.zip study_*.txt') + plot_study_txt(x=x) # plot diff --git a/src/train_utils/train_models/models/yolov7/train.py b/src/train_utils/train_models/models/yolov7/train.py new file mode 100644 index 0000000..86c7e48 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/train.py @@ -0,0 +1,705 @@ +import argparse +import logging +import math +import os +import random +import time +from copy import deepcopy +from pathlib import Path +from threading import Thread + +import numpy as np +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +import torch.utils.data +import yaml +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from tqdm import tqdm + +import test # import test.py to get mAP after each epoch +from models.experimental import attempt_load +from models.yolo import Model +from 
utils.autoanchor import check_anchors +from utils.datasets import create_dataloader +from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ + fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + check_requirements, print_mutation, set_logging, one_cycle, colorstr +from utils.google_utils import attempt_download +from utils.loss import ComputeLoss, ComputeLossOTA +from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume + +logger = logging.getLogger(__name__) + + +def train(hyp, opt, device, tb_writer=None): + logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last = wdir / 'last.pt' + best = wdir / 'best.pt' + results_file = save_dir / 'results.txt' + + # Save run settings + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.dump(vars(opt), f, sort_keys=False) + + # Configure + plots = not opt.evolve # create plots + cuda = device.type != 'cpu' + init_seeds(2 + rank) + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + is_coco = opt.data.endswith('coco.yaml') + + # Logging- Doing this before checking the dataset. 
Might update data_dict + loggers = {'wandb': None} # loggers dict + if rank in [-1, 0]: + opt.hyp = hyp # add hyperparameters + run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None + wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + loggers['wandb'] = wandb_logger.wandb + data_dict = wandb_logger.data_dict + if wandb_logger.wandb: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + + # Model + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(rank): + attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location=device) # load checkpoint + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(state_dict, strict=False) # load + logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + else: + model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] + + # Freeze + freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print('freezing %s' % k) + v.requires_grad = False + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") + + pg0, pg1, pg2 = [], [], [] # optimizer parameter groups + for k, v in model.named_modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + pg2.append(v.bias) # biases + if isinstance(v, nn.BatchNorm2d): + pg0.append(v.weight) # no decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + pg1.append(v.weight) # apply decay + if hasattr(v, 'im'): + if hasattr(v.im, 'implicit'): + pg0.append(v.im.implicit) + else: + for iv in v.im: + pg0.append(iv.implicit) + if hasattr(v, 'imc'): + if hasattr(v.imc, 'implicit'): + pg0.append(v.imc.implicit) + else: + for iv in v.imc: + pg0.append(iv.implicit) + if hasattr(v, 'imb'): + if hasattr(v.imb, 'implicit'): + pg0.append(v.imb.implicit) + else: + for iv in v.imb: + pg0.append(iv.implicit) + if hasattr(v, 'imo'): + if hasattr(v.imo, 'implicit'): + pg0.append(v.imo.implicit) + else: + for iv in v.imo: + pg0.append(iv.implicit) + if hasattr(v, 'ia'): + if hasattr(v.ia, 'implicit'): + pg0.append(v.ia.implicit) + else: + for iv in v.ia: + pg0.append(iv.implicit) + if hasattr(v, 'attn'): + if hasattr(v.attn, 'logit_scale'): + pg0.append(v.attn.logit_scale) + if hasattr(v.attn, 'q_bias'): + pg0.append(v.attn.q_bias) + if hasattr(v.attn, 'v_bias'): + pg0.append(v.attn.v_bias) + if hasattr(v.attn, 'relative_position_bias_table'): + pg0.append(v.attn.relative_position_bias_table) + if hasattr(v, 'rbr_dense'): + if hasattr(v.rbr_dense, 'weight_rbr_origin'): + pg0.append(v.rbr_dense.weight_rbr_origin) + if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): + pg0.append(v.rbr_dense.weight_rbr_avg_conv) + if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): + pg0.append(v.rbr_dense.weight_rbr_pfir_conv) + if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): + pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) + if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): + pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) + if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): + pg0.append(v.rbr_dense.weight_rbr_gconv_dw) + if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): + pg0.append(v.rbr_dense.weight_rbr_gconv_pw) + if hasattr(v.rbr_dense, 'vector'): + pg0.append(v.rbr_dense.vector) + + if opt.adam: + optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + else: + optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + + optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay + optimizer.add_param_group({'params': pg2}) # add pg2 (biases) + logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) + del pg0, pg1, pg2 + + # Scheduler https://arxiv.org/pdf/1812.01187.pdf + # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR + if opt.linear_lr: + lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + else: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + 
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + + # Resume + start_epoch, best_fitness = 0, 0.0 + if pretrained: + # Optimizer + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) + best_fitness = ckpt['best_fitness'] + + # EMA + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] + + # Results + if ckpt.get('training_results') is not None: + results_file.write_text(ckpt['training_results']) # write results.txt + + # Epochs + start_epoch = ckpt['epoch'] + 1 + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % + (weights, ckpt['epoch'], epochs)) + epochs += ckpt['epoch'] # finetune additional epochs + + del ckpt, state_dict + + # Image sizes + gs = max(int(model.stride.max()), 32) # grid size (max stride) + nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) + imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + + # DP mode + if cuda and rank == -1 and torch.cuda.device_count() > 1: + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and rank != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + logger.info('Using SyncBatchNorm()') + + # Trainloader + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, + world_size=opt.world_size, workers=opt.workers, + image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) + mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + nb = len(dataloader) # number of batches + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + + # Process 0 + if rank in [-1, 0]: + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader + hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, + world_size=opt.world_size, workers=opt.workers, + pad=0.5, prefix=colorstr('val: '))[0] + + if not opt.resume: + labels = np.concatenate(dataset.labels, 0) + c = torch.tensor(labels[:, 0]) # classes + # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency + # model._initialize_biases(cf.to(device)) + if plots: + #plot_labels(labels, names, save_dir, loggers) + if tb_writer: + tb_writer.add_histogram('classes', c, 0) + + # Anchors + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 + find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) + + # Model parameters + hyp['box'] *= 3. / nl # scale to layers + hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3. 
/ nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + compute_loss_ota = ComputeLossOTA(model) # init loss class + compute_loss = ComputeLoss(model) # init loss class + logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' + f'Using {dataloader.num_workers} dataloader workers\n' + f'Logging results to {save_dir}\n' + f'Starting training for {epochs} epochs...') + torch.save(model, wdir / 'init.pt') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + model.train() + + # Update image weights (optional) + if opt.image_weights: + # Generate indices + if rank in [-1, 0]: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + # Broadcast if DDP + if rank != -1: + indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + dist.broadcast(indices, 0) + if rank != 0: + dataset.indices = indices.cpu().numpy() + + # Update mosaic border + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if rank != -1: + dataloader.sampler.set_epoch(epoch) + pbar = enumerate(dataloader) + logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) + if rank in [-1, 0]: + pbar = tqdm(pbar, total=nb) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = F.interpolate(imgs, size=ns, 
mode='bilinear', align_corners=False) + + # Forward + with amp.autocast(enabled=cuda): + pred = model(imgs) # forward + if 'loss_ota' not in hyp or hyp['loss_ota'] == 1: + loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs) # loss scaled by batch_size + else: + loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size + if rank != -1: + loss *= opt.world_size # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize + if ni % accumulate == 0: + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + # Print + if rank in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + s = ('%10s' * 2 + '%10.4g' * 6) % ( + '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + pbar.set_description(s) + + # Plot + if plots and ni < 10: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + # if tb_writer: + # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + elif plots and ni == 10 and wandb_logger.wandb: + wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + save_dir.glob('train*.jpg') if x.exists()]}) + + # end batch ------------------------------------------------------------------------------------------------ + # end epoch ---------------------------------------------------------------------------------------------------- + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + scheduler.step() + + # DDP process 0 or single-GPU + if rank in [-1, 0]: + # mAP + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + final_epoch = epoch + 1 == epochs + if not opt.notest or final_epoch: # Calculate mAP + wandb_logger.current_epoch = epoch + 1 + results, maps, times = test.test(data_dict, + batch_size=batch_size * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss, + is_coco=is_coco, + v5_metric=opt.v5_metric) + + # Write + with open(results_file, 'a') as f: + f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss + if len(opt.name) and opt.bucket: + os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) + + # Log + tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params + for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): + if tb_writer: + tb_writer.add_scalar(tag, x, epoch) # tensorboard + if wandb_logger.wandb: + wandb_logger.log({tag: x}) # W&B + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + if fi > best_fitness: + best_fitness = fi + wandb_logger.end_epoch(best_result=best_fitness == fi) + + # Save model + if (not opt.nosave) or (final_epoch and not 
opt.evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': results_file.read_text(), + 'model': deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if (best_fitness == fi) and (epoch >= 200): + torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch)) + if epoch == 0: + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + elif ((epoch+1) % 25) == 0: + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + elif epoch >= (epochs-5): + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + if wandb_logger.wandb: + if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: + wandb_logger.log_model( + last.parent, opt, epoch, fi, best_model=best_fitness == fi) + del ckpt + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training + if rank in [-1, 0]: + # Plots + if plots: + plot_results(save_dir=save_dir) # save as results.png + if wandb_logger.wandb: + files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) + # Test best.pt + logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) + if opt.data.endswith('coco.yaml') and nc == 80: # if COCO + for m in (last, best) if best.exists() else (last): # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco, + v5_metric=opt.v5_metric) + + # Strip optimizers + final = best if best.exists() else last # final model + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + if wandb_logger.wandb and not opt.evolve: # Log the stripped model + wandb_logger.wandb.log_artifact(str(final), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['last', 'best', 'stripped']) + wandb_logger.finish_run() + else: + dist.destroy_process_group() + torch.cuda.empty_cache() + return results + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path') + parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, 
default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + parser.add_argument('--notest', action='store_true', help='only test final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') + parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') + parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') + parser.add_argument('--freeze', nargs='+', type=int, default=[0], help='Freeze layers: backbone of yolov7=50, first3=0 1 2') + parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation') + opt = parser.parse_args() + + # Set DDP variables + opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + set_logging(opt.global_rank) + #if opt.global_rank in [-1, 0]: + # check_git_status() + # check_requirements() + + # Resume + wandb_run = check_wandb_resume(opt) + if opt.resume and not wandb_run: # resume an interrupted run + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' + apriori = opt.global_rank, opt.local_rank + with open(Path(ckpt).parent.parent / 'opt.yaml') as f: + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace + opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, 
opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate + logger.info('Resuming training from %s' % ckpt) + else: + # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') + opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) + opt.name = 'evolve' if opt.evolve else opt.name + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + + # DDP mode + opt.total_batch_size = opt.batch_size + device = select_device(opt.device, batch_size=opt.batch_size) + if opt.local_rank != -1: + assert torch.cuda.device_count() > opt.local_rank + torch.cuda.set_device(opt.local_rank) + device = torch.device('cuda', opt.local_rank) + dist.init_process_group(backend='nccl', init_method='env://') # distributed backend + assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + opt.batch_size = opt.total_batch_size // opt.world_size + + # Hyperparameters + with open(opt.hyp) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + + # Train + logger.info(opt) + if not opt.evolve: + tb_writer = None # init loggers + if opt.global_rank in [-1, 0]: + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") + tb_writer = SummaryWriter(opt.save_dir) # Tensorboard + train(hyp, opt, device, tb_writer) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0), # image mixup (probability) + 'copy_paste': (1, 0.0, 
1.0), # segment copy-paste (probability)
+                'paste_in': (1, 0.0, 1.0)}  # segment copy-paste (probability)
+
+        with open(opt.hyp, errors='ignore') as f:
+            hyp = yaml.safe_load(f)  # load hyps dict
+            if 'anchors' not in hyp:  # anchors commented in hyp.yaml
+                hyp['anchors'] = 3
+
+        assert opt.local_rank == -1, 'DDP mode not implemented for --evolve'
+        opt.notest, opt.nosave = True, True  # only test/save final epoch
+        # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices
+        yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here
+        if opt.bucket:
+            os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists
+
+        for _ in range(300):  # generations to evolve
+            if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate
+                # Select parent(s)
+                parent = 'single'  # parent selection method: 'single' or 'weighted'
+                x = np.loadtxt('evolve.txt', ndmin=2)
+                n = min(5, len(x))  # number of previous results to consider
+                x = x[np.argsort(-fitness(x))][:n]  # top n mutations
+                w = fitness(x) - fitness(x).min()  # weights
+                if parent == 'single' or len(x) == 1:
+                    # x = x[random.randint(0, n - 1)]  # random selection
+                    x = x[random.choices(range(n), weights=w)[0]]  # weighted selection
+                elif parent == 'weighted':
+                    x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination
+
+                # Mutate
+                mp, s = 0.8, 0.2  # mutation probability, sigma
+                npr = np.random
+                npr.seed(int(time.time()))
+                g = np.array([x[0] for x in meta.values()])  # gains 0-1
+                ng = len(meta)
+                v = np.ones(ng)
+                while all(v == 1):  # mutate until a change occurs (prevent duplicates)
+                    v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0)
+                for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300)
+                    hyp[k] = float(x[i + 7] * v[i])  # mutate
+
+            # Constrain to limits
+            for k, v in meta.items():
+                hyp[k] = max(hyp[k], v[1])  # lower limit
+                hyp[k] = min(hyp[k], v[2])  # upper limit
+                hyp[k] = round(hyp[k], 5)  # significant digits
+
+            # Train mutation
+            results = train(hyp.copy(), opt, device)
+
+            # Write mutation results
+            print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
+
+        # Plot results
+        plot_evolution(yaml_file)
+        print(f'Hyperparameter evolution complete.
Best results saved as: {yaml_file}\n' + f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') diff --git a/src/train_utils/train_models/models/yolov7/train_aux.py b/src/train_utils/train_models/models/yolov7/train_aux.py new file mode 100644 index 0000000..0e8053f --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/train_aux.py @@ -0,0 +1,699 @@ +import argparse +import logging +import math +import os +import random +import time +from copy import deepcopy +from pathlib import Path +from threading import Thread + +import numpy as np +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torch.optim.lr_scheduler as lr_scheduler +import torch.utils.data +import yaml +from torch.cuda import amp +from torch.nn.parallel import DistributedDataParallel as DDP +from torch.utils.tensorboard import SummaryWriter +from tqdm import tqdm + +import test # import test.py to get mAP after each epoch +from models.experimental import attempt_load +from models.yolo import Model +from utils.autoanchor import check_anchors +from utils.datasets import create_dataloader +from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ + fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ + check_requirements, print_mutation, set_logging, one_cycle, colorstr +from utils.google_utils import attempt_download +from utils.loss import ComputeLoss, ComputeLossAuxOTA +from utils.plots import plot_images, plot_labels, plot_results, plot_evolution +from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel +from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume + +logger = logging.getLogger(__name__) + + +def train(hyp, opt, device, tb_writer=None): + logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) + save_dir, epochs, batch_size, total_batch_size, weights, rank = \ + Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank + + # Directories + wdir = save_dir / 'weights' + wdir.mkdir(parents=True, exist_ok=True) # make dir + last = wdir / 'last.pt' + best = wdir / 'best.pt' + results_file = save_dir / 'results.txt' + + # Save run settings + with open(save_dir / 'hyp.yaml', 'w') as f: + yaml.dump(hyp, f, sort_keys=False) + with open(save_dir / 'opt.yaml', 'w') as f: + yaml.dump(vars(opt), f, sort_keys=False) + + # Configure + plots = not opt.evolve # create plots + cuda = device.type != 'cpu' + init_seeds(2 + rank) + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + is_coco = opt.data.endswith('coco.yaml') + + # Logging- Doing this before checking the dataset. 
Might update data_dict + loggers = {'wandb': None} # loggers dict + if rank in [-1, 0]: + opt.hyp = hyp # add hyperparameters + run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None + wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) + loggers['wandb'] = wandb_logger.wandb + data_dict = wandb_logger.data_dict + if wandb_logger.wandb: + weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming + + nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes + names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names + assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check + + # Model + pretrained = weights.endswith('.pt') + if pretrained: + with torch_distributed_zero_first(rank): + attempt_download(weights) # download if not found locally + ckpt = torch.load(weights, map_location=device) # load checkpoint + model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys + state_dict = ckpt['model'].float().state_dict() # to FP32 + state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect + model.load_state_dict(state_dict, strict=False) # load + logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report + else: + model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create + with torch_distributed_zero_first(rank): + check_dataset(data_dict) # check + train_path = data_dict['train'] + test_path = data_dict['val'] + + # Freeze + freeze = [] # parameter names to freeze (full or partial) + for k, v in model.named_parameters(): + v.requires_grad = True # train all layers + if any(x in k for x in freeze): + print('freezing %s' % k) + v.requires_grad = False + + # Optimizer + nbs = 64 # nominal batch size + accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing + hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay + logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") + + pg0, pg1, pg2 = [], [], [] # optimizer parameter groups + for k, v in model.named_modules(): + if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): + pg2.append(v.bias) # biases + if isinstance(v, nn.BatchNorm2d): + pg0.append(v.weight) # no decay + elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): + pg1.append(v.weight) # apply decay + if hasattr(v, 'im'): + if hasattr(v.im, 'implicit'): + pg0.append(v.im.implicit) + else: + for iv in v.im: + pg0.append(iv.implicit) + if hasattr(v, 'imc'): + if hasattr(v.imc, 'implicit'): + pg0.append(v.imc.implicit) + else: + for iv in v.imc: + pg0.append(iv.implicit) + if hasattr(v, 'imb'): + if hasattr(v.imb, 'implicit'): + pg0.append(v.imb.implicit) + else: + for iv in v.imb: + pg0.append(iv.implicit) + if hasattr(v, 'imo'): + if hasattr(v.imo, 'implicit'): + pg0.append(v.imo.implicit) + else: + for iv in v.imo: + pg0.append(iv.implicit) + if hasattr(v, 'ia'): + if hasattr(v.ia, 'implicit'): + pg0.append(v.ia.implicit) + else: + for iv in v.ia: + pg0.append(iv.implicit) + if hasattr(v, 'attn'): + if hasattr(v.attn, 'logit_scale'): + pg0.append(v.attn.logit_scale) + if hasattr(v.attn, 'q_bias'): + pg0.append(v.attn.q_bias) 
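# The hasattr checks above and below route YOLOv7's extra learnable tensors into pg0, the group
# optimized without weight decay: BatchNorm weights, the ImplicitA/ImplicitM tensors exposed as
# .im/.imc/.imb/.imo/.ia on modules that define them, attention biases and logit scales, and the
# re-parameterized RepConv weights stored under .rbr_dense. pg1 collects conv/linear weights
# (trained with weight decay) and pg2 collects biases; all three groups are handed to the
# SGD/Adam optimizer further down via optimizer.add_param_group().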
+ if hasattr(v.attn, 'v_bias'): + pg0.append(v.attn.v_bias) + if hasattr(v.attn, 'relative_position_bias_table'): + pg0.append(v.attn.relative_position_bias_table) + if hasattr(v, 'rbr_dense'): + if hasattr(v.rbr_dense, 'weight_rbr_origin'): + pg0.append(v.rbr_dense.weight_rbr_origin) + if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): + pg0.append(v.rbr_dense.weight_rbr_avg_conv) + if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): + pg0.append(v.rbr_dense.weight_rbr_pfir_conv) + if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): + pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) + if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): + pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) + if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): + pg0.append(v.rbr_dense.weight_rbr_gconv_dw) + if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): + pg0.append(v.rbr_dense.weight_rbr_gconv_pw) + if hasattr(v.rbr_dense, 'vector'): + pg0.append(v.rbr_dense.vector) + + if opt.adam: + optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum + else: + optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) + + optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay + optimizer.add_param_group({'params': pg2}) # add pg2 (biases) + logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) + del pg0, pg1, pg2 + + # Scheduler https://arxiv.org/pdf/1812.01187.pdf + # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR + if opt.linear_lr: + lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear + else: + lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] + scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) + # plot_lr_scheduler(optimizer, scheduler, epochs) + + # EMA + ema = ModelEMA(model) if rank in [-1, 0] else None + + # Resume + start_epoch, best_fitness = 0, 0.0 + if pretrained: + # Optimizer + if ckpt['optimizer'] is not None: + optimizer.load_state_dict(ckpt['optimizer']) + best_fitness = ckpt['best_fitness'] + + # EMA + if ema and ckpt.get('ema'): + ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) + ema.updates = ckpt['updates'] + + # Results + if ckpt.get('training_results') is not None: + results_file.write_text(ckpt['training_results']) # write results.txt + + # Epochs + start_epoch = ckpt['epoch'] + 1 + if opt.resume: + assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) + if epochs < start_epoch: + logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% + (weights, ckpt['epoch'], epochs)) + epochs += ckpt['epoch'] # finetune additional epochs + + del ckpt, state_dict + + # Image sizes + gs = max(int(model.stride.max()), 32) # grid size (max stride) + nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) + imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples + + # DP mode + if cuda and rank == -1 and torch.cuda.device_count() > 1: + model = torch.nn.DataParallel(model) + + # SyncBatchNorm + if opt.sync_bn and cuda and rank != -1: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) + logger.info('Using SyncBatchNorm()') + + # Trainloader + dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, + hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, + world_size=opt.world_size, workers=opt.workers, + image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) + mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class + nb = len(dataloader) # number of batches + assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) + + # Process 0 + if rank in [-1, 0]: + testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader + hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, + world_size=opt.world_size, workers=opt.workers, + pad=0.5, prefix=colorstr('val: '))[0] + + if not opt.resume: + labels = np.concatenate(dataset.labels, 0) + c = torch.tensor(labels[:, 0]) # classes + # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency + # model._initialize_biases(cf.to(device)) + if plots: + #plot_labels(labels, names, save_dir, loggers) + if tb_writer: + tb_writer.add_histogram('classes', c, 0) + + # Anchors + if not opt.noautoanchor: + check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) + model.half().float() # pre-reduce anchor precision + + # DDP mode + if cuda and rank != -1: + model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank, + # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698 + find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules())) + + # Model parameters + hyp['box'] *= 3. / nl # scale to layers + hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers + hyp['obj'] *= (imgsz / 640) ** 2 * 3. 
/ nl # scale to image size and layers + hyp['label_smoothing'] = opt.label_smoothing + model.nc = nc # attach number of classes to model + model.hyp = hyp # attach hyperparameters to model + model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) + model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights + model.names = names + + # Start training + t0 = time.time() + nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) + # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training + maps = np.zeros(nc) # mAP per class + results = (0, 0, 0, 0, 0, 0, 0) # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) + scheduler.last_epoch = start_epoch - 1 # do not move + scaler = amp.GradScaler(enabled=cuda) + compute_loss_ota = ComputeLossAuxOTA(model) # init loss class + compute_loss = ComputeLoss(model) # init loss class + logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' + f'Using {dataloader.num_workers} dataloader workers\n' + f'Logging results to {save_dir}\n' + f'Starting training for {epochs} epochs...') + torch.save(model, wdir / 'init.pt') + for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ + model.train() + + # Update image weights (optional) + if opt.image_weights: + # Generate indices + if rank in [-1, 0]: + cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights + iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights + dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx + # Broadcast if DDP + if rank != -1: + indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() + dist.broadcast(indices, 0) + if rank != 0: + dataset.indices = indices.cpu().numpy() + + # Update mosaic border + # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) + # dataset.mosaic_border = [b - imgsz, -b] # height, width borders + + mloss = torch.zeros(4, device=device) # mean losses + if rank != -1: + dataloader.sampler.set_epoch(epoch) + pbar = enumerate(dataloader) + logger.info(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size')) + if rank in [-1, 0]: + pbar = tqdm(pbar, total=nb) # progress bar + optimizer.zero_grad() + for i, (imgs, targets, paths, _) in pbar: # batch ------------------------------------------------------------- + ni = i + nb * epoch # number integrated batches (since train start) + imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 + + # Warmup + if ni <= nw: + xi = [0, nw] # x interp + # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) + accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) + for j, x in enumerate(optimizer.param_groups): + # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 + x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) + if 'momentum' in x: + x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) + + # Multi-scale + if opt.multi_scale: + sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size + sf = sz / max(imgs.shape[2:]) # scale factor + if sf != 1: + ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple) + imgs = F.interpolate(imgs, 
size=ns, mode='bilinear', align_corners=False) + + # Forward + with amp.autocast(enabled=cuda): + pred = model(imgs) # forward + loss, loss_items = compute_loss_ota(pred, targets.to(device), imgs) # loss scaled by batch_size + if rank != -1: + loss *= opt.world_size # gradient averaged between devices in DDP mode + if opt.quad: + loss *= 4. + + # Backward + scaler.scale(loss).backward() + + # Optimize + if ni % accumulate == 0: + scaler.step(optimizer) # optimizer.step + scaler.update() + optimizer.zero_grad() + if ema: + ema.update(model) + + # Print + if rank in [-1, 0]: + mloss = (mloss * i + loss_items) / (i + 1) # update mean losses + mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) + s = ('%10s' * 2 + '%10.4g' * 6) % ( + '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) + pbar.set_description(s) + + # Plot + if plots and ni < 10: + f = save_dir / f'train_batch{ni}.jpg' # filename + Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() + # if tb_writer: + # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) + # tb_writer.add_graph(torch.jit.trace(model, imgs, strict=False), []) # add model graph + elif plots and ni == 10 and wandb_logger.wandb: + wandb_logger.log({"Mosaics": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in + save_dir.glob('train*.jpg') if x.exists()]}) + + # end batch ------------------------------------------------------------------------------------------------ + # end epoch ---------------------------------------------------------------------------------------------------- + + # Scheduler + lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard + scheduler.step() + + # DDP process 0 or single-GPU + if rank in [-1, 0]: + # mAP + ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) + final_epoch = epoch + 1 == epochs + if not opt.notest or final_epoch: # Calculate mAP + wandb_logger.current_epoch = epoch + 1 + results, maps, times = test.test(data_dict, + batch_size=batch_size * 2, + imgsz=imgsz_test, + model=ema.ema, + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + verbose=nc < 50 and final_epoch, + plots=plots and final_epoch, + wandb_logger=wandb_logger, + compute_loss=compute_loss, + is_coco=is_coco, + v5_metric=opt.v5_metric) + + # Write + with open(results_file, 'a') as f: + f.write(s + '%10.4g' * 7 % results + '\n') # append metrics, val_loss + if len(opt.name) and opt.bucket: + os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) + + # Log + tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss + 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', + 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss + 'x/lr0', 'x/lr1', 'x/lr2'] # params + for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): + if tb_writer: + tb_writer.add_scalar(tag, x, epoch) # tensorboard + if wandb_logger.wandb: + wandb_logger.log({tag: x}) # W&B + + # Update best mAP + fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, mAP@.5, mAP@.5-.95] + if fi > best_fitness: + best_fitness = fi + wandb_logger.end_epoch(best_result=best_fitness == fi) + + # Save model + if (not opt.nosave) or (final_epoch and not opt.evolve): # if save + ckpt = {'epoch': epoch, + 'best_fitness': best_fitness, + 'training_results': results_file.read_text(), + 'model': 
deepcopy(model.module if is_parallel(model) else model).half(), + 'ema': deepcopy(ema.ema).half(), + 'updates': ema.updates, + 'optimizer': optimizer.state_dict(), + 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None} + + # Save last, best and delete + torch.save(ckpt, last) + if best_fitness == fi: + torch.save(ckpt, best) + if (best_fitness == fi) and (epoch >= 200): + torch.save(ckpt, wdir / 'best_{:03d}.pt'.format(epoch)) + if epoch == 0: + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + elif ((epoch+1) % 25) == 0: + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + elif epoch >= (epochs-5): + torch.save(ckpt, wdir / 'epoch_{:03d}.pt'.format(epoch)) + if wandb_logger.wandb: + if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1: + wandb_logger.log_model( + last.parent, opt, epoch, fi, best_model=best_fitness == fi) + del ckpt + + # end epoch ---------------------------------------------------------------------------------------------------- + # end training + if rank in [-1, 0]: + # Plots + if plots: + plot_results(save_dir=save_dir) # save as results.png + if wandb_logger.wandb: + files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + wandb_logger.log({"Results": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files + if (save_dir / f).exists()]}) + # Test best.pt + logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) + if opt.data.endswith('coco.yaml') and nc == 80: # if COCO + for m in (last, best) if best.exists() else (last): # speed, mAP tests + results, _, _ = test.test(opt.data, + batch_size=batch_size * 2, + imgsz=imgsz_test, + conf_thres=0.001, + iou_thres=0.7, + model=attempt_load(m, device).half(), + single_cls=opt.single_cls, + dataloader=testloader, + save_dir=save_dir, + save_json=True, + plots=False, + is_coco=is_coco, + v5_metric=opt.v5_metric) + + # Strip optimizers + final = best if best.exists() else last # final model + for f in last, best: + if f.exists(): + strip_optimizer(f) # strip optimizers + if opt.bucket: + os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload + if wandb_logger.wandb and not opt.evolve: # Log the stripped model + wandb_logger.wandb.log_artifact(str(final), type='model', + name='run_' + wandb_logger.wandb_run.id + '_model', + aliases=['last', 'best', 'stripped']) + wandb_logger.finish_run() + else: + dist.destroy_process_group() + torch.cuda.empty_cache() + return results + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--weights', type=str, default='yolo7.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default='', help='model.yaml path') + parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path') + parser.add_argument('--hyp', type=str, default='data/hyp.scratch.p5.yaml', help='hyperparameters path') + parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') + parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') + parser.add_argument('--rect', action='store_true', help='rectangular training') + parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') + parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') + 
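For orientation, the training flags defined in this block (the remaining ones continue below) are typically combined along the following lines; the cfg/hyp paths follow the upstream YOLOv7 layout and are illustrative only, since train_aux.py is the trainer for the P6 / auxiliary-head models such as yolov7-w6:

    # single GPU, P6 model at 1280 px (illustrative paths)
    python train_aux.py --workers 8 --device 0 --batch-size 16 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml

    # multi-GPU DDP launch; --batch-size is the total across all GPUs and must divide evenly
    python -m torch.distributed.launch --nproc_per_node 4 --master_port 9527 train_aux.py --workers 8 --device 0,1,2,3 --sync-bn --batch-size 64 --data data/coco.yaml --img 1280 1280 --cfg cfg/training/yolov7-w6.yaml --weights '' --name yolov7-w6 --hyp data/hyp.scratch.p6.yaml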
parser.add_argument('--notest', action='store_true', help='only test final epoch') + parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') + parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') + parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') + parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') + parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') + parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') + parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') + parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') + parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') + parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') + parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers') + parser.add_argument('--project', default='runs/train', help='save to project/name') + parser.add_argument('--entity', default=None, help='W&B entity') + parser.add_argument('--name', default='exp', help='save to project/name') + parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') + parser.add_argument('--quad', action='store_true', help='quad dataloader') + parser.add_argument('--linear-lr', action='store_true', help='linear LR') + parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon') + parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table') + parser.add_argument('--bbox_interval', type=int, default=-1, help='Set bounding-box image logging interval for W&B') + parser.add_argument('--save_period', type=int, default=-1, help='Log model after every "save_period" epoch') + parser.add_argument('--artifact_alias', type=str, default="latest", help='version of dataset artifact to be used') + parser.add_argument('--v5-metric', action='store_true', help='assume maximum recall as 1.0 in AP calculation') + opt = parser.parse_args() + + # Set DDP variables + opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 + set_logging(opt.global_rank) + #if opt.global_rank in [-1, 0]: + # check_git_status() + # check_requirements() + + # Resume + wandb_run = check_wandb_resume(opt) + if opt.resume and not wandb_run: # resume an interrupted run + ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path + assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' + apriori = opt.global_rank, opt.local_rank + with open(Path(ckpt).parent.parent / 'opt.yaml') as f: + opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace + opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = '', ckpt, True, opt.total_batch_size, *apriori # reinstate + logger.info('Resuming training from %s' % ckpt) + else: + # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') + opt.data, opt.cfg, opt.hyp = check_file(opt.data), 
check_file(opt.cfg), check_file(opt.hyp) # check files + assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' + opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test) + opt.name = 'evolve' if opt.evolve else opt.name + opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve) # increment run + + # DDP mode + opt.total_batch_size = opt.batch_size + device = select_device(opt.device, batch_size=opt.batch_size) + if opt.local_rank != -1: + assert torch.cuda.device_count() > opt.local_rank + torch.cuda.set_device(opt.local_rank) + device = torch.device('cuda', opt.local_rank) + dist.init_process_group(backend='nccl', init_method='env://') # distributed backend + assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' + opt.batch_size = opt.total_batch_size // opt.world_size + + # Hyperparameters + with open(opt.hyp) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps + + # Train + logger.info(opt) + if not opt.evolve: + tb_writer = None # init loggers + if opt.global_rank in [-1, 0]: + prefix = colorstr('tensorboard: ') + logger.info(f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/") + tb_writer = SummaryWriter(opt.save_dir) # Tensorboard + train(hyp, opt, device, tb_writer) + + # Evolve hyperparameters (optional) + else: + # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) + meta = {'lr0': (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3) + 'lrf': (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf) + 'momentum': (0.3, 0.6, 0.98), # SGD momentum/Adam beta1 + 'weight_decay': (1, 0.0, 0.001), # optimizer weight decay + 'warmup_epochs': (1, 0.0, 5.0), # warmup epochs (fractions ok) + 'warmup_momentum': (1, 0.0, 0.95), # warmup initial momentum + 'warmup_bias_lr': (1, 0.0, 0.2), # warmup initial bias lr + 'box': (1, 0.02, 0.2), # box loss gain + 'cls': (1, 0.2, 4.0), # cls loss gain + 'cls_pw': (1, 0.5, 2.0), # cls BCELoss positive_weight + 'obj': (1, 0.2, 4.0), # obj loss gain (scale with pixels) + 'obj_pw': (1, 0.5, 2.0), # obj BCELoss positive_weight + 'iou_t': (0, 0.1, 0.7), # IoU training threshold + 'anchor_t': (1, 2.0, 8.0), # anchor-multiple threshold + 'anchors': (2, 2.0, 10.0), # anchors per output grid (0 to ignore) + 'fl_gamma': (0, 0.0, 2.0), # focal loss gamma (efficientDet default gamma=1.5) + 'hsv_h': (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction) + 'hsv_s': (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction) + 'hsv_v': (1, 0.0, 0.9), # image HSV-Value augmentation (fraction) + 'degrees': (1, 0.0, 45.0), # image rotation (+/- deg) + 'translate': (1, 0.0, 0.9), # image translation (+/- fraction) + 'scale': (1, 0.0, 0.9), # image scale (+/- gain) + 'shear': (1, 0.0, 10.0), # image shear (+/- deg) + 'perspective': (0, 0.0, 0.001), # image perspective (+/- fraction), range 0-0.001 + 'flipud': (1, 0.0, 1.0), # image flip up-down (probability) + 'fliplr': (0, 0.0, 1.0), # image flip left-right (probability) + 'mosaic': (1, 0.0, 1.0), # image mixup (probability) + 'mixup': (1, 0.0, 1.0)} # image mixup (probability) + + with open(opt.hyp, errors='ignore') as f: + hyp = yaml.safe_load(f) # load hyps dict + if 'anchors' not in hyp: # anchors commented in hyp.yaml + hyp['anchors'] = 3 + + assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' + opt.notest, opt.nosave = True, True # only test/save 
final epoch + # ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices + yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml' # save best result here + if opt.bucket: + os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket) # download evolve.txt if exists + + for _ in range(300): # generations to evolve + if Path('evolve.txt').exists(): # if evolve.txt exists: select best hyps and mutate + # Select parent(s) + parent = 'single' # parent selection method: 'single' or 'weighted' + x = np.loadtxt('evolve.txt', ndmin=2) + n = min(5, len(x)) # number of previous results to consider + x = x[np.argsort(-fitness(x))][:n] # top n mutations + w = fitness(x) - fitness(x).min() # weights + if parent == 'single' or len(x) == 1: + # x = x[random.randint(0, n - 1)] # random selection + x = x[random.choices(range(n), weights=w)[0]] # weighted selection + elif parent == 'weighted': + x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination + + # Mutate + mp, s = 0.8, 0.2 # mutation probability, sigma + npr = np.random + npr.seed(int(time.time())) + g = np.array([x[0] for x in meta.values()]) # gains 0-1 + ng = len(meta) + v = np.ones(ng) + while all(v == 1): # mutate until a change occurs (prevent duplicates) + v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) + for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300) + hyp[k] = float(x[i + 7] * v[i]) # mutate + + # Constrain to limits + for k, v in meta.items(): + hyp[k] = max(hyp[k], v[1]) # lower limit + hyp[k] = min(hyp[k], v[2]) # upper limit + hyp[k] = round(hyp[k], 5) # significant digits + + # Train mutation + results = train(hyp.copy(), opt, device) + + # Write mutation results + print_mutation(hyp.copy(), results, yaml_file, opt.bucket) + + # Plot results + plot_evolution(yaml_file) + print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' + f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') diff --git a/src/train_utils/train_models/models/yolov7/utils/__init__.py b/src/train_utils/train_models/models/yolov7/utils/__init__.py new file mode 100644 index 0000000..84952a8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/utils/activations.py b/src/train_utils/train_models/models/yolov7/utils/activations.py new file mode 100644 index 0000000..aa3ddf0 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/activations.py @@ -0,0 +1,72 @@ +# Activation functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +# SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- +class SiLU(nn.Module): # export-friendly version of nn.SiLU() + @staticmethod + def forward(x): + return x * torch.sigmoid(x) + + +class Hardswish(nn.Module): # export-friendly version of nn.Hardswish() + @staticmethod + def forward(x): + # return x * F.hardsigmoid(x) # for torchscript and CoreML + return x * F.hardtanh(x + 3, 0., 6.) / 6. 
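The SiLU and Hardswish classes above exist so that models can be traced and exported with ONNX, CoreML and TorchScript toolchains that lack the native ops. A minimal sketch of how such a swap can be applied before export follows; it is an illustration only, not the repo's own export script, and make_export_friendly is a hypothetical helper that assumes the yolov7 directory is on PYTHONPATH:

    import torch
    import torch.nn as nn

    from utils.activations import Hardswish, SiLU  # the export-friendly classes defined in this file

    def make_export_friendly(model: nn.Module) -> nn.Module:
        # Replace native activation modules in place with the trace-friendly equivalents above.
        for parent in model.modules():
            for name, child in parent.named_children():
                if isinstance(child, nn.SiLU):
                    setattr(parent, name, SiLU())
                elif isinstance(child, nn.Hardswish):
                    setattr(parent, name, Hardswish())
        return model

    # Toy usage; a real YOLO model loaded with attempt_load() would be handled the same way.
    net = make_export_friendly(nn.Sequential(nn.Conv2d(3, 8, 3), nn.SiLU(), nn.Hardswish()))
    torch.onnx.export(net, torch.zeros(1, 3, 64, 64), 'toy.onnx', opset_version=12)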
# for torchscript, CoreML and ONNX + + +class MemoryEfficientSwish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x * torch.sigmoid(x) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + return grad_output * (sx * (1 + x * (1 - sx))) + + def forward(self, x): + return self.F.apply(x) + + +# Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- +class Mish(nn.Module): + @staticmethod + def forward(x): + return x * F.softplus(x).tanh() + + +class MemoryEfficientMish(nn.Module): + class F(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + ctx.save_for_backward(x) + return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) + + @staticmethod + def backward(ctx, grad_output): + x = ctx.saved_tensors[0] + sx = torch.sigmoid(x) + fx = F.softplus(x).tanh() + return grad_output * (fx + x * sx * (1 - fx * fx)) + + def forward(self, x): + return self.F.apply(x) + + +# FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- +class FReLU(nn.Module): + def __init__(self, c1, k=3): # ch_in, kernel + super().__init__() + self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) + self.bn = nn.BatchNorm2d(c1) + + def forward(self, x): + return torch.max(x, self.bn(self.conv(x))) diff --git a/src/train_utils/train_models/models/yolov7/utils/add_nms.py b/src/train_utils/train_models/models/yolov7/utils/add_nms.py new file mode 100644 index 0000000..0a1f797 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/add_nms.py @@ -0,0 +1,155 @@ +import numpy as np +import onnx +from onnx import shape_inference +try: + import onnx_graphsurgeon as gs +except Exception as e: + print('Import onnx_graphsurgeon failure: %s' % e) + +import logging + +LOGGER = logging.getLogger(__name__) + +class RegisterNMS(object): + def __init__( + self, + onnx_model_path: str, + precision: str = "fp32", + ): + + self.graph = gs.import_onnx(onnx.load(onnx_model_path)) + assert self.graph + LOGGER.info("ONNX graph created successfully") + # Fold constants via ONNX-GS that PyTorch2ONNX may have missed + self.graph.fold_constants() + self.precision = precision + self.batch_size = 1 + def infer(self): + """ + Sanitize the graph by cleaning any unconnected nodes, do a topological resort, + and fold constant inputs values. When possible, run shape inference on the + ONNX graph to determine tensor shapes. + """ + for _ in range(3): + count_before = len(self.graph.nodes) + + self.graph.cleanup().toposort() + try: + for node in self.graph.nodes: + for o in node.outputs: + o.shape = None + model = gs.export_onnx(self.graph) + model = shape_inference.infer_shapes(model) + self.graph = gs.import_onnx(model) + except Exception as e: + LOGGER.info(f"Shape inference could not be performed at this time:\n{e}") + try: + self.graph.fold_constants(fold_shapes=True) + except TypeError as e: + LOGGER.error( + "This version of ONNX GraphSurgeon does not support folding shapes, " + f"please upgrade your onnx_graphsurgeon module. Error:\n{e}" + ) + raise + + count_after = len(self.graph.nodes) + if count_before == count_after: + # No new folding occurred in this iteration, so we can stop for now. + break + + def save(self, output_path): + """ + Save the ONNX model to the given location. 
+ Args: + output_path: Path pointing to the location where to write + out the updated ONNX model. + """ + self.graph.cleanup().toposort() + model = gs.export_onnx(self.graph) + onnx.save(model, output_path) + LOGGER.info(f"Saved ONNX model to {output_path}") + + def register_nms( + self, + *, + score_thresh: float = 0.25, + nms_thresh: float = 0.45, + detections_per_img: int = 100, + ): + """ + Register the ``EfficientNMS_TRT`` plugin node. + NMS expects these shapes for its input tensors: + - box_net: [batch_size, number_boxes, 4] + - class_net: [batch_size, number_boxes, number_labels] + Args: + score_thresh (float): The scalar threshold for score (low scoring boxes are removed). + nms_thresh (float): The scalar threshold for IOU (new boxes that have high IOU + overlap with previously selected boxes are removed). + detections_per_img (int): Number of best detections to keep after NMS. + """ + + self.infer() + # Find the concat node at the end of the network + op_inputs = self.graph.outputs + op = "EfficientNMS_TRT" + attrs = { + "plugin_version": "1", + "background_class": -1, # no background class + "max_output_boxes": detections_per_img, + "score_threshold": score_thresh, + "iou_threshold": nms_thresh, + "score_activation": False, + "box_coding": 0, + } + + if self.precision == "fp32": + dtype_output = np.float32 + elif self.precision == "fp16": + dtype_output = np.float16 + else: + raise NotImplementedError(f"Currently not supports precision: {self.precision}") + + # NMS Outputs + output_num_detections = gs.Variable( + name="num_dets", + dtype=np.int32, + shape=[self.batch_size, 1], + ) # A scalar indicating the number of valid detections per batch image. + output_boxes = gs.Variable( + name="det_boxes", + dtype=dtype_output, + shape=[self.batch_size, detections_per_img, 4], + ) + output_scores = gs.Variable( + name="det_scores", + dtype=dtype_output, + shape=[self.batch_size, detections_per_img], + ) + output_labels = gs.Variable( + name="det_classes", + dtype=np.int32, + shape=[self.batch_size, detections_per_img], + ) + + op_outputs = [output_num_detections, output_boxes, output_scores, output_labels] + + # Create the NMS Plugin node with the selected inputs. The outputs of the node will also + # become the final outputs of the graph. + self.graph.layer(op=op, name="batched_nms", inputs=op_inputs, outputs=op_outputs, attrs=attrs) + LOGGER.info(f"Created NMS plugin '{op}' with attributes: {attrs}") + + self.graph.outputs = op_outputs + + self.infer() + + def save(self, output_path): + """ + Save the ONNX model to the given location. + Args: + output_path: Path pointing to the location where to write + out the updated ONNX model. 
+ """ + self.graph.cleanup().toposort() + model = gs.export_onnx(self.graph) + onnx.save(model, output_path) + LOGGER.info(f"Saved ONNX model to {output_path}") diff --git a/src/train_utils/train_models/models/yolov7/utils/autoanchor.py b/src/train_utils/train_models/models/yolov7/utils/autoanchor.py new file mode 100644 index 0000000..f491032 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/autoanchor.py @@ -0,0 +1,160 @@ +# Auto-anchor utils + +import numpy as np +import torch +import yaml +from scipy.cluster.vq import kmeans +from tqdm import tqdm + +from utils.general import colorstr + + +def check_anchor_order(m): + # Check anchor order against stride order for YOLO Detect() module m, and correct if necessary + a = m.anchor_grid.prod(-1).view(-1) # anchor area + da = a[-1] - a[0] # delta a + ds = m.stride[-1] - m.stride[0] # delta s + if da.sign() != ds.sign(): # same order + print('Reversing anchor order') + m.anchors[:] = m.anchors.flip(0) + m.anchor_grid[:] = m.anchor_grid.flip(0) + + +def check_anchors(dataset, model, thr=4.0, imgsz=640): + # Check anchor fit to data, recompute if necessary + prefix = colorstr('autoanchor: ') + print(f'\n{prefix}Analyzing anchors... ', end='') + m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect() + shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) + scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale + wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh + + def metric(k): # compute metric + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + best = x.max(1)[0] # best_x + aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold + bpr = (best > 1. / thr).float().mean() # best possible recall + return bpr, aat + + anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors + bpr, aat = metric(anchors) + print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') + if bpr < 0.98: # threshold to recompute + print('. Attempting to improve anchors, please wait...') + na = m.anchor_grid.numel() // 2 # number of anchors + try: + anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) + except Exception as e: + print(f'{prefix}ERROR: {e}') + new_bpr = metric(anchors)[0] + if new_bpr > bpr: # replace anchors + anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors) + m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference + check_anchor_order(m) + m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss + print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') + else: + print(f'{prefix}Original anchors better than new anchors. 
Proceeding with original anchors.') + print('') # newline + + +def kmean_anchors(path='./data/coco.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): + """ Creates kmeans-evolved anchors from training dataset + + Arguments: + path: path to dataset *.yaml, or a loaded dataset + n: number of anchors + img_size: image size used for training + thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 + gen: generations to evolve anchors using genetic algorithm + verbose: print all results + + Return: + k: kmeans evolved anchors + + Usage: + from utils.autoanchor import *; _ = kmean_anchors() + """ + thr = 1. / thr + prefix = colorstr('autoanchor: ') + + def metric(k, wh): # compute metrics + r = wh[:, None] / k[None] + x = torch.min(r, 1. / r).min(2)[0] # ratio metric + # x = wh_iou(wh, torch.tensor(k)) # iou metric + return x, x.max(1)[0] # x, best_x + + def anchor_fitness(k): # mutation fitness + _, best = metric(torch.tensor(k, dtype=torch.float32), wh) + return (best * (best > thr).float()).mean() # fitness + + def print_results(k): + k = k[np.argsort(k.prod(1))] # sort small to large + x, best = metric(k, wh0) + bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n # best possible recall, anch > thr + print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') + print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' + f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') + for i, x in enumerate(k): + print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg + return k + + if isinstance(path, str): # *.yaml file + with open(path) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # model dict + from utils.datasets import LoadImagesAndLabels + dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) + else: + dataset = path # dataset + + # Get label wh + shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) + wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)]) # wh + + # Filter + i = (wh0 < 3.0).any(1).sum() + if i: + print(f'{prefix}WARNING: Extremely small objects found. 
{i} of {len(wh0)} labels are < 3 pixels in size.') + wh = wh0[(wh0 >= 2.0).any(1)] # filter > 2 pixels + # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1) # multiply by random scale 0-1 + + # Kmeans calculation + print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') + s = wh.std(0) # sigmas for whitening + k, dist = kmeans(wh / s, n, iter=30) # points, mean distance + assert len(k) == n, print(f'{prefix}ERROR: scipy.cluster.vq.kmeans requested {n} points but returned only {len(k)}') + k *= s + wh = torch.tensor(wh, dtype=torch.float32) # filtered + wh0 = torch.tensor(wh0, dtype=torch.float32) # unfiltered + k = print_results(k) + + # Plot + # k, d = [None] * 20, [None] * 20 + # for i in tqdm(range(1, 21)): + # k[i-1], d[i-1] = kmeans(wh / s, i) # points, mean distance + # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) + # ax = ax.ravel() + # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') + # fig, ax = plt.subplots(1, 2, figsize=(14, 7)) # plot wh + # ax[0].hist(wh[wh[:, 0]<100, 0],400) + # ax[1].hist(wh[wh[:, 1]<100, 1],400) + # fig.savefig('wh.png', dpi=200) + + # Evolve + npr = np.random + f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1 # fitness, generations, mutation prob, sigma + pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:') # progress bar + for _ in pbar: + v = np.ones(sh) + while (v == 1).all(): # mutate until a change occurs (prevent duplicates) + v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) + kg = (k.copy() * v).clip(min=2.0) + fg = anchor_fitness(kg) + if fg > f: + f, k = fg, kg.copy() + pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' + if verbose: + print_results(k) + + return print_results(k) diff --git a/src/train_utils/train_models/models/yolov7/utils/aws/__init__.py b/src/train_utils/train_models/models/yolov7/utils/aws/__init__.py new file mode 100644 index 0000000..e9691f2 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/aws/__init__.py @@ -0,0 +1 @@ +#init \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/utils/aws/mime.sh b/src/train_utils/train_models/models/yolov7/utils/aws/mime.sh new file mode 100644 index 0000000..c319a83 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/aws/mime.sh @@ -0,0 +1,26 @@ +# AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ +# This script will run on every instance restart, not only on first start +# --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- + +Content-Type: multipart/mixed; boundary="//" +MIME-Version: 1.0 + +--// +Content-Type: text/cloud-config; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="cloud-config.txt" + +#cloud-config +cloud_final_modules: +- [scripts-user, always] + +--// +Content-Type: text/x-shellscript; charset="us-ascii" +MIME-Version: 1.0 +Content-Transfer-Encoding: 7bit +Content-Disposition: attachment; filename="userdata.txt" + +#!/bin/bash +# --- paste contents of userdata.sh here --- +--// diff --git a/src/train_utils/train_models/models/yolov7/utils/aws/resume.py b/src/train_utils/train_models/models/yolov7/utils/aws/resume.py new file mode 100644 index 0000000..338685b --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/aws/resume.py @@ -0,0 +1,37 @@ +# Resume all interrupted trainings in yolor/ dir including 
DDP trainings +# Usage: $ python utils/aws/resume.py + +import os +import sys +from pathlib import Path + +import torch +import yaml + +sys.path.append('./') # to run '$ python *.py' files in subdirectories + +port = 0 # --master_port +path = Path('').resolve() +for last in path.rglob('*/**/last.pt'): + ckpt = torch.load(last) + if ckpt['optimizer'] is None: + continue + + # Load opt.yaml + with open(last.parent.parent / 'opt.yaml') as f: + opt = yaml.load(f, Loader=yaml.SafeLoader) + + # Get device count + d = opt['device'].split(',') # devices + nd = len(d) # number of devices + ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1) # distributed data parallel + + if ddp: # multi-GPU + port += 1 + cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' + else: # single-GPU + cmd = f'python train.py --resume {last}' + + cmd += ' > /dev/null 2>&1 &' # redirect output to dev/null and run in daemon thread + print(cmd) + os.system(cmd) diff --git a/src/train_utils/train_models/models/yolov7/utils/aws/userdata.sh b/src/train_utils/train_models/models/yolov7/utils/aws/userdata.sh new file mode 100644 index 0000000..5a99d4b --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/aws/userdata.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html +# This script will run only once on first instance start (for a re-start script see mime.sh) +# /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir +# Use >300 GB SSD + +cd home/ubuntu +if [ ! -d yolor ]; then + echo "Running first-time script." # install dependencies, download COCO, pull Docker + git clone -b main https://github.com/WongKinYiu/yolov7 && sudo chmod -R 777 yolov7 + cd yolov7 + bash data/scripts/get_coco.sh && echo "Data done." & + sudo docker pull nvcr.io/nvidia/pytorch:21.08-py3 && echo "Docker done." & + python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & + wait && echo "All tasks done." # finish background tasks +else + echo "Running re-start script." # resume interrupted runs + i=0 + list=$(sudo docker ps -qa) # container list i.e. 
$'one\ntwo\nthree\nfour' + while IFS= read -r id; do + ((i++)) + echo "restarting container $i: $id" + sudo docker start $id + # sudo docker exec -it $id python train.py --resume # single-GPU + sudo docker exec -d $id python utils/aws/resume.py # multi-scenario + done <<<"$list" +fi diff --git a/src/train_utils/train_models/models/yolov7/utils/datasets.py b/src/train_utils/train_models/models/yolov7/utils/datasets.py new file mode 100644 index 0000000..5fe4f7b --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/datasets.py @@ -0,0 +1,1320 @@ +# Dataset utils and dataloaders + +import glob +import logging +import math +import os +import random +import shutil +import time +from itertools import repeat +from multiprocessing.pool import ThreadPool +from pathlib import Path +from threading import Thread + +import cv2 +import numpy as np +import torch +import torch.nn.functional as F +from PIL import Image, ExifTags +from torch.utils.data import Dataset +from tqdm import tqdm + +import pickle +from copy import deepcopy +#from pycocotools import mask as maskUtils +from torchvision.utils import save_image +from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align + +from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \ + resample_segments, clean_str +from utils.torch_utils import torch_distributed_zero_first + +# Parameters +help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data' +img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes +vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes +logger = logging.getLogger(__name__) + +# Get orientation exif tag +for orientation in ExifTags.TAGS.keys(): + if ExifTags.TAGS[orientation] == 'Orientation': + break + + +def get_hash(files): + # Returns a single hash value of a list of files + return sum(os.path.getsize(f) for f in files if os.path.isfile(f)) + + +def exif_size(img): + # Returns exif-corrected PIL size + s = img.size # (width, height) + try: + rotation = dict(img._getexif().items())[orientation] + if rotation == 6: # rotation 270 + s = (s[1], s[0]) + elif rotation == 8: # rotation 90 + s = (s[1], s[0]) + except: + pass + + return s + + +def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False, + rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''): + # Make sure only the first process in DDP process the dataset first, and the following others can use the cache + with torch_distributed_zero_first(rank): + dataset = LoadImagesAndLabels(path, imgsz, batch_size, + augment=augment, # augment images + hyp=hyp, # augmentation hyperparameters + rect=rect, # rectangular training + cache_images=cache, + single_cls=opt.single_cls, + stride=int(stride), + pad=pad, + image_weights=image_weights, + prefix=prefix) + + batch_size = min(batch_size, len(dataset)) + nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers + sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None + loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader + # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader() + dataloader = loader(dataset, + batch_size=batch_size, + num_workers=nw, + sampler=sampler, + pin_memory=True, + 
collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn) + return dataloader, dataset + + +class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader): + """ Dataloader that reuses workers + + Uses same syntax as vanilla DataLoader + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler)) + self.iterator = super().__iter__() + + def __len__(self): + return len(self.batch_sampler.sampler) + + def __iter__(self): + for i in range(len(self)): + yield next(self.iterator) + + +class _RepeatSampler(object): + """ Sampler that repeats forever + + Args: + sampler (Sampler) + """ + + def __init__(self, sampler): + self.sampler = sampler + + def __iter__(self): + while True: + yield from iter(self.sampler) + + +class LoadImages: # for inference + def __init__(self, path, img_size=640, stride=32): + p = str(Path(path).absolute()) # os-agnostic absolute path + if '*' in p: + files = sorted(glob.glob(p, recursive=True)) # glob + elif os.path.isdir(p): + files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir + elif os.path.isfile(p): + files = [p] # files + else: + raise Exception(f'ERROR: {p} does not exist') + + images = [x for x in files if x.split('.')[-1].lower() in img_formats] + videos = [x for x in files if x.split('.')[-1].lower() in vid_formats] + ni, nv = len(images), len(videos) + + self.img_size = img_size + self.stride = stride + self.files = images + videos + self.nf = ni + nv # number of files + self.video_flag = [False] * ni + [True] * nv + self.mode = 'image' + if any(videos): + self.new_video(videos[0]) # new video + else: + self.cap = None + assert self.nf > 0, f'No images or videos found in {p}. ' \ + f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}' + + def __iter__(self): + self.count = 0 + return self + + def __next__(self): + if self.count == self.nf: + raise StopIteration + path = self.files[self.count] + + if self.video_flag[self.count]: + # Read video + self.mode = 'video' + ret_val, img0 = self.cap.read() + if not ret_val: + self.count += 1 + self.cap.release() + if self.count == self.nf: # last video + raise StopIteration + else: + path = self.files[self.count] + self.new_video(path) + ret_val, img0 = self.cap.read() + + self.frame += 1 + print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='') + + else: + # Read image + self.count += 1 + img0 = cv2.imread(path) # BGR + assert img0 is not None, 'Image Not Found ' + path + #print(f'image {self.count}/{self.nf} {path}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return path, img, img0, self.cap + + def new_video(self, path): + self.frame = 0 + self.cap = cv2.VideoCapture(path) + self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + def __len__(self): + return self.nf # number of files + + +class LoadWebcam: # for inference + def __init__(self, pipe='0', img_size=640, stride=32): + self.img_size = img_size + self.stride = stride + + if pipe.isnumeric(): + pipe = eval(pipe) # local camera + # pipe = 'rtsp://192.168.1.64/1' # IP camera + # pipe = 'rtsp://username:password@192.168.1.64/1' # IP camera with login + # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera + + self.pipe = pipe + self.cap = cv2.VideoCapture(pipe) # video capture 
object + self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + if cv2.waitKey(1) == ord('q'): # q to quit + self.cap.release() + cv2.destroyAllWindows() + raise StopIteration + + # Read frame + if self.pipe == 0: # local camera + ret_val, img0 = self.cap.read() + img0 = cv2.flip(img0, 1) # flip left-right + else: # IP camera + n = 0 + while True: + n += 1 + self.cap.grab() + if n % 30 == 0: # skip frames + ret_val, img0 = self.cap.retrieve() + if ret_val: + break + + # Print + assert ret_val, f'Camera Error {self.pipe}' + img_path = 'webcam.jpg' + print(f'webcam {self.count}: ', end='') + + # Padded resize + img = letterbox(img0, self.img_size, stride=self.stride)[0] + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return img_path, img, img0, None + + def __len__(self): + return 0 + + +class LoadStreams: # multiple IP or RTSP cameras + def __init__(self, sources='streams.txt', img_size=640, stride=32): + self.mode = 'stream' + self.img_size = img_size + self.stride = stride + + if os.path.isfile(sources): + with open(sources, 'r') as f: + sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())] + else: + sources = [sources] + + n = len(sources) + self.imgs = [None] * n + self.sources = [clean_str(x) for x in sources] # clean source names for later + for i, s in enumerate(sources): + # Start the thread to read frames from the video stream + print(f'{i + 1}/{n}: {s}... ', end='') + url = eval(s) if s.isnumeric() else s + if 'youtube.com/' in str(url) or 'youtu.be/' in str(url): # if source is YouTube video + check_requirements(('pafy', 'youtube_dl')) + import pafy + url = pafy.new(url).getbest(preftype="mp4").url + cap = cv2.VideoCapture(url) + assert cap.isOpened(), f'Failed to open {s}' + w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + self.fps = cap.get(cv2.CAP_PROP_FPS) % 100 + + _, self.imgs[i] = cap.read() # guarantee first frame + thread = Thread(target=self.update, args=([i, cap]), daemon=True) + print(f' success ({w}x{h} at {self.fps:.2f} FPS).') + thread.start() + print('') # newline + + # check for common shapes + s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0) # shapes + self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal + if not self.rect: + print('WARNING: Different stream shapes detected. 
For optimal performance supply similarly-shaped streams.') + + def update(self, index, cap): + # Read next stream frame in a daemon thread + n = 0 + while cap.isOpened(): + n += 1 + # _, self.imgs[index] = cap.read() + cap.grab() + if n == 4: # read every 4th frame + success, im = cap.retrieve() + self.imgs[index] = im if success else self.imgs[index] * 0 + n = 0 + time.sleep(1 / self.fps) # wait time + + def __iter__(self): + self.count = -1 + return self + + def __next__(self): + self.count += 1 + img0 = self.imgs.copy() + if cv2.waitKey(1) == ord('q'): # q to quit + cv2.destroyAllWindows() + raise StopIteration + + # Letterbox + img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0] + + # Stack + img = np.stack(img, 0) + + # Convert + img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416 + img = np.ascontiguousarray(img) + + return self.sources, img, img0, None + + def __len__(self): + return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years + + +def img2label_paths(img_paths): + # Define label paths as a function of image paths + sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep # /images/, /labels/ substrings + return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths] + + +class LoadImagesAndLabels(Dataset): # for training/testing + def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False, + cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''): + self.img_size = img_size + self.augment = augment + self.hyp = hyp + self.image_weights = image_weights + self.rect = False if image_weights else rect + self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training) + self.mosaic_border = [-img_size // 2, -img_size // 2] + self.stride = stride + self.path = path + #self.albumentations = Albumentations() if augment else None + + try: + f = [] # image files + for p in path if isinstance(path, list) else [path]: + p = Path(p) # os-agnostic + if p.is_dir(): # dir + f += glob.glob(str(p / '**' / '*.*'), recursive=True) + # f = list(p.rglob('**/*.*')) # pathlib + elif p.is_file(): # file + with open(p, 'r') as t: + t = t.read().strip().splitlines() + parent = str(p.parent) + os.sep + f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path + # f += [p.parent / x.lstrip(os.sep) for x in t] # local to global path (pathlib) + else: + raise Exception(f'{prefix}{p} does not exist') + self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats]) + # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats]) # pathlib + assert self.img_files, f'{prefix}No images found' + except Exception as e: + raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}') + + # Check cache + self.label_files = img2label_paths(self.img_files) # labels + cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache') # cached labels + if cache_path.is_file(): + cache, exists = torch.load(cache_path), True # load + #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache: # changed + # cache, exists = self.cache_labels(cache_path, prefix), False # re-cache + else: + cache, exists = self.cache_labels(cache_path, prefix), False # cache + + # Display cache + nf, nm, ne, nc, n = cache.pop('results') # found, missing, empty, corrupted, 
total + if exists: + d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted" + tqdm(None, desc=prefix + d, total=n, initial=n) # display cache results + assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}' + + # Read cache + cache.pop('hash') # remove hash + cache.pop('version') # remove version + labels, shapes, self.segments = zip(*cache.values()) + self.labels = list(labels) + self.shapes = np.array(shapes, dtype=np.float64) + self.img_files = list(cache.keys()) # update + self.label_files = img2label_paths(cache.keys()) # update + if single_cls: + for x in self.labels: + x[:, 0] = 0 + + n = len(shapes) # number of images + bi = np.floor(np.arange(n) / batch_size).astype(int) # batch index + nb = bi[-1] + 1 # number of batches + self.batch = bi # batch index of image + self.n = n + self.indices = range(n) + + # Rectangular Training + if self.rect: + # Sort by aspect ratio + s = self.shapes # wh + ar = s[:, 1] / s[:, 0] # aspect ratio + irect = ar.argsort() + self.img_files = [self.img_files[i] for i in irect] + self.label_files = [self.label_files[i] for i in irect] + self.labels = [self.labels[i] for i in irect] + self.shapes = s[irect] # wh + ar = ar[irect] + + # Set training image shapes + shapes = [[1, 1]] * nb + for i in range(nb): + ari = ar[bi == i] + mini, maxi = ari.min(), ari.max() + if maxi < 1: + shapes[i] = [maxi, 1] + elif mini > 1: + shapes[i] = [1, 1 / mini] + + self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride + + # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM) + self.imgs = [None] * n + if cache_images: + if cache_images == 'disk': + self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy') + self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files] + self.im_cache_dir.mkdir(parents=True, exist_ok=True) + gb = 0 # Gigabytes of cached images + self.img_hw0, self.img_hw = [None] * n, [None] * n + results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n))) + pbar = tqdm(enumerate(results), total=n) + for i, x in pbar: + if cache_images == 'disk': + if not self.img_npy[i].exists(): + np.save(self.img_npy[i].as_posix(), x[0]) + gb += self.img_npy[i].stat().st_size + else: + self.imgs[i], self.img_hw0[i], self.img_hw[i] = x + gb += self.imgs[i].nbytes + pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)' + pbar.close() + + def cache_labels(self, path=Path('./labels.cache'), prefix=''): + # Cache dataset labels, check images and read shapes + x = {} # dict + nm, nf, ne, nc = 0, 0, 0, 0 # number missing, found, empty, duplicate + pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files)) + for i, (im_file, lb_file) in enumerate(pbar): + try: + # verify images + im = Image.open(im_file) + im.verify() # PIL verify + shape = exif_size(im) # image size + segments = [] # instance segments + assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels' + assert im.format.lower() in img_formats, f'invalid image format {im.format}' + + # verify labels + if os.path.isfile(lb_file): + nf += 1 # label found + with open(lb_file, 'r') as f: + l = [x.split() for x in f.read().strip().splitlines()] + if any([len(x) > 8 for x in l]): # is segment + classes = np.array([x[0] for x in l], dtype=np.float32) + segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 
2) for x in l] # (cls, xy1...) + l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1) # (cls, xywh) + l = np.array(l, dtype=np.float32) + if len(l): + assert l.shape[1] == 5, 'labels require 5 columns each' + assert (l >= 0).all(), 'negative labels' + assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels' + assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels' + else: + ne += 1 # label empty + l = np.zeros((0, 5), dtype=np.float32) + else: + nm += 1 # label missing + l = np.zeros((0, 5), dtype=np.float32) + x[im_file] = [l, shape, segments] + except Exception as e: + nc += 1 + print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}') + + pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \ + f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted" + pbar.close() + + if nf == 0: + print(f'{prefix}WARNING: No labels found in {path}. See {help_url}') + + x['hash'] = get_hash(self.label_files + self.img_files) + x['results'] = nf, nm, ne, nc, i + 1 + x['version'] = 0.1 # cache version + torch.save(x, path) # save for next time + logging.info(f'{prefix}New cache created: {path}') + return x + + def __len__(self): + return len(self.img_files) + + # def __iter__(self): + # self.count = -1 + # print('ran dataset iter') + # #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF) + # return self + + def __getitem__(self, index): + index = self.indices[index] # linear, shuffled, or image_weights + + hyp = self.hyp + mosaic = self.mosaic and random.random() < hyp['mosaic'] + if mosaic: + # Load mosaic + if random.random() < 0.8: + img, labels = load_mosaic(self, index) + else: + img, labels = load_mosaic9(self, index) + shapes = None + + # MixUp https://arxiv.org/pdf/1710.09412.pdf + if random.random() < hyp['mixup']: + if random.random() < 0.8: + img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1)) + else: + img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1)) + r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0 + img = (img * r + img2 * (1 - r)).astype(np.uint8) + labels = np.concatenate((labels, labels2), 0) + + else: + # Load image + img, (h0, w0), (h, w) = load_image(self, index) + + # Letterbox + shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape + img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) + shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling + + labels = self.labels[index].copy() + if labels.size: # normalized xywh to pixel xyxy format + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1]) + + if self.augment: + # Augment imagespace + if not mosaic: + img, labels = random_perspective(img, labels, + degrees=hyp['degrees'], + translate=hyp['translate'], + scale=hyp['scale'], + shear=hyp['shear'], + perspective=hyp['perspective']) + + + #img, labels = self.albumentations(img, labels) + + # Augment colorspace + augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v']) + + # Apply cutouts + # if random.random() < 0.9: + # labels = cutout(img, labels) + + if random.random() < hyp['paste_in']: + sample_labels, sample_images, sample_masks = [], [], [] + while len(sample_labels) < 30: + sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1)) + sample_labels += sample_labels_ + 
sample_images += sample_images_ + sample_masks += sample_masks_ + #print(len(sample_labels)) + if len(sample_labels) == 0: + break + labels = pastein(img, labels, sample_labels, sample_images, sample_masks) + + nL = len(labels) # number of labels + if nL: + labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh + labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1 + labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1 + + if self.augment: + # flip up-down + if random.random() < hyp['flipud']: + img = np.flipud(img) + if nL: + labels[:, 2] = 1 - labels[:, 2] + + # flip left-right + if random.random() < hyp['fliplr']: + img = np.fliplr(img) + if nL: + labels[:, 1] = 1 - labels[:, 1] + + labels_out = torch.zeros((nL, 6)) + if nL: + labels_out[:, 1:] = torch.from_numpy(labels) + + # Convert + img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + img = np.ascontiguousarray(img) + + return torch.from_numpy(img), labels_out, self.img_files[index], shapes + + @staticmethod + def collate_fn(batch): + img, label, path, shapes = zip(*batch) # transposed + for i, l in enumerate(label): + l[:, 0] = i # add target image index for build_targets() + return torch.stack(img, 0), torch.cat(label, 0), path, shapes + + @staticmethod + def collate_fn4(batch): + img, label, path, shapes = zip(*batch) # transposed + n = len(shapes) // 4 + img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n] + + ho = torch.tensor([[0., 0, 0, 1, 0, 0]]) + wo = torch.tensor([[0., 0, 1, 0, 0, 0]]) + s = torch.tensor([[1, 1, .5, .5, .5, .5]]) # scale + for i in range(n): # zidane torch.zeros(16,3,720,1280) # BCHW + i *= 4 + if random.random() < 0.5: + im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[ + 0].type(img[i].type()) + l = label[i] + else: + im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2) + l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s + img4.append(im) + label4.append(l) + + for i, l in enumerate(label4): + l[:, 0] = i # add target image index for build_targets() + + return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4 + + +# Ancillary functions -------------------------------------------------------------------------------------------------- +def load_image(self, index): + # loads 1 image from dataset, returns img, original hw, resized hw + img = self.imgs[index] + if img is None: # not cached + path = self.img_files[index] + img = cv2.imread(path) # BGR + assert img is not None, 'Image Not Found ' + path + h0, w0 = img.shape[:2] # orig hw + r = self.img_size / max(h0, w0) # resize image to img_size + if r != 1: # always resize down, only resize up if training with augmentation + interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR + img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp) + return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized + else: + return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized + + +def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5): + r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains + hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV)) + dtype = img.dtype # uint8 + + x = np.arange(0, 256, dtype=np.int16) + lut_hue = ((x * r[0]) % 180).astype(dtype) + lut_sat = np.clip(x * r[1], 0, 255).astype(dtype) + lut_val = np.clip(x * r[2], 0, 255).astype(dtype) + + 
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype) + cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed + + +def hist_equalize(img, clahe=True, bgr=False): + # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255 + yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV) + if clahe: + c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)) + yuv[:, :, 0] = c.apply(yuv[:, :, 0]) + else: + yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram + return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB + + +def load_mosaic(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + #img4, labels4, segments4 = remove_background(img4, labels4, segments4) + #sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste']) + img4, labels4 = random_perspective(img4, labels4, segments4, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img4, labels4 + + +def load_mosaic9(self, index): + # loads images in a 9-mosaic + + labels9, segments9 = [], [] + s = self.img_size + indices = [index] + random.choices(self.indices, k=8) # 8 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img9 + if i == 0: # center + img9 = np.full((s * 3, s * 3, img.shape[2]), 114, 
dtype=np.uint8) # base image with 4 tiles + h0, w0 = h, w + c = s, s, s + w, s + h # xmin, ymin, xmax, ymax (base) coordinates + elif i == 1: # top + c = s, s - h, s + w, s + elif i == 2: # top right + c = s + wp, s - h, s + wp + w, s + elif i == 3: # right + c = s + w0, s, s + w0 + w, s + h + elif i == 4: # bottom right + c = s + w0, s + hp, s + w0 + w, s + hp + h + elif i == 5: # bottom + c = s + w0 - w, s + h0, s + w0, s + h0 + h + elif i == 6: # bottom left + c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h + elif i == 7: # left + c = s - w, s + h0 - h, s, s + h0 + elif i == 8: # top left + c = s - w, s + h0 - hp - h, s, s + h0 - hp + + padx, pady = c[:2] + x1, y1, x2, y2 = [max(x, 0) for x in c] # allocate coords + + # Labels + labels, segments = self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padx, pady) for x in segments] + labels9.append(labels) + segments9.extend(segments) + + # Image + img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:] # img9[ymin:ymax, xmin:xmax] + hp, wp = h, w # height, width previous + + # Offset + yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border] # mosaic center x, y + img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s] + + # Concat/clip labels + labels9 = np.concatenate(labels9, 0) + labels9[:, [1, 3]] -= xc + labels9[:, [2, 4]] -= yc + c = np.array([xc, yc]) # centers + segments9 = [x - c for x in segments9] + + for x in (labels9[:, 1:], *segments9): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img9, labels9 = replicate(img9, labels9) # replicate + + # Augment + #img9, labels9, segments9 = remove_background(img9, labels9, segments9) + img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste']) + img9, labels9 = random_perspective(img9, labels9, segments9, + degrees=self.hyp['degrees'], + translate=self.hyp['translate'], + scale=self.hyp['scale'], + shear=self.hyp['shear'], + perspective=self.hyp['perspective'], + border=self.mosaic_border) # border to remove + + return img9, labels9 + + +def load_samples(self, index): + # loads images in a 4-mosaic + + labels4, segments4 = [], [] + s = self.img_size + yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border] # mosaic center x, y + indices = [index] + random.choices(self.indices, k=3) # 3 additional image indices + for i, index in enumerate(indices): + # Load image + img, _, (h, w) = load_image(self, index) + + # place img in img4 + if i == 0: # top left + img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles + x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image) + x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image) + elif i == 1: # top right + x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc + x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h + elif i == 2: # bottom left + x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h) + x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h) + elif i == 3: # bottom right + x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h) + x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h) + + img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + padw = x1a - x1b + padh = y1a - y1b + + # Labels + labels, segments 
= self.labels[index].copy(), self.segments[index].copy() + if labels.size: + labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh) # normalized xywh to pixel xyxy format + segments = [xyn2xy(x, w, h, padw, padh) for x in segments] + labels4.append(labels) + segments4.extend(segments) + + # Concat/clip labels + labels4 = np.concatenate(labels4, 0) + for x in (labels4[:, 1:], *segments4): + np.clip(x, 0, 2 * s, out=x) # clip when using random_perspective() + # img4, labels4 = replicate(img4, labels4) # replicate + + # Augment + #img4, labels4, segments4 = remove_background(img4, labels4, segments4) + sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5) + + return sample_labels, sample_images, sample_masks + + +def copy_paste(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + if probability and n: + h, w, c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = w - l[3], l[2], w - l[1], l[4] + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + if (ioa < 0.30).all(): # allow 30% obscuration of existing labels + labels = np.concatenate((labels, [[l[0], *box]]), 0) + segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1)) + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + result = cv2.flip(result, 1) # augment segments (flip left-right) + i = result > 0 # pixels to replace + # i[:, :] = result.max(2).reshape(h, w, 1) # act over ch + img[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img, labels, segments + + +def remove_background(img, labels, segments): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + h, w, c = img.shape # height, width, channels + im_new = np.zeros(img.shape, np.uint8) + img_new = np.ones(img.shape, np.uint8) * 114 + for j in range(n): + cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + + result = cv2.bitwise_and(src1=img, src2=im_new) + + i = result > 0 # pixels to replace + img_new[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + + return img_new, labels, segments + + +def sample_segments(img, labels, segments, probability=0.5): + # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy) + n = len(segments) + sample_labels = [] + sample_images = [] + sample_masks = [] + if probability and n: + h, w, c = img.shape # height, width, channels + for j in random.sample(range(n), k=round(probability * n)): + l, s = labels[j], segments[j] + box = l[1].astype(int).clip(0,w-1), l[2].astype(int).clip(0,h-1), l[3].astype(int).clip(0,w-1), l[4].astype(int).clip(0,h-1) + + #print(box) + if (box[2] <= box[0]) or (box[3] <= box[1]): + continue + + sample_labels.append(l[0]) + + mask = np.zeros(img.shape, np.uint8) + + cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED) + sample_masks.append(mask[box[1]:box[3],box[0]:box[2],:]) + + result = cv2.bitwise_and(src1=img, src2=mask) + i = result > 0 # pixels to replace + mask[i] = result[i] # cv2.imwrite('debug.jpg', img) # debug + #print(box) + sample_images.append(mask[box[1]:box[3],box[0]:box[2],:]) + + return 
sample_labels, sample_images, sample_masks + + +def replicate(img, labels): + # Replicate labels + h, w = img.shape[:2] + boxes = labels[:, 1:].astype(int) + x1, y1, x2, y2 = boxes.T + s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels) + for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices + x1b, y1b, x2b, y2b = boxes[i] + bh, bw = y2b - y1b, x2b - x1b + yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y + x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh] + img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax] + labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0) + + return img, labels + + +def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32): + # Resize and pad image while meeting stride-multiple constraints + shape = img.shape[:2] # current shape [height, width] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + if not scaleup: # only scale down, do not scale up (for better test mAP) + r = min(r, 1.0) + + # Compute padding + ratio = r, r # width, height ratios + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding + if auto: # minimum rectangle + dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding + elif scaleFill: # stretch + dw, dh = 0.0, 0.0 + new_unpad = (new_shape[1], new_shape[0]) + ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios + + dw /= 2 # divide padding into 2 sides + dh /= 2 + + if shape[::-1] != new_unpad: # resize + img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border + return img, ratio, (dw, dh) + + +def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, + border=(0, 0)): + # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10)) + # targets = [cls, xyxy] + + height = img.shape[0] + border[0] * 2 # shape(h,w,c) + width = img.shape[1] + border[1] * 2 + + # Center + C = np.eye(3) + C[0, 2] = -img.shape[1] / 2 # x translation (pixels) + C[1, 2] = -img.shape[0] / 2 # y translation (pixels) + + # Perspective + P = np.eye(3) + P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y) + P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x) + + # Rotation and Scale + R = np.eye(3) + a = random.uniform(-degrees, degrees) + # a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations + s = random.uniform(1 - scale, 1.1 + scale) + # s = 2 ** random.uniform(-scale, scale) + R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s) + + # Shear + S = np.eye(3) + S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg) + S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg) + + # Translation + T = np.eye(3) + T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels) + T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels) + + # Combined rotation matrix + M = T @ 
S @ R @ P @ C # order of operations (right to left) is IMPORTANT + if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed + if perspective: + img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114)) + else: # affine + img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)) + + # Visualize + # import matplotlib.pyplot as plt + # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel() + # ax[0].imshow(img[:, :, ::-1]) # base + # ax[1].imshow(img2[:, :, ::-1]) # warped + + # Transform label coordinates + n = len(targets) + if n: + use_segments = any(x.any() for x in segments) + new = np.zeros((n, 4)) + if use_segments: # warp segments + segments = resample_segments(segments) # upsample + for i, segment in enumerate(segments): + xy = np.ones((len(segment), 3)) + xy[:, :2] = segment + xy = xy @ M.T # transform + xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine + + # clip + new[i] = segment2box(xy, width, height) + + else: # warp boxes + xy = np.ones((n * 4, 3)) + xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1 + xy = xy @ M.T # transform + xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine + + # create new boxes + x = xy[:, [0, 2, 4, 6]] + y = xy[:, [1, 3, 5, 7]] + new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T + + # clip + new[:, [0, 2]] = new[:, [0, 2]].clip(0, width) + new[:, [1, 3]] = new[:, [1, 3]].clip(0, height) + + # filter candidates + i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10) + targets = targets[i] + targets[:, 1:5] = new[i] + + return img, targets + + +def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n) + # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio + w1, h1 = box1[2] - box1[0], box1[3] - box1[1] + w2, h2 = box2[2] - box2[0], box2[3] - box2[1] + ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio + return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates + + +def bbox_ioa(box1, box2): + # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. 
boxes are x1y1x2y2 + box2 = box2.transpose() + + # Get the coordinates of bounding boxes + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + + # Intersection area + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ + (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + + # box2 area + box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16 + + # Intersection over box2 area + return inter_area / box2_area + + +def cutout(image, labels): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + # create random masks + scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction + for s in scales: + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + # apply random color mask + image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)] + + # return unobscured labels + if len(labels) and s > 0.03: + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + labels = labels[ioa < 0.60] # remove >60% obscured labels + + return labels + + +def pastein(image, labels, sample_labels, sample_images, sample_masks): + # Applies image cutout augmentation https://arxiv.org/abs/1708.04552 + h, w = image.shape[:2] + + # create random masks + scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6 # image size fraction + for s in scales: + if random.random() < 0.2: + continue + mask_h = random.randint(1, int(h * s)) + mask_w = random.randint(1, int(w * s)) + + # box + xmin = max(0, random.randint(0, w) - mask_w // 2) + ymin = max(0, random.randint(0, h) - mask_h // 2) + xmax = min(w, xmin + mask_w) + ymax = min(h, ymin + mask_h) + + box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32) + if len(labels): + ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area + else: + ioa = np.zeros(1) + + if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin+20) and (ymax > ymin+20): # allow 30% obscuration of existing labels + sel_ind = random.randint(0, len(sample_labels)-1) + #print(len(sample_labels)) + #print(sel_ind) + #print((xmax-xmin, ymax-ymin)) + #print(image[ymin:ymax, xmin:xmax].shape) + #print([[sample_labels[sel_ind], *box]]) + #print(labels.shape) + hs, ws, cs = sample_images[sel_ind].shape + r_scale = min((ymax-ymin)/hs, (xmax-xmin)/ws) + r_w = int(ws*r_scale) + r_h = int(hs*r_scale) + + if (r_w > 10) and (r_h > 10): + r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h)) + r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h)) + temp_crop = image[ymin:ymin+r_h, xmin:xmin+r_w] + m_ind = r_mask > 0 + if m_ind.astype(np.int32).sum() > 60: + temp_crop[m_ind] = r_image[m_ind] + #print(sample_labels[sel_ind]) + #print(sample_images[sel_ind].shape) + #print(temp_crop.shape) + box = np.array([xmin, ymin, xmin+r_w, ymin+r_h], dtype=np.float32) + if len(labels): + labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0) + else: + labels = np.array([[sample_labels[sel_ind], *box]]) + + image[ymin:ymin+r_h, xmin:xmin+r_w] = temp_crop + + return labels + +class Albumentations: + # YOLOv5 Albumentations class (optional, only used if package is installed) + def __init__(self): + 
self.transform = None + import albumentations as A + + self.transform = A.Compose([ + A.CLAHE(p=0.01), + A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01), + A.RandomGamma(gamma_limit=[80, 120], p=0.01), + A.Blur(p=0.01), + A.MedianBlur(p=0.01), + A.ToGray(p=0.01), + A.ImageCompression(quality_lower=75, p=0.01),], + bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels'])) + + #logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p)) + + def __call__(self, im, labels, p=1.0): + if self.transform and random.random() < p: + new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed + im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])]) + return im, labels + + +def create_folder(path='./new'): + # Create folder + if os.path.exists(path): + shutil.rmtree(path) # delete output folder + os.makedirs(path) # make new output folder + + +def flatten_recursive(path='../coco'): + # Flatten a recursive directory by bringing all files to top level + new_path = Path(path + '_flat') + create_folder(new_path) + for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)): + shutil.copyfile(file, new_path / Path(file).name) + + +def extract_boxes(path='../coco/'): # from utils.datasets import *; extract_boxes('../coco128') + # Convert detection dataset into classification dataset, with one directory per class + + path = Path(path) # images dir + shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None # remove existing + files = list(path.rglob('*.*')) + n = len(files) # number of files + for im_file in tqdm(files, total=n): + if im_file.suffix[1:] in img_formats: + # image + im = cv2.imread(str(im_file))[..., ::-1] # BGR to RGB + h, w = im.shape[:2] + + # labels + lb_file = Path(img2label_paths([str(im_file)])[0]) + if Path(lb_file).exists(): + with open(lb_file, 'r') as f: + lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32) # labels + + for j, x in enumerate(lb): + c = int(x[0]) # class + f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg' # new filename + if not f.parent.is_dir(): + f.parent.mkdir(parents=True) + + b = x[1:] * [w, h, w, h] # box + # b[2:] = b[2:].max() # rectangle to square + b[2:] = b[2:] * 1.2 + 3 # pad + b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int) + + b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image + b[[1, 3]] = np.clip(b[[1, 3]], 0, h) + assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}' + + +def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False): + """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files + Usage: from utils.datasets import *; autosplit('../coco') + Arguments + path: Path to images directory + weights: Train, val, test weights (list) + annotated_only: Only use images with an annotated txt file + """ + path = Path(path) # images dir + files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], []) # image files only + n = len(files) # number of files + indices = random.choices([0, 1, 2], weights=weights, k=n) # assign each image to a split + + txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt'] # 3 txt files + [(path / x).unlink() for x in txt if (path / x).exists()] # remove existing + + print(f'Autosplitting images from {path}' + ', using *.txt labeled images 
only' * annotated_only) + for i, img in tqdm(zip(indices, files), total=n): + if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label + with open(path / txt[i], 'a') as f: + f.write(str(img) + '\n') # add image to txt file + + +def load_segmentations(self, index): + key = '/work/handsomejw66/coco17/' + self.img_files[index] + #print(key) + # /work/handsomejw66/coco17/ + return self.segs[key] diff --git a/src/train_utils/train_models/models/yolov7/utils/general.py b/src/train_utils/train_models/models/yolov7/utils/general.py new file mode 100644 index 0000000..decdcc6 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/general.py @@ -0,0 +1,892 @@ +# YOLOR general utils + +import glob +import logging +import math +import os +import platform +import random +import re +import subprocess +import time +from pathlib import Path + +import cv2 +import numpy as np +import pandas as pd +import torch +import torchvision +import yaml + +from utils.google_utils import gsutil_getsize +from utils.metrics import fitness +from utils.torch_utils import init_torch_seeds + +# Settings +torch.set_printoptions(linewidth=320, precision=5, profile='long') +np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format}) # format short g, %precision=5 +pd.options.display.max_columns = 10 +cv2.setNumThreads(0) # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader) +os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8)) # NumExpr max threads + + +def set_logging(rank=-1): + logging.basicConfig( + format="%(message)s", + level=logging.INFO if rank in [-1, 0] else logging.WARN) + + +def init_seeds(seed=0): + # Initialize random number generator (RNG) seeds + random.seed(seed) + np.random.seed(seed) + init_torch_seeds(seed) + + +def get_latest_run(search_dir='.'): + # Return path to most recent 'last.pt' in /runs (i.e. to --resume from) + last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True) + return max(last_list, key=os.path.getctime) if last_list else '' + + +def isdocker(): + # Is environment a Docker container + return Path('/workspace').exists() # or Path('/.dockerenv').exists() + + +def emojis(str=''): + # Return platform-dependent emoji-safe version of string + return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str + + +def check_online(): + # Check internet connectivity + import socket + try: + socket.create_connection(("1.1.1.1", 443), 5) # check host accesability + return True + except OSError: + return False + + +def check_git_status(): + # Recommend 'git pull' if code is out of date + print(colorstr('github: '), end='') + try: + assert Path('.git').exists(), 'skipping check (not a git repository)' + assert not isdocker(), 'skipping check (Docker image)' + assert check_online(), 'skipping check (offline)' + + cmd = 'git fetch && git config --get remote.origin.url' + url = subprocess.check_output(cmd, shell=True).decode().strip().rstrip('.git') # github repo url + branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip() # checked out + n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True)) # commits behind + if n > 0: + s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \ + f"Use 'git pull' to update or 'git clone {url}' to download latest." 
+ else: + s = f'up to date with {url} ✅' + print(emojis(s)) # emoji-safe + except Exception as e: + print(e) + + +def check_requirements(requirements='requirements.txt', exclude=()): + # Check installed dependencies meet requirements (pass *.txt file or list of packages) + import pkg_resources as pkg + prefix = colorstr('red', 'bold', 'requirements:') + if isinstance(requirements, (str, Path)): # requirements.txt file + file = Path(requirements) + if not file.exists(): + print(f"{prefix} {file.resolve()} not found, check failed.") + return + requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude] + else: # list or tuple of packages + requirements = [x for x in requirements if x not in exclude] + + n = 0 # number of packages updates + for r in requirements: + try: + pkg.require(r) + except Exception as e: # DistributionNotFound or VersionConflict if requirements not met + n += 1 + print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...") + print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode()) + + if n: # if packages updated + source = file.resolve() if 'file' in locals() else requirements + s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \ + f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n" + print(emojis(s)) # emoji-safe + + +def check_img_size(img_size, s=32): + # Verify img_size is a multiple of stride s + new_size = make_divisible(img_size, int(s)) # ceil gs-multiple + if new_size != img_size: + print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size)) + return new_size + + +def check_imshow(): + # Check if environment supports image displays + try: + assert not isdocker(), 'cv2.imshow() is disabled in Docker environments' + cv2.imshow('test', np.zeros((1, 1, 3))) + cv2.waitKey(1) + cv2.destroyAllWindows() + cv2.waitKey(1) + return True + except Exception as e: + print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}') + return False + + +def check_file(file): + # Search for file if not found + if Path(file).is_file() or file == '': + return file + else: + files = glob.glob('./**/' + file, recursive=True) # find file + assert len(files), f'File Not Found: {file}' # assert file was found + assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}" # assert unique + return files[0] # return file + + +def check_dataset(dict): + # Download dataset if not found locally + val, s = dict.get('val'), dict.get('download') + if val and len(val): + val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path + if not all(x.exists() for x in val): + print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()]) + if s and len(s): # download script + print('Downloading %s ...' 
% s) + if s.startswith('http') and s.endswith('.zip'): # URL + f = Path(s).name # filename + torch.hub.download_url_to_file(s, f) + r = os.system('unzip -q %s -d ../ && rm %s' % (f, f)) # unzip + else: # bash script + r = os.system(s) + print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure')) # analyze return value + else: + raise Exception('Dataset not found.') + + +def make_divisible(x, divisor): + # Returns x evenly divisible by divisor + return math.ceil(x / divisor) * divisor + + +def clean_str(s): + # Cleans a string by replacing special characters with underscore _ + return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s) + + +def one_cycle(y1=0.0, y2=1.0, steps=100): + # lambda function for sinusoidal ramp from y1 to y2 + return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1 + + +def colorstr(*input): + # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world') + *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string + colors = {'black': '\033[30m', # basic colors + 'red': '\033[31m', + 'green': '\033[32m', + 'yellow': '\033[33m', + 'blue': '\033[34m', + 'magenta': '\033[35m', + 'cyan': '\033[36m', + 'white': '\033[37m', + 'bright_black': '\033[90m', # bright colors + 'bright_red': '\033[91m', + 'bright_green': '\033[92m', + 'bright_yellow': '\033[93m', + 'bright_blue': '\033[94m', + 'bright_magenta': '\033[95m', + 'bright_cyan': '\033[96m', + 'bright_white': '\033[97m', + 'end': '\033[0m', # misc + 'bold': '\033[1m', + 'underline': '\033[4m'} + return ''.join(colors[x] for x in args) + f'{string}' + colors['end'] + + +def labels_to_class_weights(labels, nc=80): + # Get class weights (inverse frequency) from training labels + if labels[0] is None: # no labels loaded + return torch.Tensor() + + labels = np.concatenate(labels, 0) # labels.shape = (866643, 5) for COCO + classes = labels[:, 0].astype(np.int32) # labels = [class xywh] + weights = np.bincount(classes, minlength=nc) # occurrences per class + + # Prepend gridpoint count (for uCE training) + # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum() # gridpoints per image + # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5 # prepend gridpoints to start + + weights[weights == 0] = 1 # replace empty bins with 1 + weights = 1 / weights # number of targets per class + weights /= weights.sum() # normalize + return torch.from_numpy(weights) + + +def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)): + # Produces image weights based on class_weights and image contents + class_counts = np.array([np.bincount(x[:, 0].astype(np.int32), minlength=nc) for x in labels]) + image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1) + # index = random.choices(range(n), weights=image_weights, k=1) # weight image sample + return image_weights + + +def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper) + # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/ + # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n') + # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n') + # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco + # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet + x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 
34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90] + return x + + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + + +def xyn2xy(x, w=640, h=640, padw=0, padh=0): + # Convert normalized segments into pixel segments, shape (n,2) + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * x[:, 0] + padw # top left x + y[:, 1] = h * x[:, 1] + padh # top left y + return y + + +def segment2box(segment, width=640, height=640): + # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy) + x, y = segment.T # segment xy + inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height) + x, y, = x[inside], y[inside] + return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4)) # xyxy + + +def segments2boxes(segments): + # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) 
to (cls, xywh) + boxes = [] + for s in segments: + x, y = s.T # segment xy + boxes.append([x.min(), y.min(), x.max(), y.max()]) # cls, xyxy + return xyxy2xywh(np.array(boxes)) # cls, xywh + + +def resample_segments(segments, n=1000): + # Up-sample an (n,2) segment + for i, s in enumerate(segments): + s = np.concatenate((s, s[0:1, :]), axis=0) + x = np.linspace(0, len(s) - 1, n) + xp = np.arange(len(s)) + segments[i] = np.concatenate([np.interp(x, xp, s[:, i]) for i in range(2)]).reshape(2, -1).T # segment xy + return segments + + +def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None): + # Rescale coords (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + coords[:, [0, 2]] -= pad[0] # x padding + coords[:, [1, 3]] -= pad[1] # y padding + coords[:, :4] /= gain + clip_coords(coords, img0_shape) + return coords + + +def clip_coords(boxes, img_shape): + # Clip bounding xyxy bounding boxes to image shape (height, width) + boxes[:, 0].clamp_(0, img_shape[1]) # x1 + boxes[:, 1].clamp_(0, img_shape[0]) # y1 + boxes[:, 2].clamp_(0, img_shape[1]) # x2 + boxes[:, 3].clamp_(0, img_shape[0]) # y2 + + +def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): + # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + iou = inter / union + + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared + rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2) + with torch.no_grad(): + alpha = v / (v - iou + (1 + eps)) + return iou - (rho2 / c2 + v * alpha) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + c_area = cw * ch + eps # convex area + return iou - (c_area - union) / c_area # GIoU + else: + return iou # IoU + + + + +def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9): + # Returns tsqrt_he IoU of box1 to box2. 
box1 is 4, box2 is nx4 + box2 = box2.T + + # Get the coordinates of bounding boxes + if x1y1x2y2: # x1, y1, x2, y2 = box1 + b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3] + b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] + else: # transform from xywh to xyxy + b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2 + b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2 + b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2 + b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 + + # Intersection area + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ + (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + + # Union Area + w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps + w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps + union = w1 * h1 + w2 * h2 - inter + eps + + # change iou into pow(iou+eps) + # iou = inter / union + iou = torch.pow(inter/union + eps, alpha) + # beta = 2 * alpha + if GIoU or DIoU or CIoU: + cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width + ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height + if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 + c2 = (cw ** 2 + ch ** 2) ** alpha + eps # convex diagonal + rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2) + rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2) + rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha # center distance + if DIoU: + return iou - rho2 / c2 # DIoU + elif CIoU: # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 + v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + with torch.no_grad(): + alpha_ciou = v / ((1 + eps) - inter / union + v) + # return iou - (rho2 / c2 + v * alpha_ciou) # CIoU + return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha)) # CIoU + else: # GIoU https://arxiv.org/pdf/1902.09630.pdf + # c_area = cw * ch + eps # convex area + # return iou - (c_area - union) / c_area # GIoU + c_area = torch.max(cw * ch + eps, union) # convex area + return iou - torch.pow((c_area - union) / c_area + eps, alpha) # GIoU + else: + return iou # torch.log(iou+eps) or iou + + +def box_iou(box1, box2): + # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + box1 (Tensor[N, 4]) + box2 (Tensor[M, 4]) + Returns: + iou (Tensor[N, M]): the NxM matrix containing the pairwise + IoU values for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) + + +def wh_iou(wh1, wh2): + # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2 + wh1 = wh1[:, None] # [N,1,2] + wh2 = wh2[None] # [1,M,2] + inter = torch.min(wh1, wh2).prod(2) # [N,M] + return inter / (wh1.prod(2) + wh2.prod(2) - inter) # iou = inter / (area1 + area2 - inter) + + +def box_giou(box1, box2): + """ + Return generalized intersection-over-union (Jaccard index) between two sets of boxes. 
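Note that box_iou returns the full pairwise N×M matrix, whereas bbox_iou above compares a single box against n boxes; the pairwise form is what merge-NMS and the OTA target assignment later in this patch consume. A small illustration (module path assumed):

```python
import torch
from utils.general import box_iou  # module path assumed

a = torch.tensor([[0., 0., 10., 10.]])
b = torch.tensor([[0., 0., 10., 10.],
                  [5., 5., 15., 15.]])
print(box_iou(a, b))  # tensor([[1.0000, 0.1429]]) -> one row per box in a, one column per box in b
```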
+ Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + areai = whi[:, :, 0] * whi[:, :, 1] + + return iou - (areai - union) / areai + + +def box_ciou(box1, box2, eps: float = 1e-7): + """ + Return complete intersection-over-union (Jaccard index) between two sets of boxes. + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + eps (float, optional): small number to prevent division by zero. Default: 1e-7 + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps + + # centers of boxes + x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 + y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 + x_g = (box2[:, 0] + box2[:, 2]) / 2 + y_g = (box2[:, 1] + box2[:, 3]) / 2 + # The distance between boxes' centers squared. + centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 + + w_pred = box1[:, None, 2] - box1[:, None, 0] + h_pred = box1[:, None, 3] - box1[:, None, 1] + + w_gt = box2[:, 2] - box2[:, 0] + h_gt = box2[:, 3] - box2[:, 1] + + v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2) + with torch.no_grad(): + alpha = v / (1 - iou + v + eps) + return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v + + +def box_diou(box1, box2, eps: float = 1e-7): + """ + Return distance intersection-over-union (Jaccard index) between two sets of boxes. + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + eps (float, optional): small number to prevent division by zero. 
Default: 1e-7 + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values + for every element in boxes1 and boxes2 + """ + + def box_area(box): + # box = 4xn + return (box[2] - box[0]) * (box[3] - box[1]) + + area1 = box_area(box1.T) + area2 = box_area(box2.T) + + inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2) + union = (area1[:, None] + area2 - inter) + + iou = inter / union + + lti = torch.min(box1[:, None, :2], box2[:, :2]) + rbi = torch.max(box1[:, None, 2:], box2[:, 2:]) + + whi = (rbi - lti).clamp(min=0) # [N,M,2] + diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps + + # centers of boxes + x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2 + y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2 + x_g = (box2[:, 0] + box2[:, 2]) / 2 + y_g = (box2[:, 1] + box2[:, 3]) / 2 + # The distance between boxes' centers squared. + centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2 + + # The distance IoU is the IoU penalized by a normalized + # distance between boxes' centers squared. + return iou - (centers_distance_squared / diagonal_distance_squared) + + +def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=()): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + nc = prediction.shape[2] - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + if nc == 1: + x[:, 5:] = x[:, 4:5] # for models with one class, cls_loss is 0 and cls_conf is always 0.5, + # so there is no need to multiplicate. 
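box_giou, box_diou and box_ciou above keep the same pairwise N×M interface as box_iou but subtract the enclosing-box, center-distance and aspect-ratio penalties respectively, so for overlapping boxes their values never exceed plain IoU. A tiny comparison sketch (module path assumed):

```python
import torch
from utils.general import box_iou, box_giou, box_diou, box_ciou  # module path assumed

a = torch.tensor([[0., 0., 10., 10.]])
b = torch.tensor([[2., 2., 12., 8.]])
iou, giou, diou, ciou = box_iou(a, b), box_giou(a, b), box_diou(a, b), box_ciou(a, b)
# each result is a 1x1 matrix; giou/diou/ciou <= iou because of the added penalty terms
```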
+ else: + x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False, + labels=(), kpt_label=False, nc=None, nkpt=None): + """Runs Non-Maximum Suppression (NMS) on inference results + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + if nc is None: + nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Settings + min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height + max_det = 300 # maximum number of detections per image + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 10.0 # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + t = time.time() + output = [torch.zeros((0,6), device=prediction.device)] * prediction.shape[0] + for xi, x in enumerate(prediction): # image index, image inference + # Apply constraints + # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height + x = x[xc[xi]] # confidence + + # Cat apriori labels if autolabelling + if labels and len(labels[xi]): + l = labels[xi] + v = torch.zeros((len(l), nc + 5), device=x.device) + v[:, :4] = l[:, 1:5] # box + v[:, 4] = 1.0 # conf + v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:5+nc] *= x[:, 4:5] # conf = obj_conf * cls_conf + + # Box (center x, center y, width, height) to (x1, y1, x2, y2) + box = xywh2xyxy(x[:, :4]) + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:] > 
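For reference, non_max_suppression expects the already-decoded inference output of shape (batch, n, 5 + nc), with xywh boxes in pixels followed by objectness and per-class confidences, and returns one (n, 6) tensor of [x1, y1, x2, y2, conf, cls] per image. A minimal sketch with a dummy tensor (module path assumed):

```python
import torch
from utils.general import non_max_suppression  # module path assumed

pred = torch.rand(1, 1000, 85)       # batch 1, 1000 candidates, 80 classes: [cx, cy, w, h, obj, cls...]
pred[..., :4] *= 640                 # xywh in pixels for a 640x640 input
out = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
print(out[0].shape)                  # torch.Size([n, 6]) -> xyxy, confidence, class index
```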
conf_thres).nonzero(as_tuple=False).T + x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1) + else: # best class only + if not kpt_label: + conf, j = x[:, 5:].max(1, keepdim=True) + x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres] + else: + kpts = x[:, 6:] + conf, j = x[:, 5:6].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres] + + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + # Apply finite constraint + # if not torch.isfinite(x).all(): + # x = x[torch.isfinite(x).all(1)] + + # Check shape + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: # excess boxes + x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence + + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: # limit detections + i = i[:max_det] + if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean) + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix + weights = iou * scores[None] # box weights + x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes + if redundant: + i = i[iou.sum(1) > 1] # require redundancy + + output[xi] = x[i] + if (time.time() - t) > time_limit: + print(f'WARNING: NMS time limit {time_limit}s exceeded') + break # time limit exceeded + + return output + + +def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer() + # Strip optimizer from 'f' to finalize training, optionally save as 's' + x = torch.load(f, map_location=torch.device('cpu')) + if x.get('ema'): + x['model'] = x['ema'] # replace model with ema + for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates': # keys + x[k] = None + x['epoch'] = -1 + x['model'].half() # to FP16 + for p in x['model'].parameters(): + p.requires_grad = False + torch.save(x, s or f) + mb = os.path.getsize(s or f) / 1E6 # filesize + print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB") + + +def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''): + # Print mutation results to evolve.txt (for use with train.py --evolve) + a = '%10s' * len(hyp) % tuple(hyp.keys()) # hyperparam keys + b = '%10.3g' * len(hyp) % tuple(hyp.values()) # hyperparam values + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c)) + + if bucket: + url = 'gs://%s/evolve.txt' % bucket + if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0): + os.system('gsutil cp %s .' 
% url) # download evolve.txt if larger than local + + with open('evolve.txt', 'a') as f: # append result + f.write(c + b + '\n') + x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0) # load unique rows + x = x[np.argsort(-fitness(x))] # sort + np.savetxt('evolve.txt', x, '%10.3g') # save sort by fitness + + # Save yaml + for i, k in enumerate(hyp.keys()): + hyp[k] = float(x[0, i + 7]) + with open(yaml_file, 'w') as f: + results = tuple(x[0, :7]) + c = '%10.4g' * len(results) % results # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3) + f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n') + yaml.dump(hyp, f, sort_keys=False) + + if bucket: + os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket)) # upload + + +def apply_classifier(x, model, img, im0): + # applies a second stage classifier to yolo outputs + im0 = [im0] if isinstance(im0, np.ndarray) else im0 + for i, d in enumerate(x): # per image + if d is not None and len(d): + d = d.clone() + + # Reshape and pad cutouts + b = xyxy2xywh(d[:, :4]) # boxes + b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1) # rectangle to square + b[:, 2:] = b[:, 2:] * 1.3 + 30 # pad + d[:, :4] = xywh2xyxy(b).long() + + # Rescale boxes from img_size to im0 size + scale_coords(img.shape[2:], d[:, :4], im0[i].shape) + + # Classes + pred_cls1 = d[:, 5].long() + ims = [] + for j, a in enumerate(d): # per item + cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])] + im = cv2.resize(cutout, (224, 224)) # BGR + # cv2.imwrite('test%i.jpg' % j, cutout) + + im = im[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416 + im = np.ascontiguousarray(im, dtype=np.float32) # uint8 to float32 + im /= 255.0 # 0 - 255 to 0.0 - 1.0 + ims.append(im) + + pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1) # classifier prediction + x[i] = x[i][pred_cls1 == pred_cls2] # retain matching class detections + + return x + + +def increment_path(path, exist_ok=True, sep=''): + # Increment path, i.e. runs/exp --> runs/exp{sep}0, runs/exp{sep}1 etc. + path = Path(path) # os-agnostic + if (path.exists() and exist_ok) or (not path.exists()): + return str(path) + else: + dirs = glob.glob(f"{path}{sep}*") # similar paths + matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] + i = [int(m.groups()[0]) for m in matches if m] # indices + n = max(i) + 1 if i else 2 # increment number + return f"{path}{sep}{n}" # update path diff --git a/src/train_utils/train_models/models/yolov7/utils/google_app_engine/Dockerfile b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/Dockerfile new file mode 100644 index 0000000..0155618 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/Dockerfile @@ -0,0 +1,25 @@ +FROM gcr.io/google-appengine/python + +# Create a virtualenv for dependencies. This isolates these packages from +# system-level packages. +# Use -p python3 or -p python3.7 to select python version. Default is version 2. +RUN virtualenv /env -p python3 + +# Setting these environment variables are the same as running +# source /env/bin/activate. +ENV VIRTUAL_ENV /env +ENV PATH /env/bin:$PATH + +RUN apt-get update && apt-get install -y python-opencv + +# Copy the application's requirements.txt and run pip to install all +# dependencies into the virtualenv. +ADD requirements.txt /app/requirements.txt +RUN pip install -r /app/requirements.txt + +# Add the application source code. +ADD . /app + +# Run a WSGI server to serve the application. 
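Typical end-of-training housekeeping with two of the helpers above: increment_path picks a fresh run directory and strip_optimizer shrinks the checkpoint once training is finished. A hedged sketch with hypothetical file names (module path assumed):

```python
from utils.general import increment_path, strip_optimizer  # module path assumed

save_dir = increment_path('runs/train/exp', exist_ok=False)  # 'runs/train/exp' if free, else 'runs/train/exp2', 'exp3', ...
strip_optimizer('best.pt')  # keeps the EMA weights as the model, drops optimizer state, casts to FP16, overwrites best.pt
```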
gunicorn must be declared as +# a dependency in requirements.txt. +CMD gunicorn -b :$PORT main:app diff --git a/src/train_utils/train_models/models/yolov7/utils/google_app_engine/additional_requirements.txt b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/additional_requirements.txt new file mode 100644 index 0000000..5fcc305 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/additional_requirements.txt @@ -0,0 +1,4 @@ +# add these requirements in your app on top of the existing ones +pip==18.1 +Flask==1.0.2 +gunicorn==19.9.0 diff --git a/src/train_utils/train_models/models/yolov7/utils/google_app_engine/app.yaml b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/app.yaml new file mode 100644 index 0000000..69b8f68 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/google_app_engine/app.yaml @@ -0,0 +1,14 @@ +runtime: custom +env: flex + +service: yolorapp + +liveness_check: + initial_delay_sec: 600 + +manual_scaling: + instances: 1 +resources: + cpu: 1 + memory_gb: 4 + disk_size_gb: 20 \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/utils/google_utils.py b/src/train_utils/train_models/models/yolov7/utils/google_utils.py new file mode 100644 index 0000000..f363408 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/google_utils.py @@ -0,0 +1,123 @@ +# Google utils: https://cloud.google.com/storage/docs/reference/libraries + +import os +import platform +import subprocess +import time +from pathlib import Path + +import requests +import torch + + +def gsutil_getsize(url=''): + # gs://bucket/file size https://cloud.google.com/storage/docs/gsutil/commands/du + s = subprocess.check_output(f'gsutil du {url}', shell=True).decode('utf-8') + return eval(s.split(' ')[0]) if len(s) else 0 # bytes + + +def attempt_download(file, repo='WongKinYiu/yolov7'): + # Attempt file download if does not exist + file = Path(str(file).strip().replace("'", '').lower()) + + if not file.exists(): + try: + response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api + assets = [x['name'] for x in response['assets']] # release assets + tag = response['tag_name'] # i.e. 'v1.0' + except: # fallback plan + assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', + 'yolov7-e6e.pt', 'yolov7-w6.pt'] + tag = subprocess.check_output('git tag', shell=True).decode().split()[-1] + + name = file.name + if name in assets: + msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/' + redundant = False # second download option + try: # GitHub + url = f'https://github.com/{repo}/releases/download/{tag}/{name}' + print(f'Downloading {url} to {file}...') + torch.hub.download_url_to_file(url, file) + assert file.exists() and file.stat().st_size > 1E6 # check + except Exception as e: # GCP + print(f'Download error: {e}') + assert redundant, 'No secondary mirror' + url = f'https://storage.googleapis.com/{repo}/ckpt/{name}' + print(f'Downloading {url} to {file}...') + os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights) + finally: + if not file.exists() or file.stat().st_size < 1E6: # check + file.unlink(missing_ok=True) # remove partial downloads + print(f'ERROR: Download failure: {msg}') + print('') + return + + +def gdrive_download(id='', file='tmp.zip'): + # Downloads a file from Google Drive. 
from yolov7.utils.google_utils import *; gdrive_download() + t = time.time() + file = Path(file) + cookie = Path('cookie') # gdrive cookie + print(f'Downloading https://drive.google.com/uc?export=download&id={id} as {file}... ', end='') + file.unlink(missing_ok=True) # remove existing file + cookie.unlink(missing_ok=True) # remove existing cookie + + # Attempt file download + out = "NUL" if platform.system() == "Windows" else "/dev/null" + os.system(f'curl -c ./cookie -s -L "drive.google.com/uc?export=download&id={id}" > {out}') + if os.path.exists('cookie'): # large file + s = f'curl -Lb ./cookie "drive.google.com/uc?export=download&confirm={get_token()}&id={id}" -o {file}' + else: # small file + s = f'curl -s -L -o {file} "drive.google.com/uc?export=download&id={id}"' + r = os.system(s) # execute, capture return + cookie.unlink(missing_ok=True) # remove existing cookie + + # Error check + if r != 0: + file.unlink(missing_ok=True) # remove partial + print('Download error ') # raise Exception('Download error') + return r + + # Unzip if archive + if file.suffix == '.zip': + print('unzipping... ', end='') + os.system(f'unzip -q {file}') # unzip + file.unlink() # remove zip to free space + + print(f'Done ({time.time() - t:.1f}s)') + return r + + +def get_token(cookie="./cookie"): + with open(cookie) as f: + for line in f: + if "download" in line: + return line.split()[-1] + return "" + +# def upload_blob(bucket_name, source_file_name, destination_blob_name): +# # Uploads a file to a bucket +# # https://cloud.google.com/storage/docs/uploading-objects#storage-upload-object-python +# +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(destination_blob_name) +# +# blob.upload_from_filename(source_file_name) +# +# print('File {} uploaded to {}.'.format( +# source_file_name, +# destination_blob_name)) +# +# +# def download_blob(bucket_name, source_blob_name, destination_file_name): +# # Uploads a blob from a bucket +# storage_client = storage.Client() +# bucket = storage_client.get_bucket(bucket_name) +# blob = bucket.blob(source_blob_name) +# +# blob.download_to_filename(destination_file_name) +# +# print('Blob {} downloaded to {}.'.format( +# source_blob_name, +# destination_file_name)) diff --git a/src/train_utils/train_models/models/yolov7/utils/loss.py b/src/train_utils/train_models/models/yolov7/utils/loss.py new file mode 100644 index 0000000..2b1d968 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/loss.py @@ -0,0 +1,1697 @@ +# Loss functions + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy +from utils.torch_utils import is_parallel + + +def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441 + # return positive, negative label smoothing BCE targets + return 1.0 - 0.5 * eps, 0.5 * eps + + +class BCEBlurWithLogitsLoss(nn.Module): + # BCEwithLogitLoss() with reduced missing label effects. 
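smooth_BCE simply converts a label-smoothing epsilon into the positive/negative target values that the loss classes below plug into their classification BCE targets. For example (module path assumed):

```python
from utils.loss import smooth_BCE  # module path assumed

cp, cn = smooth_BCE(eps=0.1)   # (0.95, 0.05): smoothed targets for positive / negative classes
cp, cn = smooth_BCE(eps=0.0)   # (1.0, 0.0): no smoothing, plain one-hot targets
```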
+ def __init__(self, alpha=0.05): + super(BCEBlurWithLogitsLoss, self).__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + self.alpha = alpha + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + pred = torch.sigmoid(pred) # prob from logits + dx = pred - true # reduce only missing label effects + # dx = (pred - true).abs() # reduce missing label and false label effects + alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4)) + loss *= alpha_factor + return loss.mean() + + +class SigmoidBin(nn.Module): + stride = None # strides computed during build + export = False # onnx export + + def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale = 2.0, use_loss_regression=True, use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0): + super(SigmoidBin, self).__init__() + + self.bin_count = bin_count + self.length = bin_count + 1 + self.min = min + self.max = max + self.scale = float(max - min) + self.shift = self.scale / 2.0 + + self.use_loss_regression = use_loss_regression + self.use_fw_regression = use_fw_regression + self.reg_scale = reg_scale + self.BCE_weight = BCE_weight + + start = min + (self.scale/2.0) / self.bin_count + end = max - (self.scale/2.0) / self.bin_count + step = self.scale / self.bin_count + self.step = step + #print(f" start = {start}, end = {end}, step = {step} ") + + bins = torch.range(start, end + 0.0001, step).float() + self.register_buffer('bins', bins) + + + self.cp = 1.0 - 0.5 * smooth_eps + self.cn = 0.5 * smooth_eps + + self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight])) + self.MSELoss = nn.MSELoss() + + def get_length(self): + return self.length + + def forward(self, pred): + assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) + + pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale/2.0) * self.step + pred_bin = pred[..., 1:(1+self.bin_count)] + + _, bin_idx = torch.max(pred_bin, dim=-1) + bin_bias = self.bins[bin_idx] + + if self.use_fw_regression: + result = pred_reg + bin_bias + else: + result = bin_bias + result = result.clamp(min=self.min, max=self.max) + + return result + + + def training_loss(self, pred, target): + assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length) + assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0]) + device = pred.device + + pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale/2.0) * self.step + pred_bin = pred[..., 1:(1+self.bin_count)] + + diff_bin_target = torch.abs(target[..., None] - self.bins) + _, bin_idx = torch.min(diff_bin_target, dim=-1) + + bin_bias = self.bins[bin_idx] + bin_bias.requires_grad = False + result = pred_reg + bin_bias + + target_bins = torch.full_like(pred_bin, self.cn, device=device) # targets + n = pred.shape[0] + target_bins[range(n), bin_idx] = self.cp + + loss_bin = self.BCEbins(pred_bin, target_bins) # BCE + + if self.use_loss_regression: + loss_regression = self.MSELoss(result, target) # MSE + loss = loss_bin + loss_regression + else: + loss = loss_bin + + out_result = result.clamp(min=self.min, max=self.max) + + return loss, out_result + + +class FocalLoss(nn.Module): + # Wraps focal loss around existing loss_fcn(), i.e. 
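SigmoidBin decodes a value from one regression logit plus bin_count classification logits: the arg-max bin gives a coarse value and the regression term adds an offset around that bin's center (or the bin center alone when use_fw_regression is False). A minimal decoding sketch with already-sigmoided activations, matching how the bin-based OTA loss later in this file calls it (module path assumed):

```python
import torch
from utils.loss import SigmoidBin  # module path assumed

sb = SigmoidBin(bin_count=10, min=0.0, max=4.0, use_loss_regression=False)
print(sb.get_length())        # 11 = 1 regression logit + 10 bin logits
pred = torch.rand(5, 11)      # already passed through sigmoid, as ComputeLossBinOTA does
values = sb(pred)             # decoded scalars, clamped to [0.0, 4.0], shape (5,)
```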
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(FocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + # p_t = torch.exp(-loss) + # loss *= self.alpha * (1.000001 - p_t) ** self.gamma # non-zero power for gradient stability + + # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py + pred_prob = torch.sigmoid(pred) # prob from logits + p_t = true * pred_prob + (1 - true) * (1 - pred_prob) + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = (1.0 - p_t) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + + +class QFocalLoss(nn.Module): + # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) + def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): + super(QFocalLoss, self).__init__() + self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() + self.gamma = gamma + self.alpha = alpha + self.reduction = loss_fcn.reduction + self.loss_fcn.reduction = 'none' # required to apply FL to each element + + def forward(self, pred, true): + loss = self.loss_fcn(pred, true) + + pred_prob = torch.sigmoid(pred) # prob from logits + alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha) + modulating_factor = torch.abs(true - pred_prob) ** self.gamma + loss *= alpha_factor * modulating_factor + + if self.reduction == 'mean': + return loss.mean() + elif self.reduction == 'sum': + return loss.sum() + else: # 'none' + return loss + +class RankSort(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10): + + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets > 0.) + fg_logits = logits[fg_labels] + fg_targets = targets[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta_RS + relevant_bg_labels=((targets==0) & (logits>=threshold_logit)) + + relevant_bg_logits = logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + sorting_error=torch.zeros(fg_num).cuda() + ranking_error=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + # Difference Transforms (x_ij) + fg_relations=fg_logits-fg_logits[ii] + bg_relations=relevant_bg_logits-fg_logits[ii] + + if delta_RS > 0: + fg_relations=torch.clamp(fg_relations/(2*delta_RS)+0.5,min=0,max=1) + bg_relations=torch.clamp(bg_relations/(2*delta_RS)+0.5,min=0,max=1) + else: + fg_relations = (fg_relations >= 0).float() + bg_relations = (bg_relations >= 0).float() + + # Rank of ii among pos and false positive number (bg with larger scores) + rank_pos=torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) + + # Rank of ii among all examples + rank=rank_pos+FP_num + + # Ranking error of example ii. target_ranking_error is always 0. (Eq. 
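FocalLoss (and the quality-focal variant) wraps an existing BCEWithLogitsLoss and re-weights each element, which is exactly how the ComputeLoss classes below enable it when hyp['fl_gamma'] > 0. A minimal sketch (module path assumed):

```python
import torch
import torch.nn as nn
from utils.loss import FocalLoss  # module path assumed

criterion = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
logits = torch.randn(8, 80)
targets = torch.randint(0, 2, (8, 80)).float()
loss = criterion(logits, targets)   # scalar, since the wrapped loss used reduction='mean'
```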
7) + ranking_error[ii]=FP_num/rank + + # Current sorting error of example ii. (Eq. 7) + current_sorting_error = torch.sum(fg_relations*(1-fg_targets))/rank_pos + + #Find examples in the target sorted order for example ii + iou_relations = (fg_targets >= fg_targets[ii]) + target_sorted_order = iou_relations * fg_relations + + #The rank of ii among positives in sorted order + rank_pos_target = torch.sum(target_sorted_order) + + #Compute target sorting error. (Eq. 8) + #Since target ranking error is 0, this is also total target error + target_sorting_error= torch.sum(target_sorted_order*(1-fg_targets))/rank_pos_target + + #Compute sorting error on example ii + sorting_error[ii] = current_sorting_error - target_sorting_error + + #Identity Update for Ranking Error + if FP_num > eps: + #For ii the update is the ranking error + fg_grad[ii] -= ranking_error[ii] + #For negatives, distribute error via ranking pmf (i.e. bg_relations/FP_num) + relevant_bg_grad += (bg_relations*(ranking_error[ii]/FP_num)) + + #Find the positives that are misranked (the cause of the error) + #These are the ones with smaller IoU but larger logits + missorted_examples = (~ iou_relations) * fg_relations + + #Denominotor of sorting pmf + sorting_pmf_denom = torch.sum(missorted_examples) + + #Identity Update for Sorting Error + if sorting_pmf_denom > eps: + #For ii the update is the sorting error + fg_grad[ii] -= sorting_error[ii] + #For positives, distribute error via sorting pmf (i.e. missorted_examples/sorting_pmf_denom) + fg_grad += (missorted_examples*(sorting_error[ii]/sorting_pmf_denom)) + + #Normalize gradients by number of positives + classification_grads[fg_labels]= (fg_grad/fg_num) + classification_grads[relevant_bg_labels]= (relevant_bg_grad/fg_num) + + ctx.save_for_backward(classification_grads) + + return ranking_error.mean(), sorting_error.mean() + + @staticmethod + def backward(ctx, out_grad1, out_grad2): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None, None + +class aLRPLoss(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5): + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets == 1) + fg_logits = logits[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta + + #Get valid bg logits + relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) + relevant_bg_logits=logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + rank=torch.zeros(fg_num).cuda() + prec=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + max_prec=0 + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + #x_ij s as score differences with fgs + fg_relations=fg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with fgs + fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) + #Discard i=j in the summation in rank_pos + fg_relations[ii]=0 + + #x_ij s as score differences with bgs + bg_relations=relevant_bg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with bgs + bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) + + #Compute the rank of the example within fgs and number of bgs with larger scores + rank_pos=1+torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) 
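RankSort is a custom autograd.Function: the forward pass computes ranking and sorting errors and stashes hand-derived classification gradients for backward. A usage sketch, under the assumption that a CUDA device is available, since the implementation allocates its buffers with .cuda():

```python
import torch
from utils.loss import RankSort  # module path assumed

logits = torch.randn(1000, device='cuda', requires_grad=True)   # classification scores
targets = torch.zeros(1000, device='cuda')
targets[:50] = 0.5 + 0.5 * torch.rand(50, device='cuda')        # positives carry IoU-like quality targets
ranking_loss, sorting_loss = RankSort.apply(logits, targets)
(ranking_loss + sorting_loss).backward()   # backward returns the pre-computed gradients, scaled by the ranking-loss grad only
```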
+ #Store the total since it is normalizer also for aLRP Regression error + rank[ii]=rank_pos+FP_num + + #Compute precision for this example to compute classification loss + prec[ii]=rank_pos/rank[ii] + #For stability, set eps to a infinitesmall value (e.g. 1e-6), then compute grads + if FP_num > eps: + fg_grad[ii] = -(torch.sum(fg_relations*regression_losses)+FP_num)/rank[ii] + relevant_bg_grad += (bg_relations*(-fg_grad[ii]/FP_num)) + + #aLRP with grad formulation fg gradient + classification_grads[fg_labels]= fg_grad + #aLRP with grad formulation bg gradient + classification_grads[relevant_bg_labels]= relevant_bg_grad + + classification_grads /= (fg_num) + + cls_loss=1-prec.mean() + ctx.save_for_backward(classification_grads) + + return cls_loss, rank, order + + @staticmethod + def backward(ctx, out_grad1, out_grad2, out_grad3): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None, None, None + + +class APLoss(torch.autograd.Function): + @staticmethod + def forward(ctx, logits, targets, delta=1.): + classification_grads=torch.zeros(logits.shape).cuda() + + #Filter fg logits + fg_labels = (targets == 1) + fg_logits = logits[fg_labels] + fg_num = len(fg_logits) + + #Do not use bg with scores less than minimum fg logit + #since changing its score does not have an effect on precision + threshold_logit = torch.min(fg_logits)-delta + + #Get valid bg logits + relevant_bg_labels=((targets==0)&(logits>=threshold_logit)) + relevant_bg_logits=logits[relevant_bg_labels] + relevant_bg_grad=torch.zeros(len(relevant_bg_logits)).cuda() + rank=torch.zeros(fg_num).cuda() + prec=torch.zeros(fg_num).cuda() + fg_grad=torch.zeros(fg_num).cuda() + + max_prec=0 + #sort the fg logits + order=torch.argsort(fg_logits) + #Loops over each positive following the order + for ii in order: + #x_ij s as score differences with fgs + fg_relations=fg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with fgs + fg_relations=torch.clamp(fg_relations/(2*delta)+0.5,min=0,max=1) + #Discard i=j in the summation in rank_pos + fg_relations[ii]=0 + + #x_ij s as score differences with bgs + bg_relations=relevant_bg_logits-fg_logits[ii] + #Apply piecewise linear function and determine relations with bgs + bg_relations=torch.clamp(bg_relations/(2*delta)+0.5,min=0,max=1) + + #Compute the rank of the example within fgs and number of bgs with larger scores + rank_pos=1+torch.sum(fg_relations) + FP_num=torch.sum(bg_relations) + #Store the total since it is normalizer also for aLRP Regression error + rank[ii]=rank_pos+FP_num + + #Compute precision for this example + current_prec=rank_pos/rank[ii] + + #Compute interpolated AP and store gradients for relevant bg examples + if (max_prec<=current_prec): + max_prec=current_prec + relevant_bg_grad += (bg_relations/rank[ii]) + else: + relevant_bg_grad += (bg_relations/rank[ii])*(((1-max_prec)/(1-current_prec))) + + #Store fg gradients + fg_grad[ii]=-(1-max_prec) + prec[ii]=max_prec + + #aLRP with grad formulation fg gradient + classification_grads[fg_labels]= fg_grad + #aLRP with grad formulation bg gradient + classification_grads[relevant_bg_labels]= relevant_bg_grad + + classification_grads /= fg_num + + cls_loss=1-prec.mean() + ctx.save_for_backward(classification_grads) + + return cls_loss + + @staticmethod + def backward(ctx, out_grad1): + g1, =ctx.saved_tensors + return g1*out_grad1, None, None + + +class ComputeLoss: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLoss, self).__init__() + device = 
next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7 + #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = indices[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), tcls[i]] = self.cp + #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype) + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + tcls, tbox, indices, anch = [], [], [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + tbox.append(torch.cat((gxy - gij, gwh), 1)) # box + anch.append(anchors[a]) # anchors + tcls.append(c) # class + + return tcls, tbox, indices, anch + + +class ComputeLossOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] + + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + pxy = ps[:, :2].sigmoid() * 2. - 0.5 + #pxy = ps[:, :2].sigmoid() * 3. - 1. 
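ComputeLoss is driven once per batch from the training loop: it takes the list of per-scale predictions and an (n, 6) target tensor of (image_idx, class, x, y, w, h) in normalized xywh, and returns the batch-scaled total loss plus detached per-component values. A hedged sketch; `model` and `imgs` are assumed to exist, and the model must expose .hyp, .gr and a Detect() head:

```python
import torch
from utils.loss import ComputeLoss  # module path assumed

compute_loss = ComputeLoss(model)                            # hypothetical YOLO model
targets = torch.tensor([[0., 5., 0.50, 0.40, 0.20, 0.30]])   # image 0, class 5, normalized xywh
preds = model(imgs)                                          # list of per-scale feature maps
loss, loss_items = compute_loss(preds, targets.to(imgs.device))
loss.backward()                                              # loss_items = (lbox, lobj, lcls, total), detached
```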
+ pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., 4], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + #indices, anch = self.find_positive(p, targets) + indices, anch = self.find_3_positive(p, targets) + #indices, anch = self.find_4_positive(p, targets) + #indices, anch = self.find_5_positive(p, targets) + #indices, anch = self.find_9_positive(p, targets) + device = torch.device(targets.device) + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device)) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. 
+ pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost, device=device) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device) + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input 
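The dynamic-k selection above decides how many candidate anchors each ground truth keeps: the integer part of the sum of its top-10 pairwise IoUs, floored at one, after which the lowest-cost candidates are taken. A small numeric illustration:

```python
import torch

pair_wise_iou = torch.tensor([[0.9, 0.8, 0.7, 0.1],   # GT 0 overlaps several candidates well
                              [0.2, 0.1, 0.0, 0.0]])  # GT 1 barely overlaps anything
top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)
print(dynamic_ks)   # tensor([2, 1], dtype=torch.int32): GT 0 keeps 2 candidates, GT 1 keeps 1
```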
targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + +class ComputeLossBinOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossBinOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + #MSEangle = nn.MSELoss().to(device) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count': + setattr(self, k, getattr(det, k)) + + #xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device) + wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device) + #angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device) + self.wh_bin_sigmoid = wh_bin_sigmoid + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, 
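find_3_positive implements the "3 positive cells" rule used by this family of models: each target is assigned to the grid cell containing its center plus the two neighbouring cells it is closest to (bias g = 0.5). A small worked example of the membership test:

```python
import torch

gxy = torch.tensor([[3.3, 7.8]])                 # target center in grid units
g = 0.5
j, k = ((gxy % 1. < g) & (gxy > 1.)).T           # j=True (near left cell border), k=False (not near top border)
# combined with the mirrored test on (grid_size - gxy), the selected cells are
# (3, 7) = own cell, (2, 7) = cell to the left, and (3, 8) = cell below
```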
device=device), torch.zeros(1, device=device) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs) + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] + + + # Losses + for i, pi in enumerate(p): # layer index, layer predictions + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + + obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 # x,y, w-bce, h-bce # xy_bin_sigmoid.get_length()*2 + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + + #pxy = ps[:, :2].sigmoid() * 2. - 0.5 + ##pxy = ps[:, :2].sigmoid() * 3. - 1. + #pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + #pbox = torch.cat((pxy, pwh), 1) # predicted box + + #x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0]) + #y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1]) + w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3+self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0]) + h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3+self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1]) + + pw *= anchors[i][..., 0] + ph *= anchors[i][..., 1] + + px = ps[:, 0].sigmoid() * 2. - 0.5 + py = ps[:, 1].sigmoid() * 2. - 0.5 + + lbox += w_loss + h_loss # + x_loss + y_loss + + #print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n") + + pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device) # predicted box + + + + + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, (1+obj_idx):], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, (1+obj_idx):], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + obji = self.BCEobj(pi[..., obj_idx], tobj) + lobj += obji * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + #indices, anch = self.find_positive(p, targets) + indices, anch = self.find_3_positive(p, targets) + #indices, anch = self.find_4_positive(p, targets) + #indices, anch = self.find_5_positive(p, targets) + #indices, anch = self.find_9_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in 
range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + obj_idx = self.wh_bin_sigmoid.get_length()*2 + 2 + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, obj_idx:(obj_idx+1)]) + p_cls.append(fg_pred[:, (obj_idx+1):]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3+self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i] + ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3+self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i] + + pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + 
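# --- Editor's note (illustrative sketch, not part of the patch) ---------------
# The matching a few lines above is SimOTA-style dynamic-k assignment: each
# ground truth keeps its dynamic_k lowest-cost candidates, where dynamic_k is
# the clamped sum of that target's top-10 IoUs, and any candidate claimed by
# several targets is handed to the cheapest one. A self-contained sketch of the
# same rule on toy tensors (function name and toy shapes are hypothetical):
import torch

def simota_assign(cost, pair_wise_iou, topk=10):
    """cost, pair_wise_iou: (num_gt, num_candidates) -> 0/1 matching matrix."""
    num_gt, num_cand = cost.shape
    matching_matrix = torch.zeros_like(cost)
    top_k, _ = torch.topk(pair_wise_iou, min(topk, num_cand), dim=1)
    dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)              # k per ground truth
    for gt_idx in range(num_gt):
        _, pos_idx = torch.topk(cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
        matching_matrix[gt_idx, pos_idx] = 1.0
    multi = matching_matrix.sum(0) > 1                               # candidate claimed by >1 ground truth
    if multi.any():
        _, cost_argmin = torch.min(cost[:, multi], dim=0)
        matching_matrix[:, multi] = 0.0
        matching_matrix[cost_argmin, multi] = 1.0
    return matching_matrix

# toy usage: iou = torch.rand(2, 6); print(simota_assign(-torch.log(iou + 1e-8), iou))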
matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + +class ComputeLossAuxOTA: + # Compute losses + def __init__(self, model, autobalance=False): + super(ComputeLossAuxOTA, self).__init__() + device = next(model.parameters()).device # get model device + h = model.hyp # hyperparameters + + # Define criteria + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + + # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 + self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + + # Focal loss + g = h['fl_gamma'] # focal loss gamma + if g > 0: + BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) + + det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index + self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance + for k in 'na', 'nc', 'nl', 'anchors', 'stride': + setattr(self, k, getattr(det, k)) + + def __call__(self, p, targets, imgs): # predictions, targets, model + device = targets.device + lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device) + bs_aux, as_aux_, gjs_aux, gis_aux, targets_aux, anchors_aux = self.build_targets2(p[:self.nl], targets, imgs) + bs, as_, gjs, gis, targets, anchors = self.build_targets(p[:self.nl], targets, imgs) + pre_gen_gains_aux = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] + pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]] + + + # Losses + for i in range(self.nl): # layer index, layer predictions + pi = p[i] + pi_aux = p[i+self.nl] + b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx + b_aux, a_aux, gj_aux, gi_aux = bs_aux[i], as_aux_[i], gjs_aux[i], gis_aux[i] # image, anchor, gridy, gridx + tobj = torch.zeros_like(pi[..., 0], device=device) # target obj + tobj_aux = torch.zeros_like(pi_aux[..., 0], device=device) # target obj + + n = b.shape[0] # number of targets + if n: + ps = pi[b, a, gj, gi] # prediction subset corresponding to targets + + # Regression + grid = torch.stack([gi, gj], dim=1) + pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] + pbox = torch.cat((pxy, pwh), 1) # predicted box + selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i] + selected_tbox[:, :2] -= grid + iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += (1.0 - iou).mean() # iou loss + + # Objectness + tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio + + # Classification + selected_tcls = targets[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets + t[range(n), selected_tcls] = self.cp + lcls += self.BCEcls(ps[:, 5:], t) # BCE + + # Append targets to text file + # with open('targets.txt', 'a') as file: + # [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)] + + n_aux = b_aux.shape[0] # number of targets + if n_aux: + ps_aux = pi_aux[b_aux, a_aux, gj_aux, gi_aux] # prediction subset corresponding to targets + grid_aux = torch.stack([gi_aux, gj_aux], dim=1) + pxy_aux = ps_aux[:, :2].sigmoid() * 2. - 0.5 + #pxy_aux = ps_aux[:, :2].sigmoid() * 3. - 1. + pwh_aux = (ps_aux[:, 2:4].sigmoid() * 2) ** 2 * anchors_aux[i] + pbox_aux = torch.cat((pxy_aux, pwh_aux), 1) # predicted box + selected_tbox_aux = targets_aux[i][:, 2:6] * pre_gen_gains_aux[i] + selected_tbox_aux[:, :2] -= grid_aux + iou_aux = bbox_iou(pbox_aux.T, selected_tbox_aux, x1y1x2y2=False, CIoU=True) # iou(prediction, target) + lbox += 0.25 * (1.0 - iou_aux).mean() # iou loss + + # Objectness + tobj_aux[b_aux, a_aux, gj_aux, gi_aux] = (1.0 - self.gr) + self.gr * iou_aux.detach().clamp(0).type(tobj_aux.dtype) # iou ratio + + # Classification + selected_tcls_aux = targets_aux[i][:, 1].long() + if self.nc > 1: # cls loss (only if multiple classes) + t_aux = torch.full_like(ps_aux[:, 5:], self.cn, device=device) # targets + t_aux[range(n_aux), selected_tcls_aux] = self.cp + lcls += 0.25 * self.BCEcls(ps_aux[:, 5:], t_aux) # BCE + + obji = self.BCEobj(pi[..., 4], tobj) + obji_aux = self.BCEobj(pi_aux[..., 4], tobj_aux) + lobj += obji * self.balance[i] + 0.25 * obji_aux * self.balance[i] # obj loss + if self.autobalance: + self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item() + + if self.autobalance: + self.balance = [x / self.balance[self.ssi] for x in self.balance] + lbox *= self.hyp['box'] + lobj *= self.hyp['obj'] + lcls *= self.hyp['cls'] + bs = tobj.shape[0] # batch size + + loss = lbox + lobj + lcls + return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach() + + def build_targets(self, p, targets, imgs): + + indices, anch = self.find_3_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + 
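# --- Editor's note (illustrative sketch, not part of the patch) ---------------
# The lines that follow decode each candidate prediction back to input-image
# pixels: xy = (sigmoid(t_xy) * 2 - 0.5 + grid_cell) * stride and
# wh = (sigmoid(t_wh) * 2) ** 2 * anchor * stride. A minimal standalone decoder
# with hypothetical names, kept separate from the loss classes:
import torch

def decode_yolo_box(raw, grid_xy, anchor_wh, stride):
    """raw: (n, >=4) raw head outputs; grid_xy: (n, 2) cell indices;
    anchor_wh: (n, 2) anchors in grid units; stride: pixels per cell."""
    xy = (raw[:, :2].sigmoid() * 2. - 0.5 + grid_xy) * stride
    wh = (raw[:, 2:4].sigmoid() * 2) ** 2 * anchor_wh * stride
    return torch.cat([xy, wh], dim=1)  # xywh in pixels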
from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + + from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = 
torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def build_targets2(self, p, targets, imgs): + + indices, anch = self.find_5_positive(p, targets) + + matching_bs = [[] for pp in p] + matching_as = [[] for pp in p] + matching_gjs = [[] for pp in p] + matching_gis = [[] for pp in p] + matching_targets = [[] for pp in p] + matching_anchs = [[] for pp in p] + + nl = len(p) + + for batch_idx in range(p[0].shape[0]): + + b_idx = targets[:, 0]==batch_idx + this_target = targets[b_idx] + if this_target.shape[0] == 0: + continue + + txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1] + txyxy = xywh2xyxy(txywh) + + pxyxys = [] + p_cls = [] + p_obj = [] + from_which_layer = [] + all_b = [] + all_a = [] + all_gj = [] + all_gi = [] + all_anch = [] + + for i, pi in enumerate(p): + + b, a, gj, gi = indices[i] + idx = (b == batch_idx) + b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] + all_b.append(b) + all_a.append(a) + all_gj.append(gj) + all_gi.append(gi) + all_anch.append(anch[i][idx]) + from_which_layer.append(torch.ones(size=(len(b),)) * i) + + fg_pred = pi[b, a, gj, gi] + p_obj.append(fg_pred[:, 4:5]) + p_cls.append(fg_pred[:, 5:]) + + grid = torch.stack([gi, gj], dim=1) + pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8. + #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i] + pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8. + pxywh = torch.cat([pxy, pwh], dim=-1) + pxyxy = xywh2xyxy(pxywh) + pxyxys.append(pxyxy) + + pxyxys = torch.cat(pxyxys, dim=0) + if pxyxys.shape[0] == 0: + continue + p_obj = torch.cat(p_obj, dim=0) + p_cls = torch.cat(p_cls, dim=0) + from_which_layer = torch.cat(from_which_layer, dim=0) + all_b = torch.cat(all_b, dim=0) + all_a = torch.cat(all_a, dim=0) + all_gj = torch.cat(all_gj, dim=0) + all_gi = torch.cat(all_gi, dim=0) + all_anch = torch.cat(all_anch, dim=0) + + pair_wise_iou = box_iou(txyxy, pxyxys) + + pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8) + + top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1) + dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1) + + gt_cls_per_image = ( + F.one_hot(this_target[:, 1].to(torch.int64), self.nc) + .float() + .unsqueeze(1) + .repeat(1, pxyxys.shape[0], 1) + ) + + num_gt = this_target.shape[0] + cls_preds_ = ( + p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_() + ) + + y = cls_preds_.sqrt_() + pair_wise_cls_loss = F.binary_cross_entropy_with_logits( + torch.log(y/(1-y)) , gt_cls_per_image, reduction="none" + ).sum(-1) + del cls_preds_ + + cost = ( + pair_wise_cls_loss + + 3.0 * pair_wise_iou_loss + ) + + matching_matrix = torch.zeros_like(cost) + + for gt_idx in range(num_gt): + _, pos_idx = torch.topk( + cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False + ) + matching_matrix[gt_idx][pos_idx] = 1.0 + + del top_k, dynamic_ks + anchor_matching_gt = matching_matrix.sum(0) + if (anchor_matching_gt > 1).sum() > 0: + _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0) + matching_matrix[:, anchor_matching_gt > 1] *= 0.0 + matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0 + fg_mask_inboxes = matching_matrix.sum(0) > 0.0 + matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0) + 
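# --- Editor's note (not part of the patch) ------------------------------------
# Caution: `from_which_layer` was collected above as plain CPU tensors
# (torch.ones(size=(len(b),)) * i), while `fg_mask_inboxes` inherits the device
# of the predictions, so the indexing on the next line can raise a device
# mismatch when training on CUDA. A minimal, untested fix would be to build the
# layer ids on the target device instead, e.g.
#     from_which_layer.append(torch.ones(len(b), device=targets.device) * i)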
+ from_which_layer = from_which_layer[fg_mask_inboxes] + all_b = all_b[fg_mask_inboxes] + all_a = all_a[fg_mask_inboxes] + all_gj = all_gj[fg_mask_inboxes] + all_gi = all_gi[fg_mask_inboxes] + all_anch = all_anch[fg_mask_inboxes] + + this_target = this_target[matched_gt_inds] + + for i in range(nl): + layer_idx = from_which_layer == i + matching_bs[i].append(all_b[layer_idx]) + matching_as[i].append(all_a[layer_idx]) + matching_gjs[i].append(all_gj[layer_idx]) + matching_gis[i].append(all_gi[layer_idx]) + matching_targets[i].append(this_target[layer_idx]) + matching_anchs[i].append(all_anch[layer_idx]) + + for i in range(nl): + if matching_targets[i] != []: + matching_bs[i] = torch.cat(matching_bs[i], dim=0) + matching_as[i] = torch.cat(matching_as[i], dim=0) + matching_gjs[i] = torch.cat(matching_gjs[i], dim=0) + matching_gis[i] = torch.cat(matching_gis[i], dim=0) + matching_targets[i] = torch.cat(matching_targets[i], dim=0) + matching_anchs[i] = torch.cat(matching_anchs[i], dim=0) + else: + matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64) + + return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs + + def find_5_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 1.0 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch + + def find_3_positive(self, p, targets): + # Build targets for compute_loss(), input targets(image,class,x,y,w,h) + na, nt = self.na, targets.shape[0] # number of anchors, targets + indices, anch = [], [] + gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain + ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt) + targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices + + g = 0.5 # bias + off = torch.tensor([[0, 0], + [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], device=targets.device).float() * g # offsets + + for i in range(self.nl): + anchors = self.anchors[i] + gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain + + # Match targets to anchors + t = targets * gain + if nt: + # Matches + r = t[:, :, 4:6] / anchors[:, None] # wh ratio + j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) + t = t[j] # filter + + # Offsets + gxy = t[:, 2:4] # grid xy + gxi = gain[[2, 3]] - gxy # inverse + j, k = ((gxy % 1. < g) & (gxy > 1.)).T + l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j = torch.stack((torch.ones_like(j), j, k, l, m)) + t = t.repeat((5, 1, 1))[j] + offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] + else: + t = targets[0] + offsets = 0 + + # Define + b, c = t[:, :2].long().T # image, class + gxy = t[:, 2:4] # grid xy + gwh = t[:, 4:6] # grid wh + gij = (gxy - offsets).long() + gi, gj = gij.T # grid xy indices + + # Append + a = t[:, 6].long() # anchor indices + indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices + anch.append(anchors[a]) # anchors + + return indices, anch diff --git a/src/train_utils/train_models/models/yolov7/utils/metrics.py b/src/train_utils/train_models/models/yolov7/utils/metrics.py new file mode 100644 index 0000000..6d2f536 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/metrics.py @@ -0,0 +1,227 @@ +# Model validation metrics + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import torch + +from . import general + + +def fitness(x): + # Model fitness as a weighted combination of metrics + w = [0.0, 0.0, 0.1, 0.9] # weights for [P, R, mAP@0.5, mAP@0.5:0.95] + return (x[:, :4] * w).sum(1) + + +def ap_per_class(tp, conf, pred_cls, target_cls, v5_metric=False, plot=False, save_dir='.', names=()): + """ Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. + # Arguments + tp: True positives (nparray, nx1 or nx10). + conf: Objectness value from 0-1 (nparray). + pred_cls: Predicted object classes (nparray). + target_cls: True object classes (nparray). 
+ plot: Plot precision-recall curve at mAP@0.5 + save_dir: Plot save directory + # Returns + The average precision as computed in py-faster-rcnn. + """ + + # Sort by objectness + i = np.argsort(-conf) + tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] + + # Find unique classes + unique_classes = np.unique(target_cls) + nc = unique_classes.shape[0] # number of classes, number of detections + + # Create Precision-Recall curve and compute AP for each class + px, py = np.linspace(0, 1, 1000), [] # for plotting + ap, p, r = np.zeros((nc, tp.shape[1])), np.zeros((nc, 1000)), np.zeros((nc, 1000)) + for ci, c in enumerate(unique_classes): + i = pred_cls == c + n_l = (target_cls == c).sum() # number of labels + n_p = i.sum() # number of predictions + + if n_p == 0 or n_l == 0: + continue + else: + # Accumulate FPs and TPs + fpc = (1 - tp[i]).cumsum(0) + tpc = tp[i].cumsum(0) + + # Recall + recall = tpc / (n_l + 1e-16) # recall curve + r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases + + # Precision + precision = tpc / (tpc + fpc) # precision curve + p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score + + # AP from recall-precision curve + for j in range(tp.shape[1]): + ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j], v5_metric=v5_metric) + if plot and j == 0: + py.append(np.interp(px, mrec, mpre)) # precision at mAP@0.5 + + # Compute F1 (harmonic mean of precision and recall) + f1 = 2 * p * r / (p + r + 1e-16) + if plot: + plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) + plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') + plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') + plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + + i = f1.mean(0).argmax() # max F1 index + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + + +def compute_ap(recall, precision, v5_metric=False): + """ Compute the average precision, given the recall and precision curves + # Arguments + recall: The recall curve (list) + precision: The precision curve (list) + v5_metric: Assume maximum recall to be 1.0, as in YOLOv5, MMDetetion etc. + # Returns + Average precision, precision curve, recall curve + """ + + # Append sentinel values to beginning and end + if v5_metric: # New YOLOv5 metric, same as MMDetection and Detectron2 repositories + mrec = np.concatenate(([0.], recall, [1.0])) + else: # Old YOLOv5 metric, i.e. 
default YOLOv7 metric + mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01])) + mpre = np.concatenate(([1.], precision, [0.])) + + # Compute the precision envelope + mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) + + # Integrate area under curve + method = 'interp' # methods: 'continuous', 'interp' + if method == 'interp': + x = np.linspace(0, 1, 101) # 101-point interp (COCO) + ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate + else: # 'continuous' + i = np.where(mrec[1:] != mrec[:-1])[0] # points where x axis (recall) changes + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) # area under curve + + return ap, mpre, mrec + + +class ConfusionMatrix: + # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix + def __init__(self, nc, conf=0.25, iou_thres=0.45): + self.matrix = np.zeros((nc + 1, nc + 1)) + self.nc = nc # number of classes + self.conf = conf + self.iou_thres = iou_thres + + def process_batch(self, detections, labels): + """ + Return intersection-over-union (Jaccard index) of boxes. + Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Arguments: + detections (Array[N, 6]), x1, y1, x2, y2, conf, class + labels (Array[M, 5]), class, x1, y1, x2, y2 + Returns: + None, updates confusion matrix accordingly + """ + detections = detections[detections[:, 4] > self.conf] + gt_classes = labels[:, 0].int() + detection_classes = detections[:, 5].int() + iou = general.box_iou(labels[:, 1:], detections[:, :4]) + + x = torch.where(iou > self.iou_thres) + if x[0].shape[0]: + matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() + if x[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + else: + matches = np.zeros((0, 3)) + + n = matches.shape[0] > 0 + m0, m1, _ = matches.transpose().astype(np.int16) + for i, gc in enumerate(gt_classes): + j = m0 == i + if n and sum(j) == 1: + self.matrix[gc, detection_classes[m1[j]]] += 1 # correct + else: + self.matrix[self.nc, gc] += 1 # background FP + + if n: + for i, dc in enumerate(detection_classes): + if not any(m1 == i): + self.matrix[dc, self.nc] += 1 # background FN + + def matrix(self): + return self.matrix + + def plot(self, save_dir='', names=()): + try: + import seaborn as sn + + array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize + array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) + + fig = plt.figure(figsize=(12, 9), tight_layout=True) + sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size + labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels + sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, + xticklabels=names + ['background FP'] if labels else "auto", + yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel('True') + fig.axes[0].set_ylabel('Predicted') + fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + except Exception as e: + pass + + def print(self): + for i in range(self.nc + 1): + print(' '.join(map(str, self.matrix[i]))) + + +# Plots ---------------------------------------------------------------------------------------------------------------- + +def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + # 
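# --- Editor's note (illustrative sketch, not part of the patch) ---------------
# compute_ap() above first forces precision to be monotonically non-increasing
# (the "envelope") and then integrates it over 101 evenly spaced recall points,
# i.e. COCO-style interpolation; with v5_metric=False the recall axis is capped
# just past the last measured recall rather than extended to 1.0. A toy
# end-to-end example with invented values:
import numpy as np

recall    = np.array([0.1, 0.4, 0.6, 0.8])
precision = np.array([1.0, 0.8, 0.6, 0.5])
mrec = np.concatenate(([0.], recall, [recall[-1] + 0.01]))
mpre = np.concatenate(([1.], precision, [0.]))
mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))   # precision envelope
x = np.linspace(0, 1, 101)                             # 101-point COCO grid
ap = np.trapz(np.interp(x, mrec, mpre), x)             # area under the PR curve
# Separate caution on ConfusionMatrix above: the instance attribute self.matrix
# assigned in __init__ shadows the matrix() method, so cm.matrix returns the
# array and the method is effectively unreachable.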
Precision-recall curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + py = np.stack(py, axis=1) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py.T): + ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + else: + ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + + ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) + ax.set_xlabel('Recall') + ax.set_ylabel('Precision') + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) + + +def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), xlabel='Confidence', ylabel='Metric'): + # Metric-confidence curve + fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) + + if 0 < len(names) < 21: # display per-class legend if < 21 classes + for i, y in enumerate(py): + ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + else: + ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + + y = py.mean(0) + ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_xlim(0, 1) + ax.set_ylim(0, 1) + plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") + fig.savefig(Path(save_dir), dpi=250) diff --git a/src/train_utils/train_models/models/yolov7/utils/plots.py b/src/train_utils/train_models/models/yolov7/utils/plots.py new file mode 100644 index 0000000..fdd8d0e --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/plots.py @@ -0,0 +1,489 @@ +# Plotting utils + +import glob +import math +import os +import random +from copy import copy +from pathlib import Path + +import cv2 +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd +import seaborn as sns +import torch +import yaml +from PIL import Image, ImageDraw, ImageFont +from scipy.signal import butter, filtfilt + +from utils.general import xywh2xyxy, xyxy2xywh +from utils.metrics import fitness + +# Settings +matplotlib.rc('font', **{'size': 11}) +matplotlib.use('Agg') # for writing to files only + + +def color_list(): + # Return first 10 plt colors as (r,g,b) https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb + def hex2rgb(h): + return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + + return [hex2rgb(h) for h in matplotlib.colors.TABLEAU_COLORS.values()] # or BASE_ (8), CSS4_ (148), XKCD_ (949) + + +def hist2d(x, y, n=100): + # 2d histogram used in labels.png and evolve.png + xedges, yedges = np.linspace(x.min(), x.max(), n), np.linspace(y.min(), y.max(), n) + hist, xedges, yedges = np.histogram2d(x, y, (xedges, yedges)) + xidx = np.clip(np.digitize(x, xedges) - 1, 0, hist.shape[0] - 1) + yidx = np.clip(np.digitize(y, yedges) - 1, 0, hist.shape[1] - 1) + return np.log(hist[xidx, yidx]) + + +def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): + # https://stackoverflow.com/questions/28536191/how-to-filter-smooth-with-scipy-numpy + def butter_lowpass(cutoff, fs, order): + nyq = 0.5 * fs + normal_cutoff = cutoff / nyq + return butter(order, normal_cutoff, btype='low', analog=False) + + b, a = butter_lowpass(cutoff, fs, order=order) + return filtfilt(b, a, data) # forward-backward filter + + +def plot_one_box(x, img, color=None, label=None, line_thickness=3): + # Plots one bounding box on image img + tl 
= line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness + color = color or [random.randint(0, 255) for _ in range(3)] + c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) + cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) + if label: + tf = max(tl - 1, 1) # font thickness + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 + cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled + cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) + + +def plot_one_box_PIL(box, img, color=None, label=None, line_thickness=None): + img = Image.fromarray(img) + draw = ImageDraw.Draw(img) + line_thickness = line_thickness or max(int(min(img.size) / 200), 2) + draw.rectangle(box, width=line_thickness, outline=tuple(color)) # plot + if label: + fontsize = max(round(max(img.size) / 40), 12) + font = ImageFont.truetype("Arial.ttf", fontsize) + txt_width, txt_height = font.getsize(label) + draw.rectangle([box[0], box[1] - txt_height + 4, box[0] + txt_width, box[1]], fill=tuple(color)) + draw.text((box[0], box[1] - txt_height + 1), label, fill=(255, 255, 255), font=font) + return np.asarray(img) + + +def plot_wh_methods(): # from utils.plots import *; plot_wh_methods() + # Compares the two methods for width-height anchor multiplication + # https://github.com/ultralytics/yolov3/issues/168 + x = np.arange(-4.0, 4.0, .1) + ya = np.exp(x) + yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2 + + fig = plt.figure(figsize=(6, 3), tight_layout=True) + plt.plot(x, ya, '.-', label='YOLOv3') + plt.plot(x, yb ** 2, '.-', label='YOLOR ^2') + plt.plot(x, yb ** 1.6, '.-', label='YOLOR ^1.6') + plt.xlim(left=-4, right=4) + plt.ylim(bottom=0, top=6) + plt.xlabel('input') + plt.ylabel('output') + plt.grid() + plt.legend() + fig.savefig('comparison.png', dpi=200) + + +def output_to_target(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + for *box, conf, cls in o.cpu().numpy(): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf]) + return np.array(targets) + + +def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16): + # Plot image grid with labels + + if isinstance(images, torch.Tensor): + images = images.cpu().float().numpy() + if isinstance(targets, torch.Tensor): + targets = targets.cpu().numpy() + + # un-normalise + if np.max(images[0]) <= 1: + images *= 255 + + tl = 3 # line thickness + tf = max(tl - 1, 1) # font thickness + bs, _, h, w = images.shape # batch size, _, height, width + bs = min(bs, max_subplots) # limit plot images + ns = np.ceil(bs ** 0.5) # number of subplots (square) + + # Check if we should resize + scale_factor = max_size / max(h, w) + if scale_factor < 1: + h = math.ceil(scale_factor * h) + w = math.ceil(scale_factor * w) + + colors = color_list() # list of colors + mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init + for i, img in enumerate(images): + if i == max_subplots: # if last batch has fewer images than we expect + break + + block_x = int(w * (i // ns)) + block_y = int(h * (i % ns)) + + img = img.transpose(1, 2, 0) + if scale_factor < 1: + img = cv2.resize(img, (w, h)) + + mosaic[block_y:block_y + h, block_x:block_x + w, :] = img + if len(targets) > 0: + image_targets = targets[targets[:, 0] == i] + boxes = 
xywh2xyxy(image_targets[:, 2:6]).T + classes = image_targets[:, 1].astype('int') + labels = image_targets.shape[1] == 6 # labels if no conf column + conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred) + + if boxes.shape[1]: + if boxes.max() <= 1.01: # if normalized with tolerance 0.01 + boxes[[0, 2]] *= w # scale to pixels + boxes[[1, 3]] *= h + elif scale_factor < 1: # absolute coords need scale if image scales + boxes *= scale_factor + boxes[[0, 2]] += block_x + boxes[[1, 3]] += block_y + for j, box in enumerate(boxes.T): + cls = int(classes[j]) + color = colors[cls % len(colors)] + cls = names[cls] if names else cls + if labels or conf[j] > 0.25: # 0.25 conf thresh + label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j]) + plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl) + + # Draw image filename labels + if paths: + label = Path(paths[i]).name[:40] # trim to 40 char + t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] + cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf, + lineType=cv2.LINE_AA) + + # Image border + cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3) + + if fname: + r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size + mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA) + # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save + Image.fromarray(mosaic).save(fname) # PIL save + return mosaic + + +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): + # Plot LR simulating training for full epochs + optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals + y = [] + for _ in range(epochs): + scheduler.step() + y.append(optimizer.param_groups[0]['lr']) + plt.plot(y, '.-', label='LR') + plt.xlabel('epoch') + plt.ylabel('LR') + plt.grid() + plt.xlim(0, epochs) + plt.ylim(0) + plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.close() + + +def plot_test_txt(): # from utils.plots import *; plot_test() + # Plot test.txt histograms + x = np.loadtxt('test.txt', dtype=np.float32) + box = xyxy2xywh(x[:, :4]) + cx, cy = box[:, 0], box[:, 1] + + fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) + ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) + ax.set_aspect('equal') + plt.savefig('hist2d.png', dpi=300) + + fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) + ax[0].hist(cx, bins=600) + ax[1].hist(cy, bins=600) + plt.savefig('hist1d.png', dpi=200) + + +def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() + # Plot targets.txt histograms + x = np.loadtxt('targets.txt', dtype=np.float32).T + s = ['x targets', 'y targets', 'width targets', 'height targets'] + fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) + ax = ax.ravel() + for i in range(4): + ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].legend() + ax[i].set_title(s[i]) + plt.savefig('targets.jpg', dpi=200) + + +def plot_study_txt(path='', x=None): # from utils.plots import *; plot_study_txt() + # Plot study.txt generated by test.py + fig, ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True) + # ax = ax.ravel() + + fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) + # for f in [Path(path) / f'study_coco_{x}.txt' for x in ['yolor-p6', 'yolor-w6', 'yolor-e6', 'yolor-d6']]: + for f in 
sorted(Path(path).glob('study*.txt')): + y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T + x = np.arange(y.shape[1]) if x is None else np.array(x) + s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_inference (ms/img)', 't_NMS (ms/img)', 't_total (ms/img)'] + # for i in range(7): + # ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + # ax[i].set_title(s[i]) + + j = y[3].argmax() + 1 + ax2.plot(y[6, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, + label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) + + ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + + ax2.grid(alpha=0.2) + ax2.set_yticks(np.arange(20, 60, 5)) + ax2.set_xlim(0, 57) + ax2.set_ylim(30, 55) + ax2.set_xlabel('GPU Speed (ms/img)') + ax2.set_ylabel('COCO AP val') + ax2.legend(loc='lower right') + plt.savefig(str(Path(path).name) + '.png', dpi=300) + + +def plot_labels(labels, names=(), save_dir=Path(''), loggers=None): + # plot dataset labels + print('Plotting labels... ') + c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes + nc = int(c.max() + 1) # number of classes + colors = color_list() + x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + + # seaborn correlogram + sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + plt.close() + + # matplotlib labels + matplotlib.use('svg') # faster + ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].set_ylabel('instances') + if 0 < len(names) < 30: + ax[0].set_xticks(range(len(names))) + ax[0].set_xticklabels(names, rotation=90, fontsize=10) + else: + ax[0].set_xlabel('classes') + sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) + sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + + # rectangles + labels[:, 1:3] = 0.5 # center + labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000 + img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255) + for cls, *box in labels[:1000]: + ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot + ax[1].imshow(img) + ax[1].axis('off') + + for a in [0, 1, 2, 3]: + for s in ['top', 'right', 'left', 'bottom']: + ax[a].spines[s].set_visible(False) + + plt.savefig(save_dir / 'labels.jpg', dpi=200) + matplotlib.use('Agg') + plt.close() + + # loggers + for k, v in loggers.items() or {}: + if k == 'wandb' and v: + v.log({"Labels": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False) + + +def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution() + # Plot hyperparameter evolution results in evolve.txt + with open(yaml_file) as f: + hyp = yaml.load(f, Loader=yaml.SafeLoader) + x = np.loadtxt('evolve.txt', ndmin=2) + f = fitness(x) + # weights = (f - f.min()) ** 2 # for weighted results + plt.figure(figsize=(10, 12), tight_layout=True) + matplotlib.rc('font', **{'size': 8}) + for i, (k, v) in enumerate(hyp.items()): + y = x[:, i + 7] + # mu = (y * weights).sum() / weights.sum() # best weighted result + mu = y[f.argmax()] # best single result + plt.subplot(6, 5, i + 1) + plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none') + plt.plot(mu, f.max(), 'k+', markersize=15) + plt.title('%s = %.3g' % (k, 
mu), fontdict={'size': 9}) # limit to 40 characters + if i % 5 != 0: + plt.yticks([]) + print('%15s: %.3g' % (k, mu)) + plt.savefig('evolve.png', dpi=200) + print('\nPlot saved as evolve.png') + + +def profile_idetection(start=0, stop=0, labels=(), save_dir=''): + # Plot iDetection '*.txt' per-image logs. from utils.plots import *; profile_idetection() + ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() + s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] + files = list(Path(save_dir).glob('frames*.txt')) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows + n = results.shape[1] # number of rows + x = np.arange(start, min(stop, n) if stop else n) + results = results[:, x] + t = (results[0] - results[0].min()) # set t0=0s + results[0] = x + for i, a in enumerate(ax): + if i < len(results): + label = labels[fi] if len(labels) else f.stem.replace('frames_', '') + a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + a.set_title(s[i]) + a.set_xlabel('time (s)') + # if fi == len(files) - 1: + # a.set_ylim(bottom=0) + for side in ['top', 'right']: + a.spines[side].set_visible(False) + else: + a.remove() + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + + +def plot_results_overlay(start=0, stop=0): # from utils.plots import *; plot_results_overlay() + # Plot training 'results*.txt', overlaying train and val losses + s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'mAP@0.5:0.95'] # legends + t = ['Box', 'Objectness', 'Classification', 'P-R', 'mAP-F1'] # titles + for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')): + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + fig, ax = plt.subplots(1, 5, figsize=(14, 3.5), tight_layout=True) + ax = ax.ravel() + for i in range(5): + for j in [i, i + 5]: + y = results[j, x] + ax[i].plot(x, y, marker='.', label=s[j]) + # y_smooth = butter_lowpass_filtfilt(y) + # ax[i].plot(x, np.gradient(y_smooth), marker='.', label=s[j]) + + ax[i].set_title(t[i]) + ax[i].legend() + ax[i].set_ylabel(f) if i == 0 else None # add filename + fig.savefig(f.replace('.txt', '.png'), dpi=200) + + +def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''): + # Plot training 'results*.txt'. from utils.plots import *; plot_results(save_dir='runs/train/exp') + fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) + ax = ax.ravel() + s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall', + 'val Box', 'val Objectness', 'val Classification', 'mAP@0.5', 'mAP@0.5:0.95'] + if bucket: + # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id] + files = ['results%g.txt' % x for x in id] + c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id) + os.system(c) + else: + files = list(Path(save_dir).glob('results*.txt')) + assert len(files), 'No results.txt files found in %s, nothing to plot.' 
% os.path.abspath(save_dir) + for fi, f in enumerate(files): + try: + results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T + n = results.shape[1] # number of rows + x = range(start, min(stop, n) if stop else n) + for i in range(10): + y = results[i, x] + if i in [0, 1, 2, 5, 6, 7]: + y[y == 0] = np.nan # don't show zero loss values + # y /= y[0] # normalize + label = labels[fi] if len(labels) else f.stem + ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8) + ax[i].set_title(s[i]) + # if i in [5, 6, 7]: # share train and val loss y axes + # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) + except Exception as e: + print('Warning: Plotting error for %s; %s' % (f, e)) + + ax[1].legend() + fig.savefig(Path(save_dir) / 'results.png', dpi=200) + + +def output_to_keypoint(output): + # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] + targets = [] + for i, o in enumerate(output): + kpts = o[:,6:] + o = o[:,:6] + for index, (*box, conf, cls) in enumerate(o.detach().cpu().numpy()): + targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf, *list(kpts.detach().cpu().numpy()[index])]) + return np.array(targets) + + +def plot_skeleton_kpts(im, kpts, steps, orig_shape=None): + #Plot the skeleton and keypointsfor coco datatset + palette = np.array([[255, 128, 0], [255, 153, 51], [255, 178, 102], + [230, 230, 0], [255, 153, 255], [153, 204, 255], + [255, 102, 255], [255, 51, 255], [102, 178, 255], + [51, 153, 255], [255, 153, 153], [255, 102, 102], + [255, 51, 51], [153, 255, 153], [102, 255, 102], + [51, 255, 51], [0, 255, 0], [0, 0, 255], [255, 0, 0], + [255, 255, 255]]) + + skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], + [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], + [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] + + pose_limb_color = palette[[9, 9, 9, 9, 7, 7, 7, 0, 0, 0, 0, 0, 16, 16, 16, 16, 16, 16, 16]] + pose_kpt_color = palette[[16, 16, 16, 16, 16, 0, 0, 0, 0, 0, 0, 9, 9, 9, 9, 9, 9]] + radius = 5 + num_kpts = len(kpts) // steps + + for kid in range(num_kpts): + r, g, b = pose_kpt_color[kid] + x_coord, y_coord = kpts[steps * kid], kpts[steps * kid + 1] + if not (x_coord % 640 == 0 or y_coord % 640 == 0): + if steps == 3: + conf = kpts[steps * kid + 2] + if conf < 0.5: + continue + cv2.circle(im, (int(x_coord), int(y_coord)), radius, (int(r), int(g), int(b)), -1) + + for sk_id, sk in enumerate(skeleton): + r, g, b = pose_limb_color[sk_id] + pos1 = (int(kpts[(sk[0]-1)*steps]), int(kpts[(sk[0]-1)*steps+1])) + pos2 = (int(kpts[(sk[1]-1)*steps]), int(kpts[(sk[1]-1)*steps+1])) + if steps == 3: + conf1 = kpts[(sk[0]-1)*steps+2] + conf2 = kpts[(sk[1]-1)*steps+2] + if conf1<0.5 or conf2<0.5: + continue + if pos1[0]%640 == 0 or pos1[1]%640==0 or pos1[0]<0 or pos1[1]<0: + continue + if pos2[0] % 640 == 0 or pos2[1] % 640 == 0 or pos2[0]<0 or pos2[1]<0: + continue + cv2.line(im, pos1, pos2, (int(r), int(g), int(b)), thickness=2) diff --git a/src/train_utils/train_models/models/yolov7/utils/torch_utils.py b/src/train_utils/train_models/models/yolov7/utils/torch_utils.py new file mode 100644 index 0000000..1e631b5 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/torch_utils.py @@ -0,0 +1,374 @@ +# YOLOR PyTorch utils + +import datetime +import logging +import math +import os +import platform +import subprocess +import time +from contextlib import contextmanager +from copy import deepcopy +from pathlib import Path + +import torch +import torch.backends.cudnn as 
cudnn +import torch.nn as nn +import torch.nn.functional as F +import torchvision + +try: + import thop # for FLOPS computation +except ImportError: + thop = None +logger = logging.getLogger(__name__) + + +@contextmanager +def torch_distributed_zero_first(local_rank: int): + """ + Decorator to make all processes in distributed training wait for each local_master to do something. + """ + if local_rank not in [-1, 0]: + torch.distributed.barrier() + yield + if local_rank == 0: + torch.distributed.barrier() + + +def init_torch_seeds(seed=0): + # Speed-reproducibility tradeoff https://pytorch.org/docs/stable/notes/randomness.html + torch.manual_seed(seed) + if seed == 0: # slower, more reproducible + cudnn.benchmark, cudnn.deterministic = False, True + else: # faster, less reproducible + cudnn.benchmark, cudnn.deterministic = True, False + + +def date_modified(path=__file__): + # return human-readable file modification date, i.e. '2021-3-26' + t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) + return f'{t.year}-{t.month}-{t.day}' + + +def git_describe(path=Path(__file__).parent): # path must be a directory + # return human-readable git description, i.e. v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe + s = f'git -C {path} describe --tags --long --always' + try: + return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] + except subprocess.CalledProcessError as e: + return '' # not a git repository + + +def select_device(device='', batch_size=None): + # device = 'cpu' or '0' or '0,1,2,3' + s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string + cpu = device.lower() == 'cpu' + if cpu: + os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + elif device: # non-cpu device requested + os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable + assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + + cuda = not cpu and torch.cuda.is_available() + if cuda: + n = torch.cuda.device_count() + if n > 1 and batch_size: # check that batch_size is compatible with device_count + assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' + space = ' ' * len(s) + for i, d in enumerate(device.split(',') if device else range(n)): + p = torch.cuda.get_device_properties(i) + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + else: + s += 'CPU\n' + + logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe + return torch.device('cuda:0' if cuda else 'cpu') + + +def time_synchronized(): + # pytorch-accurate time + if torch.cuda.is_available(): + torch.cuda.synchronize() + return time.time() + + +def profile(x, ops, n=100, device=None): + # profile a pytorch module or list of modules. 
Example usage: + # x = torch.randn(16, 3, 640, 640) # input + # m1 = lambda x: x * torch.sigmoid(x) + # m2 = nn.SiLU() + # profile(x, [m1, m2], n=100) # profile speed over 100 iterations + + device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + x = x.to(device) + x.requires_grad = True + print(torch.__version__, device.type, torch.cuda.get_device_properties(0) if device.type == 'cuda' else '') + print(f"\n{'Params':>12s}{'GFLOPS':>12s}{'forward (ms)':>16s}{'backward (ms)':>16s}{'input':>24s}{'output':>24s}") + for m in ops if isinstance(ops, list) else [ops]: + m = m.to(device) if hasattr(m, 'to') else m # device + m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m # type + dtf, dtb, t = 0., 0., [0., 0., 0.] # dt forward, backward + try: + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPS + except: + flops = 0 + + for _ in range(n): + t[0] = time_synchronized() + y = m(x) + t[1] = time_synchronized() + try: + _ = y.sum().backward() + t[2] = time_synchronized() + except: # no backward method + t[2] = float('nan') + dtf += (t[1] - t[0]) * 1000 / n # ms per op forward + dtb += (t[2] - t[1]) * 1000 / n # ms per op backward + + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters + print(f'{p:12}{flops:12.4g}{dtf:16.4g}{dtb:16.4g}{str(s_in):>24s}{str(s_out):>24s}') + + +def is_parallel(model): + return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel) + + +def intersect_dicts(da, db, exclude=()): + # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values + return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape} + + +def initialize_weights(model): + for m in model.modules(): + t = type(m) + if t is nn.Conv2d: + pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif t is nn.BatchNorm2d: + m.eps = 1e-3 + m.momentum = 0.03 + elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6]: + m.inplace = True + + +def find_modules(model, mclass=nn.Conv2d): + # Finds layer indices matching module class 'mclass' + return [i for i, m in enumerate(model.module_list) if isinstance(m, mclass)] + + +def sparsity(model): + # Return global model sparsity + a, b = 0., 0. + for p in model.parameters(): + a += p.numel() + b += (p == 0).sum() + return b / a + + +def prune(model, amount=0.3): + # Prune model to requested global sparsity + import torch.nn.utils.prune as prune + print('Pruning model... 
', end='') + for name, m in model.named_modules(): + if isinstance(m, nn.Conv2d): + prune.l1_unstructured(m, name='weight', amount=amount) # prune + prune.remove(m, 'weight') # make permanent + print(' %.3g global sparsity' % sparsity(model)) + + +def fuse_conv_and_bn(conv, bn): + # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ + fusedconv = nn.Conv2d(conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True).requires_grad_(False).to(conv.weight.device) + + # prepare filters + w_conv = conv.weight.clone().view(conv.out_channels, -1) + w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var))) + fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape)) + + # prepare spatial bias + b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias + b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps)) + fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn) + + return fusedconv + + +def model_info(model, verbose=False, img_size=640): + # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320] + n_p = sum(x.numel() for x in model.parameters()) # number parameters + n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients + if verbose: + print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + for i, (name, p) in enumerate(model.named_parameters()): + name = name.replace('module_list.', '') + print('%5g %40s %9s %12g %20s %10.3g %10.3g' % + (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + + try: # FLOPS + from thop import profile + stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 + img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPS + img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float + fs = ', %.1f GFLOPS' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPS + except (ImportError, Exception): + fs = '' + + logger.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") + + +def load_classifier(name='resnet101', n=2): + # Loads a pretrained model reshaped to n-class output + model = torchvision.models.__dict__[name](pretrained=True) + + # ResNet model properties + # input_size = [3, 224, 224] + # input_space = 'RGB' + # input_range = [0, 1] + # mean = [0.485, 0.456, 0.406] + # std = [0.229, 0.224, 0.225] + + # Reshape output to n classes + filters = model.fc.weight.shape[1] + model.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True) + model.fc.weight = nn.Parameter(torch.zeros(n, filters), requires_grad=True) + model.fc.out_features = n + return model + + +def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) + # scales img(bs,3,y,x) by ratio constrained to gs-multiple + if ratio == 1.0: + return img + else: + h, w = img.shape[2:] + s = (int(h * ratio), int(w * ratio)) # new size + img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + if not same_shape: # pad/crop img + h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] + return F.pad(img, [0, 
w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean + + +def copy_attr(a, b, include=(), exclude=()): + # Copy attributes from b to a, options to only include [...] and to exclude [...] + for k, v in b.__dict__.items(): + if (len(include) and k not in include) or k.startswith('_') or k in exclude: + continue + else: + setattr(a, k, v) + + +class ModelEMA: + """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + Keep a moving average of everything in the model state_dict (parameters and buffers). + This is intended to allow functionality like + https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage + A smoothed version of the weights is necessary for some training schemes to perform well. + This class is sensitive to where it is initialized in the sequence of model init, + GPU assignment and distributed training wrappers. + """ + + def __init__(self, model, decay=0.9999, updates=0): + # Create EMA + self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA + # if next(model.parameters()).device.type != 'cpu': + # self.ema.half() # FP16 EMA + self.updates = updates # number of EMA updates + self.decay = lambda x: decay * (1 - math.exp(-x / 2000)) # decay exponential ramp (to help early epochs) + for p in self.ema.parameters(): + p.requires_grad_(False) + + def update(self, model): + # Update EMA parameters + with torch.no_grad(): + self.updates += 1 + d = self.decay(self.updates) + + msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict + for k, v in self.ema.state_dict().items(): + if v.dtype.is_floating_point: + v *= d + v += (1. - d) * msd[k].detach() + + def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + # Update EMA attributes + copy_attr(self.ema, model, include, exclude) + + +class BatchNormXd(torch.nn.modules.batchnorm._BatchNorm): + def _check_input_dim(self, input): + # The only difference between BatchNorm1d, BatchNorm2d, BatchNorm3d, etc + # is this method, which is overridden by the sub-class + # The original goal of this method was tensor sanity checks + # If you're ok bypassing those sanity checks (e.g. 
if you trust your inference + # to provide the right dimensional inputs), then you can just use this method + # for easy conversion from SyncBatchNorm + # (unfortunately, SyncBatchNorm does not store the original class - if it did + # we could return the one that was originally created) + return + +def revert_sync_batchnorm(module): + # this is very similar to the function that it is trying to revert: + # https://github.com/pytorch/pytorch/blob/c8b3686a3e4ba63dc59e5dcfe5db3430df256833/torch/nn/modules/batchnorm.py#L679 + module_output = module + if isinstance(module, torch.nn.modules.batchnorm.SyncBatchNorm): + new_cls = BatchNormXd + module_output = BatchNormXd(module.num_features, + module.eps, module.momentum, + module.affine, + module.track_running_stats) + if module.affine: + with torch.no_grad(): + module_output.weight = module.weight + module_output.bias = module.bias + module_output.running_mean = module.running_mean + module_output.running_var = module.running_var + module_output.num_batches_tracked = module.num_batches_tracked + if hasattr(module, "qconfig"): + module_output.qconfig = module.qconfig + for name, child in module.named_children(): + module_output.add_module(name, revert_sync_batchnorm(child)) + del module + return module_output + + +class TracedModel(nn.Module): + + def __init__(self, model=None, device=None, img_size=(640,640)): + super(TracedModel, self).__init__() + + print(" Convert model to Traced-model... ") + self.stride = model.stride + self.names = model.names + self.model = model + + self.model = revert_sync_batchnorm(self.model) + self.model.to('cpu') + self.model.eval() + + self.detect_layer = self.model.model[-1] + self.model.traced = True + + rand_example = torch.rand(1, 3, img_size, img_size) + + traced_script_module = torch.jit.trace(self.model, rand_example, strict=False) + #traced_script_module = torch.jit.script(self.model) + traced_script_module.save("traced_model.pt") + print(" traced_script_module saved! ") + self.model = traced_script_module + self.model.to(device) + self.detect_layer.to(device) + print(" model is traced! 
\n") + + def forward(self, x, augment=False, profile=False): + out = self.model(x) + out = self.detect_layer(out) + return out \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/utils/wandb_logging/__init__.py b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/__init__.py new file mode 100644 index 0000000..84952a8 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/__init__.py @@ -0,0 +1 @@ +# init \ No newline at end of file diff --git a/src/train_utils/train_models/models/yolov7/utils/wandb_logging/log_dataset.py b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/log_dataset.py new file mode 100644 index 0000000..74cd6c6 --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/log_dataset.py @@ -0,0 +1,24 @@ +import argparse + +import yaml + +from wandb_utils import WandbLogger + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def create_dataset_artifact(opt): + with open(opt.data) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--data', type=str, default='data/coco.yaml', help='data.yaml path') + parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') + parser.add_argument('--project', type=str, default='YOLOR', help='name of W&B Project') + opt = parser.parse_args() + opt.resume = False # Explicitly disallow resume check for dataset upload job + + create_dataset_artifact(opt) diff --git a/src/train_utils/train_models/models/yolov7/utils/wandb_logging/wandb_utils.py b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/wandb_utils.py new file mode 100644 index 0000000..aec7c5f --- /dev/null +++ b/src/train_utils/train_models/models/yolov7/utils/wandb_logging/wandb_utils.py @@ -0,0 +1,306 @@ +import json +import sys +from pathlib import Path + +import torch +import yaml +from tqdm import tqdm + +sys.path.append(str(Path(__file__).parent.parent.parent)) # add utils/ to path +from utils.datasets import LoadImagesAndLabels +from utils.datasets import img2label_paths +from utils.general import colorstr, xywh2xyxy, check_dataset + +try: + import wandb + from wandb import init, finish +except ImportError: + wandb = None + +WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' + + +def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): + return from_string[len(prefix):] + + +def check_wandb_config_file(data_config_file): + wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + if Path(wandb_config).is_file(): + return wandb_config + return data_config_file + + +def get_run_info(run_path): + run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX)) + run_id = run_path.stem + project = run_path.parent.stem + model_artifact_name = 'run_' + run_id + '_model' + return run_id, project, model_artifact_name + + +def check_wandb_resume(opt): + process_wandb_config_ddp_mode(opt) if opt.global_rank not in [-1, 0] else None + if isinstance(opt.resume, str): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + if opt.global_rank not in [-1, 0]: # For resuming DDP runs + run_id, project, model_artifact_name = get_run_info(opt.resume) + api = wandb.Api() + artifact = api.artifact(project + '/' + model_artifact_name + ':latest') + modeldir = artifact.download() + opt.weights = str(Path(modeldir) / "last.pt") + return True + 
return None + + +def process_wandb_config_ddp_mode(opt): + with open(opt.data) as f: + data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict + train_dir, val_dir = None, None + if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_dir = train_artifact.download() + train_path = Path(train_dir) / 'data/images/' + data_dict['train'] = str(train_path) + + if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + api = wandb.Api() + val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_dir = val_artifact.download() + val_path = Path(val_dir) / 'data/images/' + data_dict['val'] = str(val_path) + if train_dir or val_dir: + ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') + with open(ddp_data_path, 'w') as f: + yaml.dump(data_dict, f) + opt.data = ddp_data_path + + +class WandbLogger(): + def __init__(self, opt, name, run_id, data_dict, job_type='Training'): + # Pre-training routine -- + self.job_type = job_type + self.wandb, self.wandb_run, self.data_dict = wandb, None if not wandb else wandb.run, data_dict + # It's more elegant to stick to 1 wandb.init call, but useful config data is overwritten in the WandbLogger's wandb.init call + if isinstance(opt.resume, str): # checks resume from artifact + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + run_id, project, model_artifact_name = get_run_info(opt.resume) + model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name + assert wandb, 'install wandb to resume wandb runs' + # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config + self.wandb_run = wandb.init(id=run_id, project=project, resume='allow') + opt.resume = model_artifact_name + elif self.wandb: + self.wandb_run = wandb.init(config=opt, + resume="allow", + project='YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem, + name=name, + job_type=job_type, + id=run_id) if not wandb.run else wandb.run + if self.wandb_run: + if self.job_type == 'Training': + if not opt.resume: + wandb_data_dict = self.check_and_upload_dataset(opt) if opt.upload_dataset else data_dict + # Info useful for resuming from artifacts + self.wandb_run.config.opt = vars(opt) + self.wandb_run.config.data_dict = wandb_data_dict + self.data_dict = self.setup_training(opt, data_dict) + if self.job_type == 'Dataset Creation': + self.data_dict = self.check_and_upload_dataset(opt) + else: + prefix = colorstr('wandb: ') + print(f"{prefix}Install Weights & Biases for YOLOR logging with 'pip install wandb' (recommended)") + + def check_and_upload_dataset(self, opt): + assert wandb, 'Install wandb to upload dataset' + check_dataset(self.data_dict) + config_path = self.log_dataset_artifact(opt.data, + opt.single_cls, + 'YOLOR' if opt.project == 'runs/train' else Path(opt.project).stem) + print("Created dataset config file ", config_path) + with open(config_path) as f: + wandb_data_dict = yaml.load(f, Loader=yaml.SafeLoader) + return wandb_data_dict + + def setup_training(self, opt, data_dict): + self.log_dict, self.current_epoch, self.log_imgs = {}, 0, 16 # Logging Constants + self.bbox_interval = opt.bbox_interval + if isinstance(opt.resume, str): + modeldir, _ = self.download_model_artifact(opt) + if modeldir: + self.weights = Path(modeldir) / "last.pt" + config = self.wandb_run.config + opt.weights, opt.save_period, 
opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( + self.weights), config.save_period, config.total_batch_size, config.bbox_interval, config.epochs, \ + config.opt['hyp'] + data_dict = dict(self.wandb_run.config.data_dict) # eliminates the need for config file to resume + if 'val_artifact' not in self.__dict__: # If --upload_dataset is set, use the existing artifact, don't download + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), + opt.artifact_alias) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), + opt.artifact_alias) + self.result_artifact, self.result_table, self.val_table, self.weights = None, None, None, None + if self.train_artifact_path is not None: + train_path = Path(self.train_artifact_path) / 'data/images/' + data_dict['train'] = str(train_path) + if self.val_artifact_path is not None: + val_path = Path(self.val_artifact_path) / 'data/images/' + data_dict['val'] = str(val_path) + self.val_table = self.val_artifact.get("val") + self.map_val_table_path() + if self.val_artifact is not None: + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + if opt.bbox_interval == -1: + self.bbox_interval = opt.bbox_interval = (opt.epochs // 10) if opt.epochs > 10 else 1 + return data_dict + + def download_dataset_artifact(self, path, alias): + if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): + dataset_artifact = wandb.use_artifact(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + datadir = dataset_artifact.download() + return datadir, dataset_artifact + return None, None + + def download_model_artifact(self, opt): + if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): + model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") + assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + modeldir = model_artifact.download() + epochs_trained = model_artifact.metadata.get('epochs_trained') + total_epochs = model_artifact.metadata.get('total_epochs') + assert epochs_trained < total_epochs, 'training to %g epochs is finished, nothing to resume.' 
% ( + total_epochs) + return modeldir, model_artifact + return None, None + + def log_model(self, path, opt, epoch, fitness_score, best_model=False): + model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ + 'original_url': str(path), + 'epochs_trained': epoch + 1, + 'save period': opt.save_period, + 'project': opt.project, + 'total_epochs': opt.epochs, + 'fitness_score': fitness_score + }) + model_artifact.add_file(str(path / 'last.pt'), name='last.pt') + wandb.log_artifact(model_artifact, + aliases=['latest', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + print("Saving model artifact on epoch ", epoch + 1) + + def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): + with open(data_file) as f: + data = yaml.load(f, Loader=yaml.SafeLoader) # data dict + nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + names = {k: v for k, v in enumerate(names)} # to index dictionary + self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['train']), names, name='train') if data.get('train') else None + self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( + data['val']), names, name='val') if data.get('val') else None + if data.get('train'): + data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') + if data.get('val'): + data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + path = data_file if overwrite_config else '_wandb.'.join(data_file.rsplit('.', 1)) # updated data.yaml path + data.pop('download', None) + with open(path, 'w') as f: + yaml.dump(data, f) + + if self.job_type == 'Training': # builds correct artifact pipeline graph + self.wandb_run.use_artifact(self.val_artifact) + self.wandb_run.use_artifact(self.train_artifact) + self.val_artifact.wait() + self.val_table = self.val_artifact.get('val') + self.map_val_table_path() + else: + self.wandb_run.log_artifact(self.train_artifact) + self.wandb_run.log_artifact(self.val_artifact) + return path + + def map_val_table_path(self): + self.val_table_map = {} + print("Mapping dataset") + for i, data in enumerate(tqdm(self.val_table.data)): + self.val_table_map[data[3]] = data[0] + + def create_dataset_table(self, dataset, class_to_id, name='dataset'): + # TODO: Explore multiprocessing to split this loop in parallel; this is essential for speeding up the logging + artifact = wandb.Artifact(name=name, type="dataset") + img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None + img_files = tqdm(dataset.img_files) if not img_files else img_files + for img_file in img_files: + if Path(img_file).is_dir(): + artifact.add_dir(img_file, name='data/images') + labels_path = 'labels'.join(dataset.path.rsplit('images', 1)) + artifact.add_dir(labels_path, name='data/labels') + else: + artifact.add_file(img_file, name='data/images/' + Path(img_file).name) + label_file = Path(img2label_paths([img_file])[0]) + artifact.add_file(str(label_file), + name='data/labels/' + label_file.name) if label_file.exists() else None + table = wandb.Table(columns=["id", "train_image", "Classes", "name"]) + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()]) + for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)): + height, width = shapes[0] + labels[:, 2:] = (xywh2xyxy(labels[:, 2:].view(-1, 4))) * torch.Tensor([width, height, width, height]) + box_data, img_classes = [], {} + for cls, *xyxy in labels[:, 
1:].tolist(): + cls = int(cls) + box_data.append({"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": cls, + "box_caption": "%s" % (class_to_id[cls]), + "scores": {"acc": 1}, + "domain": "pixel"}) + img_classes[cls] = class_to_id[cls] + boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}} # inference-space + table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), json.dumps(img_classes), + Path(paths).name) + artifact.add(table, name) + return artifact + + def log_training_progress(self, predn, path, names): + if self.val_table and self.result_table: + class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()]) + box_data = [] + total_conf = 0 + for *xyxy, conf, cls in predn.tolist(): + if conf >= 0.25: + box_data.append( + {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, + "class_id": int(cls), + "box_caption": "%s %.3f" % (names[cls], conf), + "scores": {"class_score": conf}, + "domain": "pixel"}) + total_conf = total_conf + conf + boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space + id = self.val_table_map[Path(path).name] + self.result_table.add_data(self.current_epoch, + id, + wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set), + total_conf / max(1, len(box_data)) + ) + + def log(self, log_dict): + if self.wandb_run: + for key, value in log_dict.items(): + self.log_dict[key] = value + + def end_epoch(self, best_result=False): + if self.wandb_run: + wandb.log(self.log_dict) + self.log_dict = {} + if self.result_artifact: + train_results = wandb.JoinedTable(self.val_table, self.result_table, "id") + self.result_artifact.add(train_results, 'result') + wandb.log_artifact(self.result_artifact, aliases=['latest', 'epoch ' + str(self.current_epoch), + ('best' if best_result else '')]) + self.result_table = wandb.Table(["epoch", "id", "prediction", "avg_confidence"]) + self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") + + def finish_run(self): + if self.wandb_run: + if self.log_dict: + wandb.log(self.log_dict) + wandb.run.finish()
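The utilities added in this diff are easiest to understand from small usage examples. The sketches below are illustrative only: import paths, file names, and literal values are assumptions, not part of the patch. First, plot_skeleton_kpts (from the yolov7 plotting utilities above) expects keypoints as a flat array of (x, y) pairs when steps=2 or (x, y, conf) triplets when steps=3, drawing the COCO skeleton onto an OpenCV image in place:

    import numpy as np
    import cv2
    from utils.plots import plot_skeleton_kpts  # import path assumed

    im = np.zeros((640, 640, 3), dtype=np.uint8)         # blank canvas to draw on
    kpts = np.random.uniform(50.0, 600.0, size=17 * 3)   # 17 COCO keypoints, flattened
    kpts[2::3] = 1.0                                      # confidences above the 0.5 threshold so no point is skipped
    plot_skeleton_kpts(im, kpts, steps=3)                 # steps=3 -> (x, y, conf) per keypoint
    cv2.imwrite('skeleton_demo.png', im)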
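fuse_conv_and_bn from torch_utils.py folds a BatchNorm layer into the preceding convolution for faster inference. A quick equivalence check (import path assumed) shows the fused layer reproducing conv+BN outputs in eval mode:

    import torch
    import torch.nn as nn
    from utils.torch_utils import fuse_conv_and_bn  # import path assumed

    conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
    bn = nn.BatchNorm2d(8)
    with torch.no_grad():                  # give the BN layer non-trivial statistics
        bn.weight.uniform_(0.5, 1.5)
        bn.bias.uniform_(-0.5, 0.5)
        bn.running_mean.uniform_(-1.0, 1.0)
        bn.running_var.uniform_(0.5, 1.5)
    conv.eval()
    bn.eval()

    fused = fuse_conv_and_bn(conv, bn)
    x = torch.randn(1, 3, 32, 32)
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-4))  # expected: True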
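ModelEMA keeps an exponentially smoothed shadow copy of the model weights that is updated after every optimizer step. A minimal training-loop sketch (toy model, random data, import path assumed) of how it is typically driven:

    import torch
    import torch.nn as nn
    from utils.torch_utils import ModelEMA  # import path assumed

    model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.BatchNorm2d(16), nn.ReLU())
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
    ema = ModelEMA(model)                  # shadow copy created before the loop starts

    for step in range(10):                 # stand-in for the real dataloader loop
        x = torch.randn(4, 3, 64, 64)
        loss = model(x).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        ema.update(model)                  # blend the current weights into the EMA copy

    torch.save(ema.ema.state_dict(), 'ema_weights.pt')  # smoothed weights, typically used for eval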
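Finally, the wandb utilities address datasets and checkpoints through 'wandb-artifact://' paths. A small sketch (hypothetical run id and project, import path assumed) of how get_run_info and remove_prefix decompose such a path:

    from utils.wandb_logging.wandb_utils import get_run_info, remove_prefix  # import path assumed

    run_id, project, model_artifact_name = get_run_info('wandb-artifact://YOLOR/3abc12de')  # hypothetical run
    print(run_id, project, model_artifact_name)               # -> 3abc12de YOLOR run_3abc12de_model
    print(remove_prefix('wandb-artifact://YOLOR/3abc12de'))   # -> YOLOR/3abc12de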