diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000..dcdb39d9
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1 @@
+0e80a5fc7c514dacd1d8b957adf42ad636b4b1e3 # pre-commit run -a
diff --git a/.github/workflows/test.yml b/.github/workflows/conda-test.yml
similarity index 51%
rename from .github/workflows/test.yml
rename to .github/workflows/conda-test.yml
index e8d7e867..8575ed0d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/conda-test.yml
@@ -1,4 +1,4 @@
-name: Tests & Coverage
+name: Tests with conda
on: [ push, pull_request ]
@@ -10,7 +10,7 @@ jobs:
shell: bash -el {0}
strategy:
matrix:
- python-version: [ "3.9" ]
+ python-version: ["3.9", "3.10"]
steps:
- uses: actions/checkout@v4
with:
@@ -34,10 +34,12 @@ jobs:
run: echo "::set-output name=today::$(/bin/date -u '+%Y%m%d')"
shell: bash
- - name: Caching of the happypose installation
+ - name: Caching of the happypose installation and data
uses: actions/cache@v3
with:
- path: ${{ env.CONDA }}/envs
+ path: |
+ ${{ env.CONDA }}/envs
+ local_data
key:
conda-${{ runner.os }}--${{ runner.arch }}--${{steps.get-date.outputs.today }}-${{hashFiles('environment.yml') }}-${{env.CACHE_NUMBER }}
env:
@@ -46,41 +48,23 @@ jobs:
id: cache
- name: Update conda environment with happypose dependencies
- run:
- mamba env update -n happypose -f environment.yml
- if: steps.cache.outputs.cache-hit != 'true'
-
- - name: Install bop toolkit (temporal fix)
- run: |
- cd deps/bop_toolkit_challenge/
- sed 's/==.*$//' requirements.txt > req_nover.txt
- pip install -r req_nover.txt -e .
+ run: mamba env update -n happypose -f environment.yml
if: steps.cache.outputs.cache-hit != 'true'
- name: Install happypose
- run: |
- cd happypose/pose_estimators/cosypose
- pip install .
- cd ../../..
- pip install -e .
+ run: pip install -e ".[render,evaluation]"
- name: Download pre-trained models required for tests
run: |
- mkdir local_data
- python -m happypose.toolbox.utils.download --cosypose_model=detector-bop-ycbv-pbr--970850
- python -m happypose.toolbox.utils.download --cosypose_model=coarse-bop-ycbv-pbr--724183
- python -m happypose.toolbox.utils.download --cosypose_model=refiner-bop-ycbv-pbr--604090
-
- python -m happypose.toolbox.utils.download --megapose_models
-
- cd tests/data
- git clone https://github.com/petrikvladimir/happypose_test_data.git crackers_example
+ mkdir -p local_data
+ python -m happypose.toolbox.utils.download \
+ --megapose_models \
+ --examples \
+ crackers_example \
+ --cosypose_models \
+ detector-bop-ycbv-pbr--970850 \
+ coarse-bop-ycbv-pbr--724183 \
+ refiner-bop-ycbv-pbr--604090
- name: Run tests
- run: |
- pip install pytest coverage
- coverage run --source=happypose -m pytest tests
- coverage xml
- - uses: codecov/codecov-action@v3
- env:
- CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
+ run: python -m unittest
diff --git a/.github/workflows/packaging.yml b/.github/workflows/packaging.yml
deleted file mode 100644
index e7f977f3..00000000
--- a/.github/workflows/packaging.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-name: Check Packaging
-
-on: [push, pull_request]
-
-jobs:
- test:
- runs-on: ubuntu-latest
- strategy:
- matrix:
- python-version: ["3.8", "3.9", "3.10", "3.11"]
- steps:
- - uses: actions/checkout@v4
- - uses: actions/setup-python@v4
- with:
- python-version: ${{ matrix.python-version }}
- - run: python -m pip install .
diff --git a/.github/workflows/pip-test.yml b/.github/workflows/pip-test.yml
new file mode 100644
index 00000000..771cceb1
--- /dev/null
+++ b/.github/workflows/pip-test.yml
@@ -0,0 +1,48 @@
+name: Tests with pip
+
+on: [ push, pull_request ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.9", "3.10"]
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
+
+ - name: Install EGL mesa - required for Panda3D renderer
+ run: sudo apt-get update && sudo apt-get install -qqy libegl1-mesa libegl1-mesa-dev rclone
+
+ - name: Caching of the happypose installation and data
+ uses: actions/cache@v3
+ with:
+ path: local_data
+ key: data
+
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Update pip
+ run: pip install -U pip
+
+ - name: Install happypose
+ run: pip install ".[render,cpu,evaluation]" --extra-index-url https://download.pytorch.org/whl/cpu
+
+ - name: Download pre-trained models required for tests
+ run: |
+ mkdir -p local_data
+ python -m happypose.toolbox.utils.download \
+ --megapose_models \
+ --examples \
+ crackers_example \
+ --cosypose_models \
+ detector-bop-ycbv-pbr--970850 \
+ coarse-bop-ycbv-pbr--724183 \
+ refiner-bop-ycbv-pbr--604090
+
+ - name: Run tests
+ run: python -m unittest
diff --git a/.github/workflows/poetry-test.yml b/.github/workflows/poetry-test.yml
new file mode 100644
index 00000000..c2cbce24
--- /dev/null
+++ b/.github/workflows/poetry-test.yml
@@ -0,0 +1,56 @@
+name: Tests with poetry + Coverage
+
+on: [ push, pull_request ]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.9", "3.10"]
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
+
+ - name: Install EGL mesa - required for Panda3D renderer
+ run: sudo apt-get update && sudo apt-get install -qqy libegl1-mesa libegl1-mesa-dev rclone
+
+ - name: Setup poetry
+ run: pipx install poetry
+
+ - name: Caching of the happypose installation and data
+ uses: actions/cache@v3
+ with:
+ path: local_data
+ key: data
+
+ - uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+ cache: poetry
+
+ - name: Install happypose
+ run: poetry install --with dev -E render -E cpu -E evaluation
+
+ - name: Download pre-trained models required for tests
+ run: |
+ mkdir -p local_data
+ poetry run python -m happypose.toolbox.utils.download \
+ --megapose_models \
+ --examples \
+ crackers_example \
+ --cosypose_models \
+ detector-bop-ycbv-pbr--970850 \
+ coarse-bop-ycbv-pbr--724183 \
+ refiner-bop-ycbv-pbr--604090
+
+ - name: Run tests
+ run: poetry run coverage run --source=happypose -m unittest
+
+ - name: Process coverage
+ run: poetry run coverage xml
+
+ - uses: codecov/codecov-action@v3
+ env:
+ CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
diff --git a/.gitignore b/.gitignore
index 4a401c3c..fa1239dc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,5 @@ __pycache__
*.cpython
experiments/outputs
/book
+happypose/pose_estimators/cosypose/build/
+local_data
diff --git a/.gitmodules b/.gitmodules
index 699a737e..55a83202 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,15 +1,3 @@
-[submodule "happypose/pose_estimators/cosypose/deps/bop_toolkit_challenge"]
- path = happypose/pose_estimators/cosypose/deps/bop_toolkit_challenge
- url = https://github.com/ylabbe/bop_toolkit_challenge20.git
-[submodule "happypose/pose_estimators/cosypose/deps/bop_toolkit_cosypose"]
- path = happypose/pose_estimators/cosypose/deps/bop_toolkit_cosypose
- url = https://github.com/ylabbe/bop_toolkit_cosypose.git
-[submodule "happypose/pose_estimators/cosypose/deps/job-runner"]
- path = happypose/pose_estimators/cosypose/deps/job-runner
- url = https://github.com/ylabbe/job-runner.git
-[submodule "deps/bop_toolkit_challenge"]
- path = deps/bop_toolkit_challenge
- url = https://github.com/thodan/bop_toolkit
[submodule "deps/bop_renderer"]
path = deps/bop_renderer
url = https://github.com/thodan/bop_renderer/
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f29885ba..9d37db41 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,38 +1,39 @@
-exclude: 'cosypose|megapose6d|happypose|experiments'
repos:
-#- repo: https://github.com/pre-commit/pre-commit-hooks
- #rev: v4.4.0
- #hooks:
- #- id: check-added-large-files
- #- id: check-ast
- #- id: check-executables-have-shebangs
- #- id: check-json
- #- id: check-merge-conflict
- #- id: check-symlinks
- #- id: check-toml
- #- id: check-yaml
- #- id: debug-statements
- #- id: destroyed-symlinks
- #- id: detect-private-key
- #- id: end-of-file-fixer
- #- id: fix-byte-order-marker
- #- id: mixed-line-ending
- #- id: trailing-whitespace
-#- repo: https://github.com/PyCQA/isort
- #rev: 5.12.0
- #hooks:
- #- id: isort
-#- repo: https://github.com/psf/black
- #rev: 23.3.0
- #hooks:
- #- id: black
-- repo: https://github.com/pappasam/toml-sort
- rev: v0.23.1
- hooks:
- - id: toml-sort-fix
- exclude: 'poetry.lock'
-#- repo: https://github.com/charliermarsh/ruff-pre-commit
- #rev: v0.0.267
- #hooks:
- #- id: ruff
- #args: [--fix, --exit-non-zero-on-fix]
+- repo: https://github.com/PyCQA/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+- repo: https://github.com/charliermarsh/ruff-pre-commit
+ rev: v0.0.267
+ hooks:
+ - id: ruff
+ args:
+ - --fix
+ - --exit-non-zero-on-fix
+- repo: https://github.com/pappasam/toml-sort
+ rev: v0.23.1
+ hooks:
+ - id: toml-sort-fix
+ exclude: poetry.lock
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.4.0
+ hooks:
+ - id: check-added-large-files
+ - id: check-ast
+ - id: check-executables-have-shebangs
+ - id: check-json
+ - id: check-merge-conflict
+ - id: check-symlinks
+ - id: check-toml
+ - id: check-yaml
+ - id: debug-statements
+ - id: destroyed-symlinks
+ - id: detect-private-key
+ - id: end-of-file-fixer
+ - id: fix-byte-order-marker
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+- repo: https://github.com/psf/black
+ rev: 23.3.0
+ hooks:
+ - id: black
diff --git a/README.md b/README.md
index 088e8282..4eccc140 100644
--- a/README.md
+++ b/README.md
@@ -84,7 +84,7 @@ conda activate happypose
mamba install compilers -c conda-forge
pip install open3d
mkdir /build && cd /build && git clone https://github.com/MIT-SPARK/TEASER-plusplus.git
-cd TEASER-plusplus && mkdir build && cd build
+cd TEASER-plusplus && mkdir build && cd build
cmake -DTEASERPP_PYTHON_VERSION=3.9 .. && make teaserpp_python
cd python && pip install .
-```
\ No newline at end of file
+```
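A quick smoke test for the TEASER++ build above — a minimal sketch, assuming the python binding installs under its upstream module name `teaserpp_python` (the solver names below follow upstream TEASER++ examples):

```python
# Import check for the TEASER++ python binding built above; the module and
# class names follow upstream TEASER++ examples and may differ per build.
import teaserpp_python

params = teaserpp_python.RobustRegistrationSolver.Params()
solver = teaserpp_python.RobustRegistrationSolver(params)
print("TEASER++ binding OK:", type(solver).__name__)
```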
diff --git a/configs/code_snapshot/dsgen_deps.yaml b/configs/code_snapshot/dsgen_deps.yaml
index 3afefd27..0133309b 100644
--- a/configs/code_snapshot/dsgen_deps.yaml
+++ b/configs/code_snapshot/dsgen_deps.yaml
@@ -5,4 +5,3 @@ python_packages_dir:
- ${oc.env:PROJECTS_DIR}/happypose
- ${oc.env:PROJECTS_DIR}/blenderproc
- ${oc.env:PROJECTS_DIR}/bop_toolkit_lib
-
diff --git a/configs/code_snapshot/happypose.yaml b/configs/code_snapshot/happypose.yaml
index fdcf7cfc..c2645962 100644
--- a/configs/code_snapshot/happypose.yaml
+++ b/configs/code_snapshot/happypose.yaml
@@ -5,5 +5,5 @@ python_packages_dir:
- ${oc.env:PROJECTS_DIR}/happypose
snapshot_dir: ${oc.env:HP_DATA_DIR}/code_snapshots/${hydra:job.id}
-
-exclude_path: ${oc.env:HP_ROOT_DIR}/configs/snapshot_ignore.txt
\ No newline at end of file
+
+exclude_path: ${oc.env:HP_ROOT_DIR}/configs/snapshot_ignore.txt
diff --git a/configs/dsgen/default.yaml b/configs/dsgen/default.yaml
index 67ae4486..568ba619 100644
--- a/configs/dsgen/default.yaml
+++ b/configs/dsgen/default.yaml
@@ -1,2 +1,2 @@
defaults:
- - base_dsgen
\ No newline at end of file
+ - base_dsgen
diff --git a/configs/dsgen/fastrun.yaml b/configs/dsgen/fastrun.yaml
index ac7c7367..67430e19 100644
--- a/configs/dsgen/fastrun.yaml
+++ b/configs/dsgen/fastrun.yaml
@@ -2,4 +2,4 @@ defaults:
- default
few: True
verbose: True
-debug: True
\ No newline at end of file
+debug: True
diff --git a/configs/dsgen/gso_1M.yaml b/configs/dsgen/gso_1M.yaml
index a34ffc20..3afef1cd 100644
--- a/configs/dsgen/gso_1M.yaml
+++ b/configs/dsgen/gso_1M.yaml
@@ -1,3 +1,3 @@
defaults:
- default
-dataset_id: gso_1M
\ No newline at end of file
+dataset_id: gso_1M
diff --git a/configs/job_env/happypose.yaml b/configs/job_env/happypose.yaml
index a7f2630b..eac801c6 100644
--- a/configs/job_env/happypose.yaml
+++ b/configs/job_env/happypose.yaml
@@ -1,3 +1,3 @@
defaults:
- base_job_env
-conda_env: 'happypose'
\ No newline at end of file
+conda_env: 'happypose'
diff --git a/configs/job_env/jz_yann.yaml b/configs/job_env/jz_yann.yaml
index 03352267..22156ba6 100644
--- a/configs/job_env/jz_yann.yaml
+++ b/configs/job_env/jz_yann.yaml
@@ -5,4 +5,4 @@ env:
HP_DATA_DIR: $WORK/data/happypose
HAPPYPOSE_DATA_DIR: $WORK/data/megapose
BLENDER_INSTALL_DIR: $WORK/blender/blender-2.93.0-linux-x64
- BLENDERPROC_DIR: $WORK/projects/blenderproc
\ No newline at end of file
+ BLENDERPROC_DIR: $WORK/projects/blenderproc
diff --git a/configs/job_env/lda.yaml b/configs/job_env/lda.yaml
index af3b0d57..f9d93348 100644
--- a/configs/job_env/lda.yaml
+++ b/configs/job_env/lda.yaml
@@ -5,4 +5,4 @@ env:
HP_DATA_DIR: /home/ylabbe/data/happypose
HAPPYPOSE_DATA_DIR: /home/ylabbe/data/megapose-private
BLENDER_INSTALL_DIR: $HOME/blenderproc/blender-2.93.88-linux-x64
- BLENDER_PROC_DIR: /home/ylabbe/projects/blenderproc
\ No newline at end of file
+ BLENDER_PROC_DIR: /home/ylabbe/projects/blenderproc
diff --git a/configs/local_job/single_gpu.yaml b/configs/local_job/single_gpu.yaml
index 6386e9e0..b67a42f1 100644
--- a/configs/local_job/single_gpu.yaml
+++ b/configs/local_job/single_gpu.yaml
@@ -2,4 +2,4 @@ defaults:
- base_local_job
nodes: 1
tasks_per_node: 1
-gpus_per_node: 1
\ No newline at end of file
+gpus_per_node: 1
diff --git a/configs/local_node/lda.yaml b/configs/local_node/lda.yaml
index 34d0eaf6..0b04d3a2 100644
--- a/configs/local_node/lda.yaml
+++ b/configs/local_node/lda.yaml
@@ -3,4 +3,4 @@ defaults:
gpus_per_node: 1
mem_per_gpu: '8GB'
cpus_per_gpu: 12
-mem_per_cpu: '2.7GB'
\ No newline at end of file
+mem_per_cpu: '2.7GB'
diff --git a/configs/run_ds_postproc/default.yaml b/configs/run_ds_postproc/default.yaml
index 3b6910b3..aa419664 100644
--- a/configs/run_ds_postproc/default.yaml
+++ b/configs/run_ds_postproc/default.yaml
@@ -16,8 +16,8 @@ runner:
cpus_per_task: 1
hydra:
- run:
+ run:
dir: ${oc.env:HP_DATA_DIR}/hydra_outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
n_jobs: 128
-ds_dir: ${oc.env:HP_DATA_DIR}/blender_pbr_datasets/gso_1M
\ No newline at end of file
+ds_dir: ${oc.env:HP_DATA_DIR}/blender_pbr_datasets/gso_1M
diff --git a/configs/run_dsgen/default.yaml b/configs/run_dsgen/default.yaml
index ce8ee19d..19d65cdc 100644
--- a/configs/run_dsgen/default.yaml
+++ b/configs/run_dsgen/default.yaml
@@ -20,5 +20,5 @@ runner:
cpus_per_task: 1
hydra:
- run:
- dir: ${oc.env:HP_DATA_DIR}/hydra_outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
\ No newline at end of file
+ run:
+ dir: ${oc.env:HP_DATA_DIR}/hydra_outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
diff --git a/configs/run_dsgen/fastrun.yaml b/configs/run_dsgen/fastrun.yaml
index 3e0d22ce..1aa887d2 100644
--- a/configs/run_dsgen/fastrun.yaml
+++ b/configs/run_dsgen/fastrun.yaml
@@ -2,8 +2,8 @@
start_chunk: 0
n_jobs: 4
n_chunks: 4
-ds:
+ds:
debug: True
verbose: True
overwrite: True
- few: True
\ No newline at end of file
+ few: True
diff --git a/configs/run_dsgen/gso_1M.yaml b/configs/run_dsgen/gso_1M.yaml
index 3a5fe8b7..4ff2e19e 100644
--- a/configs/run_dsgen/gso_1M.yaml
+++ b/configs/run_dsgen/gso_1M.yaml
@@ -3,4 +3,4 @@ defaults:
- override /dsgen@ds: gso_1M
n_jobs: 128
-n_chunks: 25000
\ No newline at end of file
+n_chunks: 25000
diff --git a/configs/run_dsgen/shapenet_1M.yaml b/configs/run_dsgen/shapenet_1M.yaml
index 435351da..546e1872 100644
--- a/configs/run_dsgen/shapenet_1M.yaml
+++ b/configs/run_dsgen/shapenet_1M.yaml
@@ -3,4 +3,4 @@ defaults:
- override /dsgen@ds: shapenet_1M
n_jobs: 350
-n_chunks: 50000
\ No newline at end of file
+n_chunks: 50000
diff --git a/configs/runner/yann_sgpu.yaml b/configs/runner/yann_sgpu.yaml
index 26786b8f..a92b0adf 100644
--- a/configs/runner/yann_sgpu.yaml
+++ b/configs/runner/yann_sgpu.yaml
@@ -7,9 +7,9 @@ defaults:
- yann
- single_gpu
- jz
- - /job_env:
+ - /job_env:
- happypose
- lda
- _self_
-log_dir: ${oc.env:HP_DATA_DIR}/submitit_logs
\ No newline at end of file
+log_dir: ${oc.env:HP_DATA_DIR}/submitit_logs
diff --git a/configs/slurm_job/jz.yaml b/configs/slurm_job/jz.yaml
index 83cebb06..697f9ff8 100644
--- a/configs/slurm_job/jz.yaml
+++ b/configs/slurm_job/jz.yaml
@@ -4,4 +4,4 @@ defaults:
qos: 'qos_gpu-t3'
time: '20:00:00'
additional_parameters:
- hint: nomultithread
\ No newline at end of file
+ hint: nomultithread
diff --git a/configs/slurm_job/single_gpu.yaml b/configs/slurm_job/single_gpu.yaml
index 007c9d06..8e82d1a2 100644
--- a/configs/slurm_job/single_gpu.yaml
+++ b/configs/slurm_job/single_gpu.yaml
@@ -3,4 +3,4 @@ defaults:
nodes: 1
tasks_per_node: 1
-gpus_per_node: 1
\ No newline at end of file
+gpus_per_node: 1
diff --git a/configs/slurm_job/yann.yaml b/configs/slurm_job/yann.yaml
index 79cac88e..68387641 100644
--- a/configs/slurm_job/yann.yaml
+++ b/configs/slurm_job/yann.yaml
@@ -1,4 +1,4 @@
defaults:
- base_slurm_job
-account: 'vuw@v100'
\ No newline at end of file
+account: 'vuw@v100'
diff --git a/configs/slurm_queue/gpu_p2.yaml b/configs/slurm_queue/gpu_p2.yaml
index 47796384..b9fa99c6 100644
--- a/configs/slurm_queue/gpu_p2.yaml
+++ b/configs/slurm_queue/gpu_p2.yaml
@@ -4,4 +4,4 @@ partition: "gpu_p2"
gpus_per_node: 8
cpus_per_gpu: 3
mem_per_gpu: '32GB'
-mem_per_cpu: ???
\ No newline at end of file
+mem_per_cpu: ???
diff --git a/configs/slurm_queue/v100.yaml b/configs/slurm_queue/v100.yaml
index 2993db9c..06d0d8f9 100644
--- a/configs/slurm_queue/v100.yaml
+++ b/configs/slurm_queue/v100.yaml
@@ -4,4 +4,4 @@ partition: "gpu_p13"
gpus_per_node: 4
cpus_per_gpu: 10
mem_per_gpu: '32GB'
-mem_per_cpu: '10GB'
\ No newline at end of file
+mem_per_cpu: '10GB'
diff --git a/configs/snapshot_ignore.txt b/configs/snapshot_ignore.txt
index 834b22c3..b3131c8b 100644
--- a/configs/snapshot_ignore.txt
+++ b/configs/snapshot_ignore.txt
@@ -23,4 +23,4 @@ blenderproc/scripts/
blenderproc/images/
my-notebooks/*.json
colors.json
-*.ttf
\ No newline at end of file
+*.ttf
diff --git a/deps/bop_toolkit_challenge b/deps/bop_toolkit_challenge
deleted file mode 160000
index 26bad9ca..00000000
--- a/deps/bop_toolkit_challenge
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 26bad9cae08a866b463829a880b75a028cfdc258
diff --git a/docs/book/cosypose/download_data.md b/docs/book/cosypose/download_data.md
index a06e9f8d..faefa55e 100644
--- a/docs/book/cosypose/download_data.md
+++ b/docs/book/cosypose/download_data.md
@@ -8,22 +8,19 @@ All data used (datasets, models, results, ...) are stored in a directory `$HAPPY
For both T-LESS and YCB-Video, we use the datasets in the [BOP format](https://bop.felk.cvut.cz/datasets/). If you already have them on your disk, place them in `$HAPPYPOSE_DATA_DIR/bop_datasets`. Alternatively, you can download it using :
```sh
-python -m happypose.toolbox.utils.download --bop_dataset=ycbv
-python -m happypose.toolbox.utils.download --bop_dataset=tless
+python -m happypose.toolbox.utils.download --bop_dataset ycbv tless
```
Additional files that contain information about the datasets used to fairly compare with prior works on both datasets.
```sh
-python -m happypose.toolbox.utils.download --bop_extra_files=ycbv
-python -m happypose.toolbox.utils.download --bop_extra_files=tless
+python -m happypose.toolbox.utils.download --bop_extra_files ycbv tless
```
We use [pybullet](https://pybullet.org/wordpress/) for rendering images which requires object models to be provided in the URDF format. We provide converted URDF files, they can be downloaded using:
```sh
-python -m happypose.toolbox.utils.download --urdf_models=ycbv
-python -m happypose.toolbox.utils.download --urdf_models=tless.cad
+python -m happypose.toolbox.utils.download --urdf_models ycbv tless.cad
```
In the BOP format, the YCB objects `002_master_chef_can` and `040_large_marker` are considered symmetric, but not by previous works such as PoseCNN, PVNet and DeepIM. To ensure a fair comparison (using ADD instead of ADD-S for ADD-(S) for these objects), these objects must *not* be considered symmetric in the evaluation. To keep the uniformity of the models format, we generate a set of YCB objects `models_bop-compat_eval` that can be used to fairly compare our approach against previous works. You can download them directly:
@@ -51,14 +48,16 @@ Notes:
```sh
#ycbv
- python -m happypose.toolbox.utils.download --cosypose_model=detector-bop-ycbv-pbr--970850
- python -m happypose.toolbox.utils.download --cosypose_model=coarse-bop-ycbv-pbr--724183
- python -m happypose.toolbox.utils.download --cosypose_model=refiner-bop-ycbv-pbr--604090
+ python -m happypose.toolbox.utils.download --cosypose_models \
+ detector-bop-ycbv-pbr--970850 \
+ coarse-bop-ycbv-pbr--724183 \
+ refiner-bop-ycbv-pbr--604090
#tless
- python -m happypose.toolbox.utils.download --cosypose_model=detector-bop-tless-pbr--873074
- python -m happypose.toolbox.utils.download --cosypose_model=coarse-bop-tless-pbr--506801
- python -m happypose.toolbox.utils.download --cosypose_model=refiner-bop-tless-pbr--233420
+ python -m happypose.toolbox.utils.download --cosypose_models \
+ detector-bop-tless-pbr--873074 \
+ coarse-bop-tless-pbr--506801 \
+ refiner-bop-tless-pbr--233420
```
## Pre-trained models for single-view estimator
@@ -68,15 +67,14 @@ The pre-trained models of the single-view pose estimator can be downloaded using
```sh
# YCB-V Single-view refiner
-python -m happypose.toolbox.utils.download --cosypose_model=ycbv-refiner-finetune--251020
+python -m happypose.toolbox.utils.download --cosypose_models ycbv-refiner-finetune--251020
# YCB-V Single-view refiner trained on synthetic data only
# Only download this if you are interested in retraining the above model
-python -m happypose.toolbox.utils.download --cosypose_model=ycbv-refiner-syntonly--596719
+python -m happypose.toolbox.utils.download --cosypose_models ycbv-refiner-syntonly--596719
# T-LESS coarse and refiner models
-python -m happypose.toolbox.utils.download --cosypose_model=tless-coarse--10219
-python -m happypose.toolbox.utils.download --cosypose_model=tless-refiner--585928
+python -m happypose.toolbox.utils.download --cosypose_models tless-coarse--10219 tless-refiner--585928
```
## 2D detections
@@ -84,16 +82,16 @@ python -m happypose.toolbox.utils.download --cosypose_model=tless-refiner--58592
To ensure a fair comparison with prior works on both datasets, we use the same detections as DeepIM (from PoseCNN) on YCB-Video and the same as Pix2pose (from a RetinaNet model) on T-LESS. Download the saved 2D detections for both datasets using
```sh
-python -m happypose.toolbox.utils.download --detections=ycbv_posecnn
+python -m happypose.toolbox.utils.download --detections ycbv_posecnn
# SiSo detections: 1 detection with highest per score per class per image on all images
# Available for each image of the T-LESS dataset (primesense sensor)
# These are the same detections as used in Pix2pose's experiments
-python -m happypose.toolbox.utils.download --detections=tless_pix2pose_retinanet_siso_top1
+python -m happypose.toolbox.utils.download --detections tless_pix2pose_retinanet_siso_top1
# ViVo detections: All detections for a subset of 1000 images of T-LESS.
# Used in our multi-view experiments.
-python -m happypose.toolbox.utils.download --detections=tless_pix2pose_retinanet_vivo_all
+python -m happypose.toolbox.utils.download --detections tless_pix2pose_retinanet_vivo_all
```
If you are interested in re-training a detector, please see the BOP 2020 section.
@@ -103,4 +101,4 @@ Notes:
- The PoseCNN detections (and coarse pose estimates) on YCB-Video were extracted and converted from [these PoseCNN results](https://github.com/yuxng/YCB_Video_toolbox/blob/master/results_PoseCNN_RSS2018.zip).
- The Pix2pose detections were extracted using [pix2pose's](https://github.com/kirumang/Pix2Pose) code. We used the detection model from their paper, see [here](https://github.com/kirumang/Pix2Pose#download-pre-trained-weights). For the ViVo detections, their code was slightly modified. The code used to extract detections can be found [here](https://github.com/ylabbe/pix2pose_cosypose).
-
\ No newline at end of file
+
diff --git a/docs/book/cosypose/inference.md b/docs/book/cosypose/inference.md
index 196ae917..90e50c6e 100644
--- a/docs/book/cosypose/inference.md
+++ b/docs/book/cosypose/inference.md
@@ -1,14 +1,15 @@
# Inference
-Here are provided the minimal commands you have to run in order to run the inference of CosyPose. You need to set up the environment variable `$HAPPYPOSE_DATA_DIR` as explained in the README.
+Here are the minimal commands needed to run CosyPose inference. You need to set the environment variable `$HAPPYPOSE_DATA_DIR` as explained in the README.
## 1. Download pre-trained pose estimation models
```sh
#ycbv
-python -m happypose.toolbox.utils.download --cosypose_model=detector-bop-ycbv-pbr--970850
-python -m happypose.toolbox.utils.download --cosypose_model=coarse-bop-ycbv-pbr--724183
-python -m happypose.toolbox.utils.download --cosypose_model=refiner-bop-ycbv-pbr--604090
+python -m happypose.toolbox.utils.download --cosypose_models \
+ detector-bop-ycbv-pbr--970850 \
+ coarse-bop-ycbv-pbr--724183 \
+ refiner-bop-ycbv-pbr--604090
```
## 2. Download YCB-V Dataset
@@ -22,7 +23,7 @@ python -m happypose.toolbox.utils.download --bop_dataset=ycbv
```sh
cd $HAPPYPOSE_DATA_DIR
wget https://memmo-data.laas.fr/static/examples.tar.xz
-tar xf examples.tar.xz
+tar xf examples.tar.xz
```
## 4. Run the script
diff --git a/docs/book/cosypose/overview_method.md b/docs/book/cosypose/overview_method.md
index e00056f5..9b689ead 100644
--- a/docs/book/cosypose/overview_method.md
+++ b/docs/book/cosypose/overview_method.md
@@ -52,7 +52,7 @@ This repository contains the code for the full CosyPose approach, including:
## Single-view single-object 6D pose estimator
![Single view predictions](./images/example_predictions.png)
-
+
Given an RGB image and a 2D bounding box of an object with known 3D model, the 6D pose estimator predicts the full 6D pose of the object with respect to the camera. Our method is inspired by DeepIM with several simplifications and technical improvements. It is fully implemented in pytorch and achieve single-view state-of-the-art on YCB-Video and T-LESS. We provide pre-trained models used in our experiments on both datasets. We make the training code that we used to train them available. It can be parallelized on multiple GPUs and multiple nodes.
## Synthetic data generation
@@ -71,4 +71,4 @@ Single-view object-level reconstruction of a scene often fails because of detect
![BOP](./images/bop_datasets.png)
-We used our {coarse+refinement} single-view 6D pose estimation method in the [BOP challenge 2020](https://bop.felk.cvut.cz/challenges/bop-challenge-2020/). In addition, we trained a MaskRCNN detector (torchvision's implementation) on each of the 7 core datasets (LM-O, T-LESS, TUD-L, IC-BIN, ITODD, HB, YCB-V). We provide 2D detectors and 6D pose estimation models for these datasets. All training (including 2D detector), inference and evaluation code are available in this repository. It can be easily used for another dataset in the BOP format.
\ No newline at end of file
+We used our {coarse+refinement} single-view 6D pose estimation method in the [BOP challenge 2020](https://bop.felk.cvut.cz/challenges/bop-challenge-2020/). In addition, we trained a MaskRCNN detector (torchvision's implementation) on each of the 7 core datasets (LM-O, T-LESS, TUD-L, IC-BIN, ITODD, HB, YCB-V). We provide 2D detectors and 6D pose estimation models for these datasets. All training (including 2D detector), inference and evaluation code are available in this repository. It can be easily used for another dataset in the BOP format.
diff --git a/docs/book/cosypose/overview_repo.md b/docs/book/cosypose/overview_repo.md
index df551296..25ec2ba6 100644
--- a/docs/book/cosypose/overview_repo.md
+++ b/docs/book/cosypose/overview_repo.md
@@ -2,8 +2,8 @@
This repository is divided into different entry points
-- [Inference](./test-install.md): `run_cosypose_on_example.py` is used to run the inference pipeline on a single example image.
-- [Evaluation](./evaluate.md): `run_full_cosypose_evaluation.py` is ued to first run inference on one or several datasets, and then use the results obtained to evaluate the method on these datasets.
+- [Inference](./test-install.md): `run_cosypose_on_example.py` is used to run the inference pipeline on a single example image.
+- [Evaluation](./evaluate.md): `run_full_cosypose_evaluation.py` is used to first run inference on one or several datasets, and then use the results obtained to evaluate the method on these datasets.
- [Training](./train.md): `run_detector_training.py` is used to train the detector part of Cosypose.`run_pose_training.py` can be used to train the `coarse` model or the `refiner` model.
-In this repository, the version provided of CosyPose is different to the one of the original repository. In particular, we switched the 3D renderer from [PyBullet](https://pybullet.org/wordpress/) to [Panda3d](https://www.panda3d.org/). Thus, the results obtained may differ from the one reported in the original paper and repository.
\ No newline at end of file
+In this repository, the version of CosyPose provided differs from the one in the original repository. In particular, we switched the 3D renderer from [PyBullet](https://pybullet.org/wordpress/) to [Panda3d](https://www.panda3d.org/). Thus, the results obtained may differ from those reported in the original paper and repository.
diff --git a/docs/book/megapose/download_data.md b/docs/book/megapose/download_data.md
index 5902ac36..6a5253f3 100644
--- a/docs/book/megapose/download_data.md
+++ b/docs/book/megapose/download_data.md
@@ -1,25 +1,37 @@
# Download example data for minimal testing
-```
+```sh
cd $HAPPYPOSE_DATA_DIR
wget https://memmo-data.laas.fr/static/examples.tar.xz
tar xf examples.tar.xz
```
-
+
# Download pre-trained pose estimation models
-Download pose estimation models to $HAPPYPOSE_DATA_DIR/megapose-models:
+Download pose estimation models to `$HAPPYPOSE_DATA_DIR/megapose-models`:
-```
+```sh
python -m happypose.toolbox.utils.download --megapose_models
```
+# Download pre-trained detection models
+Megapose can use pretrained detectors from CosyPose, which can be downloaded to `$HAPPYPOSE_DATA_DIR/experiments`:
+
+```sh
+python -m happypose.toolbox.utils.download --cosypose_models \
+    detector-bop-hb-pbr--497808 \
+    detector-bop-hope-pbr--15246 \
+    detector-bop-icbin-pbr--947409 \
+    detector-bop-itodd-pbr--509908 \
+    detector-bop-lmo-pbr--517542 \
+    detector-bop-tless-pbr--873074 \
+    detector-bop-tudl-pbr--728047 \
+    detector-bop-ycbv-pbr--970850
+```
+
# Dataset
## Dataset information
The dataset is available at this [url](https://drive.google.com/drive/folders/1CXc_GG11jNVMeGr-Mb4o4iiNjYeKDkKd?usp=sharing). It is split into two datasets: `gso_1M` (Google Scanned Objects) and `shapenet_1M` (ShapeNet objects). Each dataset has 1 million images which were generated using [BlenderProc](https://github.com/DLR-RM/BlenderProc).
-Datasets are released in the [webdataset](https://github.com/webdataset/webdataset) format for high reading performance. Each dataset is split into chunks of size ~600MB containing 1000 images each.
+Datasets are released in the [webdataset](https://github.com/webdataset/webdataset) format for high reading performance. Each dataset is split into chunks of size ~600MB containing 1000 images each.
We provide the pre-processed meshes ready to be used for rendering and training in this [directory](https://drive.google.com/drive/folders/1AYxkv7jpDniOnTcMAxiWbdhPo8WBJaZG):
- `google_scanned_objects.zip`
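Since each chunk is a plain webdataset tar shard, it can be inspected directly after download — a minimal sketch, assuming the `webdataset` package; the shard path is a placeholder, and the per-sample key names depend on how the chunks were packed:

```python
# List the keys stored for the first sample of one shard.
# "gso_1M/00000000.tar" is an illustrative path, not an official file name.
import webdataset as wds

dataset = wds.WebDataset("gso_1M/00000000.tar")
for sample in dataset:
    # "__key__" is the sample id; the remaining keys are the file
    # extensions of the tar members grouped under that id (raw bytes).
    print(sample["__key__"], sorted(k for k in sample if k != "__key__"))
    break
```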
diff --git a/docs/book/megapose/evaluate.md b/docs/book/megapose/evaluate.md
index 0dc893ef..ee9fc1ef 100644
--- a/docs/book/megapose/evaluate.md
+++ b/docs/book/megapose/evaluate.md
@@ -2,17 +2,41 @@
Please make sure you followed the steps relative to the evaluation in the main readme.
-An example to run the evaluation on `YCBV` dataset. Several datasets can be added to the list.
+## Evaluating with the Megapose detector
+Run the detector part of the Megapose pipeline to detect bounding boxes in the image dataset at run time.
```
-python -m happypose.pose_estimators.megapose.src.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19] result_id=fastsam_kbestdet_1posehyp detection_coarse_types=[["sam","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=true run_bop_eval=true
+python -m happypose.pose_estimators.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19,lmo.bop19,tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=detector_1posehyp detection_coarse_types=[["detector","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=false run_bop_eval=true
```
-To reproduce the results we obtained for the BOP-Challenge, please run the following commands :
+## Evaluating with external detections
+
+First, download the external detections from the BOP website (e.g., the default detections for tasks 1 and 4). You should have one CSV file for every BOP dataset.
+Place these files in a directory of your choice and define the environment variable:
+
+```sh
+export EXTERNAL_DETECTIONS_DIR=/path/to/saved/detections/
+```
+
+Megapose expects a JSON file named `bop_detections_filenames.json` placed in `EXTERNAL_DETECTIONS_DIR`, mapping BOP dataset names to the detection file names, e.g. for the CNOS detections (the default detections for task 4, BOP'23); a sketch for generating this file follows this diff:
+
+```json
+{
+ "ycbv": "cnos-fastsam_ycbv-test_f4f2127c-6f59-447c-95b3-28e1e591f1a1.json",
+ "lmo": "cnos-fastsam_lmo-test_3cb298ea-e2eb-4713-ae9e-5a7134c5da0f.json",
+ "tless": "cnos-fastsam_tless-test_8ca61cb0-4472-4f11-bce7-1362a12d396f.json",
+ "tudl": "cnos-fastsam_tudl-test_c48a2a95-1b41-4a51-9920-a667cb3d7149.json",
+ "icbin": "cnos-fastsam_icbin-test_f21a9faf-7ef2-4325-885f-f4b6460f4432.json",
+ "itodd": "cnos-fastsam_itodd-test_df32d45b-301c-4fc9-8769-797904dd9325.json",
+ "hb": "cnos-fastsam_hb-test_db836947-020a-45bd-8ec5-c95560b68011.json"
+}
+```
+
+To reproduce the results we obtained for the BOP Challenge, please run the following commands:
```sh
# RGB 1 hyp
-python -m happypose.pose_estimators.megapose.src.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19,lmo.bop19,tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=fastsam_kbestdet_1posehyp detection_coarse_types=[["sam","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=False run_bop_eval=true
+python -m happypose.pose_estimators.megapose.scripts.run_full_megapose_eval coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19,lmo.bop19,tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=exte_det_1posehyp detection_coarse_types=[["exte","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=False run_bop_eval=true
```
Results :
@@ -22,7 +46,7 @@ Results :
```sh
# RGB 5 hyp
-python -m happypose.pose_estimators.megapose.src.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19,lmo.bop19,tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=fastsam_kbestdet_5posehyp detection_coarse_types=[["sam","SO3_grid"]] inference.n_pose_hypotheses=5 skip_inference=False run_bop_eval=true
+python -m happypose.pose_estimators.megapose.scripts.run_full_megapose_eval coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[ycbv.bop19,lmo.bop19,tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=exte_det_5posehyp detection_coarse_types=[["exte","SO3_grid"]] inference.n_pose_hypotheses=5 skip_inference=False run_bop_eval=true
```
Results :
@@ -31,7 +55,7 @@ Results :
```sh
# RGB-D 5 hyp
-python -m torch.distributed.run --nproc_per_node gpu -m happypose.pose_estimators.megapose.src.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=fastsam_kbestdet_5posehyp_teaserpp detection_coarse_types=[["sam","SO3_grid"]] inference.n_pose_hypotheses=5 inference.run_depth_refiner=true inference.depth_refiner=teaserpp skip_inference=False run_bop_eval=True
+python -m torch.distributed.run --nproc_per_node gpu -m happypose.pose_estimators.megapose.scripts.run_full_megapose_eval coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[tless.bop19,tudl.bop19,icbin.bop19,hb.bop19,itodd.bop19] result_id=exte_det_5posehyp_teaserpp detection_coarse_types=[["exte","SO3_grid"]] inference.n_pose_hypotheses=5 inference.run_depth_refiner=true inference.depth_refiner=teaserpp skip_inference=False run_bop_eval=True
```
Results :
@@ -42,7 +66,7 @@ Results :
In particular, for this challenge, we used Jean Zay, a french supercalculator. Here is a quick documentation, for additional information on who can use this calculator, please refer to the [official documentation](http://www.idris.fr/eng/jean-zay/index.html).
-You need to create an account to log on Jean Zay : https://www.edari.fr/
+You need to create an account to log on Jean Zay : https://www.edari.fr/
To connect by ssh to Jean Zay using this account, you need to register the IP address of the machine you use to connect to Jean Zay. If you work in a french research laboratory, your laboratory probably have a bouncing machine that is registered.
@@ -104,5 +128,5 @@ conda activate happypose_pytorch3d
cd happypose
-python -m torch.distributed.run --nproc_per_node gpu -m happypose.pose_estimators.megapose.src.megapose.scripts.run_full_megapose_eval detector_run_id=bop_pbr coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[lmo.bop19] result_id=fastsam_kbestdet_1posehyp detection_coarse_types=[["sam","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=False run_bop_eval=true
-```
\ No newline at end of file
+python -m torch.distributed.run --nproc_per_node gpu -m happypose.pose_estimators.megapose.scripts.run_full_megapose_eval coarse_run_id=coarse-rgb-906902141 refiner_run_id=refiner-rgb-653307694 ds_names=[lmo.bop19] result_id=exte_det_1posehyp detection_coarse_types=[["exte","SO3_grid"]] inference.n_pose_hypotheses=1 skip_inference=False run_bop_eval=true
+```
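As referenced above, a minimal sketch for generating `bop_detections_filenames.json` in `EXTERNAL_DETECTIONS_DIR`; the two entries shown reuse the CNOS file names quoted in the docs, and the mapping should be extended to every dataset you evaluate:

```python
# Write the bop-dataset-name -> detection-file mapping that megapose reads
# from EXTERNAL_DETECTIONS_DIR (file names taken from the docs above).
import json
import os
from pathlib import Path

detections_dir = Path(os.environ["EXTERNAL_DETECTIONS_DIR"])
filenames = {
    "ycbv": "cnos-fastsam_ycbv-test_f4f2127c-6f59-447c-95b3-28e1e591f1a1.json",
    "lmo": "cnos-fastsam_lmo-test_3cb298ea-e2eb-4713-ae9e-5a7134c5da0f.json",
}
(detections_dir / "bop_detections_filenames.json").write_text(
    json.dumps(filenames, indent=2)
)
```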
diff --git a/docs/book/megapose/inference.md b/docs/book/megapose/inference.md
index a5997dd2..b44c0d46 100644
--- a/docs/book/megapose/inference.md
+++ b/docs/book/megapose/inference.md
@@ -1,6 +1,6 @@
# Inference
-Here are provided the minimal commands you have to run in order to run the inference of CosyPose. You need to set up the environment variable `$HAPPYPOSE_DATA_DIR` as explained in the README.
+Here are the minimal commands needed to run MegaPose inference. You need to set the environment variable `$HAPPYPOSE_DATA_DIR` as explained in the README.
## 1. Download pre-trained pose estimation models
@@ -15,7 +15,7 @@ We estimate the pose for a barbecue sauce bottle (from the [HOPE](https://github
```sh
cd $HAPPYPOSE_DATA_DIR
wget https://memmo-data.laas.fr/static/examples.tar.xz
-tar xf examples.tar.xz
+tar xf examples.tar.xz
```
The input files are the following:
@@ -67,7 +67,7 @@ This file contains a list of objects with their estimated poses . For each objec
[{"label": "barbecue-sauce", "TWO": [[0.5453961536730983, 0.6226545207599095, -0.43295293693197473, 0.35692612413663855], [0.10723329335451126, 0.07313819974660873, 0.45735278725624084]]}]
-Finally, you can visualize the results using:
+Finally, you can visualize the results using:
```sh
python -m happypose.pose_estimators.megapose.scripts.run_inference_on_example barbecue-sauce --run-inference --vis-outputs
@@ -81,4 +81,4 @@ $HAPPYPOSE_DATA_DIR/examples/barbecue-sauce/
visualizations/all_results.png
```
-
\ No newline at end of file
+
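To consume the inference output shown above, a minimal sketch that turns each `TWO` entry (quaternion + translation) into a 4x4 homogeneous transform; the file path is a placeholder, and the scalar-last `xyzw` quaternion order is an assumption to verify against happypose's source:

```python
# Build 4x4 transforms from the [quaternion, translation] pairs in the
# inference output; the quaternion order (xyzw assumed) must be verified.
import json
from pathlib import Path

import numpy as np
from scipy.spatial.transform import Rotation

path = Path("object_data.json")  # placeholder: point at the script's output
for obj in json.loads(path.read_text()):
    quat, trans = obj["TWO"]
    T = np.eye(4)
    T[:3, :3] = Rotation.from_quat(quat).as_matrix()
    T[:3, 3] = trans
    print(obj["label"])
    print(T)
```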
diff --git a/docs/book/megapose/overview_method.md b/docs/book/megapose/overview_method.md
index 063ec46a..2fba64fd 100644
--- a/docs/book/megapose/overview_method.md
+++ b/docs/book/megapose/overview_method.md
@@ -1,5 +1,5 @@
# MegaPose
-This repository contains code, models and dataset for our MegaPose paper.
+This repository contains the code, models and dataset for our MegaPose paper.
Yann Labbé, Lucas Manuelli, Arsalan Mousavian, Stephen Tyree, Stan Birchfield, Jonathan Tremblay, Justin Carpentier, Mathieu Aubry, Dieter Fox, Josef Sivic. “MegaPose: 6D Pose Estimation of Novel Objects via Render & Compare.” In: CoRL 2022.
@@ -33,19 +33,19 @@ This repository contains pre-trained models for pose estimation of novel objects
## Pose estimation of novel objects
-We provide pre-trained models for 6D pose estimation of novel objects.
+We provide pre-trained models for 6D pose estimation of novel objects.
-Given as inputs:
+Given as inputs:
- an RGB image (depth can also be used but is optional),
- the intrinsic parameters of the camera,
- a mesh of the object,
- a bounding box of that object in the image,
-our approach estimates the 6D pose of the object (3D rotation + 3D translation) with respect to the camera.
+our approach estimates the 6D pose of the object (3D rotation + 3D translation) with respect to the camera.
We provide a script and an example for inference on novel objects. After installation, please see the [Inference tutorial](#inference-tutorial).
## Large-scale synthetic dataset
-We provide the synthetic dataset we used to train MegaPose. The dataset contains 2 million images displaying more than 20,000 objects from the Google Scanned Objects and ShapeNet datasets. After installation, please see the [Dataset section](#dataset).
\ No newline at end of file
+We provide the synthetic dataset we used to train MegaPose. The dataset contains 2 million images displaying more than 20,000 objects from the Google Scanned Objects and ShapeNet datasets. After installation, please see the [Dataset section](#dataset).
diff --git a/docs/book/megapose/overview_repo.md b/docs/book/megapose/overview_repo.md
index f759a525..5a3e8700 100644
--- a/docs/book/megapose/overview_repo.md
+++ b/docs/book/megapose/overview_repo.md
@@ -2,8 +2,8 @@
This repository is divided into different entry points
-- [Inference](./test-install.md): `run_megapose_on_example.py` is used to run the inference pipeline on a single example image.
-- [Evaluation](./evaluate.md): `run_full_megapose_eval.py` is ued to first run inference on one or several datasets, and then use the results obtained to evaluate the method on these datasets.
+- [Inference](./test-install.md): `run_megapose_on_example.py` is used to run the inference pipeline on a single example image.
+- [Evaluation](./evaluate.md): `run_full_megapose_eval.py` is used to first run inference on one or several datasets, and then use the results obtained to evaluate the method on these datasets.
# Model Zoo
@@ -21,4 +21,4 @@ This repository is divided into different entry points
- Evaluates refined hypotheses using score from coarse model and selects the highest scoring one.
- `-icp` indicates running ICP refinement on the depth data.
-For optimal performance, we recommend using `megapose-1.0-RGB-multi-hypothesis` for an RGB image and `megapose-1.0-RGB-multi-hypothesis-icp` for an RGB-D image. An extended paper with full evaluation of these new approaches is coming soon.
\ No newline at end of file
+For optimal performance, we recommend using `megapose-1.0-RGB-multi-hypothesis` for an RGB image and `megapose-1.0-RGB-multi-hypothesis-icp` for an RGB-D image. An extended paper with full evaluation of these new approaches is coming soon.
diff --git a/environment.yml b/environment.yml
index 69a9a86c..e857f5c1 100644
--- a/environment.yml
+++ b/environment.yml
@@ -9,86 +9,5 @@ dependencies:
- nvidia::cudatoolkit==11.3.1
- python=3.9
- pip
- - wget
- - python-wget
- - joblib
- pytorch::pytorch==1.11.0
- torchvision==0.12.0
- - ipython
- - ipykernel
- - jupyterlab
-# - notebook
-# - nb_conda_kernels
-# - jupyter_contrib_nbextensions
- - pinocchio
- - rclone
- - pillow
- - ipywidgets
- - selenium
- - geckodriver
- - firefox
- - assimp
- - gxx_linux-64
- - ffmpeg
- - bzip2
- - git
- - cmake
- - numpy
- - pip:
- - pybullet
- - distributed
- - jinja2
- - pycollada
- - shapely
- - PyOpenGL
- - Cython
- - glumpy
- - scikit-video
- - scikit-image
- - scikit-learn
- - pywavefront
- - jupyter-client
- - jupyter-core
- - jupyter-server-proxy
- - jupyterlab
- - jupyterlab-server
- - meshcat
- - selenium
- - omegaconf
- - simplejson
- - line_profiler
- - opencv-python
- - torchnet
- - tqdm
- - lxml
- - transforms3d
- - panda3d
- - joblib
- - xarray
- - pandas
- - matplotlib
- - bokeh
- - plyfile
- - trimesh
- - ipdb
- - panda3d-gltf
- - colorama
- - pyyaml
- - ipykernel
- - scipy
- - pypng
- - h5py
- - seaborn
- - kornia
- - pyarrow
- - dt_apriltags
- - open3d
- - structlog
- - imageio
- - progressbar
- - pyyaml
- - psutil
- - webdataset
- - opencv-contrib-python
- - roma
- - torchgeometry
diff --git a/experiments/generate_dataset.py b/experiments/generate_dataset.py
index c552823f..8b00d21e 100644
--- a/experiments/generate_dataset.py
+++ b/experiments/generate_dataset.py
@@ -1,25 +1,21 @@
+import copy
+import time
import typing as tp
+from dataclasses import dataclass
+
import hydra
-import omegaconf
-import tqdm
-import time
-import copy
import numpy as np
+import omegaconf
import submitit
-from dataclasses import dataclass
from hydra.core.config_store import ConfigStore
-
-
-from job_runner.configs import (
- RunnerConfig,
-)
+from job_runner.configs import RunnerConfig
from job_runner.utils import make_setup
@dataclass
class DatasetGenerationConfig:
dataset_id: str
- chunk_ids: tp.Optional[tp.List[int]]
+ chunk_ids: tp.Optional[list[int]]
debug: bool = False
verbose: bool = True
overwrite: bool = False
@@ -52,10 +48,11 @@ def generate_chunks(ds_cfg: DatasetGenerationConfig):
@hydra.main(
- version_base=None, config_path="../configs", config_name="run_dsgen/default"
+ version_base=None,
+ config_path="../configs",
+ config_name="run_dsgen/default",
)
def main(cfg: Config):
-
if cfg.runner.use_slurm:
executor = submitit.AutoExecutor(folder=cfg.runner.log_dir)
executor.update_parameters(
@@ -84,7 +81,7 @@ def main(cfg: Config):
jobs = []
with executor.batch():
- for n, chunk_split_ in enumerate(chunk_splits):
+ for _n, chunk_split_ in enumerate(chunk_splits):
ds_cfg = copy.deepcopy(cfg.ds)
ds_cfg.chunk_ids = chunk_split_.tolist()
if cfg.dry_run:
diff --git a/experiments/job-runner/job_runner/configs.py b/experiments/job-runner/job_runner/configs.py
index db314421..33b44aa3 100644
--- a/experiments/job-runner/job_runner/configs.py
+++ b/experiments/job-runner/job_runner/configs.py
@@ -1,5 +1,6 @@
import typing as tp
from dataclasses import dataclass
+
from hydra.core.config_store import ConfigStore
@@ -30,21 +31,21 @@ class SlurmJobConfig(JobConfig):
account: str
qos: str
time: str
- additional_parameters: tp.Optional[tp.Dict[str, tp.Any]]
+ additional_parameters: tp.Optional[dict[str, tp.Any]]
@dataclass
class CodeSnapshotConfig:
snapshot_dir: tp.Optional[str]
exclude_path: tp.Optional[str]
- python_packages_dir: tp.Optional[tp.List[str]] = None
+ python_packages_dir: tp.Optional[list[str]] = None
@dataclass
class JobEnvironmentConfig:
conda_env: str
code_snapshot: tp.Optional[CodeSnapshotConfig] = None
- env: tp.Optional[tp.Dict[str, str]] = None
+ env: tp.Optional[dict[str, str]] = None
@dataclass
diff --git a/experiments/job-runner/job_runner/utils.py b/experiments/job-runner/job_runner/utils.py
index 427b598d..ed2b0c53 100644
--- a/experiments/job-runner/job_runner/utils.py
+++ b/experiments/job-runner/job_runner/utils.py
@@ -1,14 +1,11 @@
-import typing as tp
import pathlib
-import submitit
+import typing as tp
-from job_runner.configs import (
- JobEnvironmentConfig,
- RunnerConfig
-)
+import submitit
+from job_runner.configs import JobEnvironmentConfig, RunnerConfig
-def make_setup(cfg: JobEnvironmentConfig) -> tp.List[str]:
+def make_setup(cfg: JobEnvironmentConfig) -> list[str]:
setup = []
if cfg.env:
for k, v in cfg.env.items():
@@ -17,13 +14,15 @@ def make_setup(cfg: JobEnvironmentConfig) -> tp.List[str]:
def make_snapshots(
- code_directories: tp.List[pathlib.Path],
+ code_directories: list[pathlib.Path],
output_dir: pathlib.Path,
exclude: tp.Sequence[str] = (),
):
for code_dir in code_directories:
snapshot = submitit.helpers.RsyncSnapshot(
- snapshot_dir=output_dir / code_dir.name, root_dir=code_dir, exclude=exclude
+ snapshot_dir=output_dir / code_dir.name,
+ root_dir=code_dir,
+ exclude=exclude,
)
with snapshot:
pass
@@ -59,4 +58,4 @@ def make_submitit_executor(
cpus_per_task=cfg.local_job.cpus_per_task,
)
- return executor
\ No newline at end of file
+ return executor
diff --git a/experiments/job-runner/setup.py b/experiments/job-runner/setup.py
index d7776d8f..64299cbe 100644
--- a/experiments/job-runner/setup.py
+++ b/experiments/job-runner/setup.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -31,6 +30,6 @@
entry_points={
"console_scripts": [
"runjob=job_runner.runjob:main",
- ]
+ ],
},
)
diff --git a/experiments/make_gso_ids.py b/experiments/make_gso_ids.py
index e4abf944..cc62a2e4 100644
--- a/experiments/make_gso_ids.py
+++ b/experiments/make_gso_ids.py
@@ -1,13 +1,12 @@
-import os
import json
+import os
import pathlib as p
-
if __name__ == "__main__":
hp_data_dir = os.environ["HP_DATA_DIR"]
gso_dir = p.Path(hp_data_dir) / "google_scanned_objects" / "models_orig"
models = []
for n, model_path in enumerate(gso_dir.glob("**/meshes/model.obj")):
- models.append(dict(obj_id=n, gso_id=model_path.parent.parent.name))
+ models.append({"obj_id": n, "gso_id": model_path.parent.parent.name})
infos_path = p.Path(hp_data_dir) / "dataset-infos" / "gso_models.json"
infos_path.write_text(json.dumps(models, indent=2))
diff --git a/experiments/make_shapenet_ids.py b/experiments/make_shapenet_ids.py
index 0c324bc3..14005d50 100644
--- a/experiments/make_shapenet_ids.py
+++ b/experiments/make_shapenet_ids.py
@@ -1,9 +1,8 @@
-import os
-import pandas as pd
-from collections import deque
import json
-import typing as tp
+import os
import pathlib as p
+import typing as tp
+from collections import deque
from dataclasses import dataclass
@@ -11,8 +10,8 @@
class ShapeNetSynset:
id: str
name: str
- parents: tp.List[str]
- children: tp.List[str]
+ parents: list[str]
+ children: list[str]
@dataclass
@@ -25,14 +24,17 @@ class ModelInfo:
def read_models(shapenet_dir):
# TODO: This probably has issues / is poorly implemented and very slow
- taxonomy = json.load(open(shapenet_dir / "taxonomy.json", "r"))
+ taxonomy = json.load(open(shapenet_dir / "taxonomy.json"))
- id_to_synset: tp.Dict[int, ShapeNetSynset] = dict()
+ id_to_synset: dict[int, ShapeNetSynset] = {}
for synset in taxonomy:
synset_id = synset["synsetId"]
id_to_synset[synset_id] = ShapeNetSynset(
- id=synset_id, name=synset["name"], children=synset["children"], parents=[]
+ id=synset_id,
+ name=synset["name"],
+ children=synset["children"],
+ parents=[],
)
for synset in taxonomy:
@@ -53,19 +55,19 @@ def get_names(synset_id, id_to_synset):
return names
models_path = shapenet_dir.glob("**/**/models/model_normalized.obj")
- models: tp.List[tp.Dict[str, tp.Union[int, str]]] = []
+ models: list[dict[str, tp.Union[int, str]]] = []
for n, model_path in enumerate(models_path):
source_id = model_path.parent.parent.name
synset_id = model_path.parent.parent.parent.name
names = get_names(synset_id, id_to_synset)
names = ",".join(names)
models.append(
- dict(
- obj_id=n,
- shapenet_synset_id=synset_id,
- shapenet_source_id=source_id,
- shapenet_name=names,
- )
+ {
+ "obj_id": n,
+ "shapenet_synset_id": synset_id,
+ "shapenet_source_id": source_id,
+ "shapenet_name": names,
+ },
)
return models
diff --git a/experiments/postprocess_dataset.py b/experiments/postprocess_dataset.py
index a78b12df..b20d73f6 100644
--- a/experiments/postprocess_dataset.py
+++ b/experiments/postprocess_dataset.py
@@ -1,16 +1,15 @@
-import os
-import hydra
-from dataclasses import dataclass
-import submitit
-import numpy as np
import json
import pathlib as p
-from bop_toolkit_lib.dataset.bop_imagewise import io_load_gt
-from bop_toolkit_lib import inout
+from dataclasses import dataclass
-from job_runner.utils import make_submitit_executor
-from job_runner.configs import RunnerConfig
+import hydra
+import numpy as np
+import submitit
+from bop_toolkit_lib import inout
+from bop_toolkit_lib.dataset.bop_imagewise import io_load_gt
from hydra.core.config_store import ConfigStore
+from job_runner.configs import RunnerConfig
+from job_runner.utils import make_submitit_executor
def process_key(key, ds_dir, stoi_obj, out_dir):
@@ -37,7 +36,7 @@ def process_key(key, ds_dir, stoi_obj, out_dir):
if is_valid:
out_dir.mkdir(exist_ok=True)
- with open(ds_dir / f"{key}.gt.json", "r") as f:
+ with open(ds_dir / f"{key}.gt.json") as f:
gt = io_load_gt(f)
for gt_n in gt:
gt_n["obj_id"] = stoi_obj[gt_n["obj_id"]]
@@ -56,8 +55,8 @@ def load_stoi(ds_dir):
if not p.exists():
p = ds_dir / "shapenet_models.json"
assert p.exists()
- infos = json.load(open(p, "r"))
- stoi = dict()
+ infos = json.load(open(p))
+ stoi = {}
for info in infos:
if "gso_id" in info:
stoi[f"gso_{info['gso_id']}"] = info["obj_id"]
@@ -83,7 +82,9 @@ class Config:
@hydra.main(
- version_base=None, config_path="../configs", config_name="run_ds_postproc/default"
+ version_base=None,
+ config_path="../configs",
+ config_name="run_ds_postproc/default",
)
def main(cfg: Config):
executor = make_submitit_executor(cfg.runner)
@@ -92,7 +93,7 @@ def main(cfg: Config):
stoi = load_stoi(ds_dir)
paths = (ds_dir / "train_pbr_v2format").glob("*")
- keys = list(set([str(p.name).split(".")[0] for p in paths]))
+ keys = list({str(p.name).split(".")[0] for p in paths})
keys_splits = np.array_split(keys, cfg.n_jobs)
jobs = []
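One detail worth noting in the job splitting above: `np.array_split(keys, cfg.n_jobs)` (unlike `np.split`) tolerates uneven divisions, so the last jobs simply receive one key fewer — a standalone numpy illustration:

```python
import numpy as np

keys = np.arange(10)                 # e.g. 10 dataset keys
splits = np.array_split(keys, 4)     # 4 jobs -> sizes 3, 3, 2, 2
print([s.tolist() for s in splits])  # [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9]]
```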
diff --git a/happypose/pose_estimators/cosypose/LICENSE b/happypose/pose_estimators/cosypose/LICENSE
index a4b49a84..1a8af987 100644
--- a/happypose/pose_estimators/cosypose/LICENSE
+++ b/happypose/pose_estimators/cosypose/LICENSE
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+SOFTWARE.
diff --git a/happypose/pose_estimators/cosypose/README.md b/happypose/pose_estimators/cosypose/README.md
index a92470f6..9e26ff7e 100644
--- a/happypose/pose_estimators/cosypose/README.md
+++ b/happypose/pose_estimators/cosypose/README.md
@@ -165,11 +165,11 @@ The pre-trained models of the single-view pose estimator can be downloaded using
# YCB-V Single-view refiner
python -m cosypose.scripts.download --model=ycbv-refiner-finetune--251020
-# YCB-V Single-view refiner trained on synthetic data only
-# Only download this if you are interested in retraining the above model
+# YCB-V Single-view refiner trained on synthetic data only
+# Only download this if you are interested in retraining the above model
python -m cosypose.scripts.download --model=ycbv-refiner-syntonly--596719
-# T-LESS coarse and refiner models
+# T-LESS coarse and refiner models
python -m cosypose.scripts.download --model=tless-coarse--10219
python -m cosypose.scripts.download --model=tless-refiner--585928
```
@@ -419,7 +419,7 @@ The results can be also downloaded directly:
```sh
# YCB-Video 5 views
-python -m cosypose.scripts.download --result_id=ycbv-n_views=5--8073381555
+python -m cosypose.scripts.download --result_id=ycbv-n_views=5--8073381555
# T-LESS ViVo 4 views
python -m cosypose.scripts.download --result_id=tless-vivo-n_views=4--2731943061
diff --git a/happypose/pose_estimators/cosypose/config/conf.prc b/happypose/pose_estimators/cosypose/config/conf.prc
index 1fd5f758..66a8b120 100644
--- a/happypose/pose_estimators/cosypose/config/conf.prc
+++ b/happypose/pose_estimators/cosypose/config/conf.prc
@@ -1,3 +1,3 @@
win-size 1280 720
window-title Rendering
-load-file-type p3assimp
\ No newline at end of file
+load-file-type p3assimp
diff --git a/happypose/pose_estimators/cosypose/cosypose/__init__.py b/happypose/pose_estimators/cosypose/cosypose/__init__.py
index 485795da..8ab38677 100644
--- a/happypose/pose_estimators/cosypose/cosypose/__init__.py
+++ b/happypose/pose_estimators/cosypose/cosypose/__init__.py
@@ -1,4 +1,5 @@
import os
-os.environ['MKL_NUM_THREADS'] = '1'
-os.environ['OMP_NUM_THREADS'] = '1'
+
+os.environ["MKL_NUM_THREADS"] = "1"
+os.environ["OMP_NUM_THREADS"] = "1"
print("Setting OMP and MKL num threads to 1.")
diff --git a/happypose/pose_estimators/cosypose/cosypose/bop_config.py b/happypose/pose_estimators/cosypose/cosypose/bop_config.py
index 1e716cba..aa93dcc3 100644
--- a/happypose/pose_estimators/cosypose/cosypose/bop_config.py
+++ b/happypose/pose_estimators/cosypose/cosypose/bop_config.py
@@ -1,123 +1,127 @@
-BOP_CONFIG = dict()
-BOP_CONFIG['hb'] = dict(
- input_resize=(640, 480),
- urdf_ds_name='hb',
- obj_ds_name='hb',
- train_pbr_ds_name=['hb.pbr'],
- inference_ds_name=['hb.bop19'],
- test_ds_name=[],
-)
-
-BOP_CONFIG['icbin'] = dict(
- input_resize=(640, 480),
- urdf_ds_name='icbin',
- obj_ds_name='icbin',
- train_pbr_ds_name=['icbin.pbr'],
- inference_ds_name=['icbin.bop19'],
- test_ds_name=['icbin.bop19'],
-)
-
-
-BOP_CONFIG['itodd'] = dict(
- input_resize=(1280, 960),
- urdf_ds_name='itodd',
- obj_ds_name='itodd',
- train_pbr_ds_name=['itodd.pbr'],
- inference_ds_name=['itodd.bop19'],
- test_ds_name=[],
- val_ds_name=['itodd.val'],
-)
-
-
-BOP_CONFIG['lmo'] = dict(
- input_resize=(640, 480),
- urdf_ds_name='lm',
- obj_ds_name='lm',
- train_pbr_ds_name=['lm.pbr'],
- inference_ds_name=['lmo.bop19'],
- test_ds_name=['lmo.bop19'],
-)
-
-
-BOP_CONFIG['tless'] = dict(
- input_resize=(720, 540),
- urdf_ds_name='tless.cad',
- obj_ds_name='tless.cad',
- train_pbr_ds_name=['tless.pbr'],
- inference_ds_name=['tless.bop19'],
- test_ds_name=['tless.bop19'],
- train_synt_real_ds_names=[('tless.pbr', 4), ('tless.primesense.train', 1)]
-)
-
-BOP_CONFIG['tudl'] = dict(
- input_resize=(640, 480),
- urdf_ds_name='tudl',
- obj_ds_name='tudl',
- train_pbr_ds_name=['tudl.pbr'],
- inference_ds_name=['tudl.bop19'],
- test_ds_name=['tudl.bop19'],
- train_synt_real_ds_names=[('tudl.pbr', 10), ('tudl.train.real', 1)]
-)
-
-
-BOP_CONFIG['ycbv'] = dict(
- input_resize=(640, 480),
- urdf_ds_name='ycbv',
- obj_ds_name='ycbv.bop',
- train_pbr_ds_name=['ycbv.pbr'],
- train_pbr_real_ds_names=[('ycbv.pbr', 1), ()],
- inference_ds_name=['ycbv.bop19'],
- test_ds_name=['ycbv.bop19'],
- train_synt_real_ds_names=[('ycbv.pbr', 20), ('ycbv.train.synt', 1), ('ycbv.train.real', 3)]
-)
-
-PBR_DETECTORS = dict(
- hb='detector-bop-hb-pbr--497808',
- icbin='detector-bop-icbin-pbr--947409',
- itodd='detector-bop-itodd-pbr--509908',
- lmo='detector-bop-lmo-pbr--517542',
- tless='detector-bop-tless-pbr--873074',
- tudl='detector-bop-tudl-pbr--728047',
- ycbv='detector-bop-ycbv-pbr--970850',
-)
-
-PBR_COARSE = dict(
- hb='coarse-bop-hb-pbr--70752',
- icbin='coarse-bop-icbin-pbr--915044',
- itodd='coarse-bop-itodd-pbr--681884',
- lmo='coarse-bop-lmo-pbr--707448',
- tless='coarse-bop-tless-pbr--506801',
- tudl='coarse-bop-tudl-pbr--373484',
- ycbv='coarse-bop-ycbv-pbr--724183',
-)
-
-PBR_REFINER = dict(
- hb='refiner-bop-hb-pbr--247731',
- icbin='refiner-bop-icbin-pbr--841882',
- itodd='refiner-bop-itodd-pbr--834427',
- lmo='refiner-bop-lmo-pbr--325214',
- tless='refiner-bop-tless-pbr--233420',
- tudl='refiner-bop-tudl-pbr--487212',
- ycbv='refiner-bop-ycbv-pbr--604090',
-)
-
-SYNT_REAL_DETECTORS = dict(
- tudl='detector-bop-tudl-synt+real--298779',
- tless='detector-bop-tless-synt+real--452847',
- ycbv='detector-bop-ycbv-synt+real--292971',
-)
-
-SYNT_REAL_COARSE = dict(
- tudl='coarse-bop-tudl-synt+real--610074',
- tless='coarse-bop-tless-synt+real--160982',
- ycbv='coarse-bop-ycbv-synt+real--822463',
-)
-
-SYNT_REAL_REFINER = dict(
- tudl='refiner-bop-tudl-synt+real--423239',
- tless='refiner-bop-tless-synt+real--881314',
- ycbv='refiner-bop-ycbv-synt+real--631598',
-)
+BOP_CONFIG = {}
+BOP_CONFIG["hb"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "hb",
+ "obj_ds_name": "hb",
+ "train_pbr_ds_name": ["hb.pbr"],
+ "inference_ds_name": ["hb.bop19"],
+ "test_ds_name": [],
+}
+
+BOP_CONFIG["icbin"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "icbin",
+ "obj_ds_name": "icbin",
+ "train_pbr_ds_name": ["icbin.pbr"],
+ "inference_ds_name": ["icbin.bop19"],
+ "test_ds_name": ["icbin.bop19"],
+}
+
+
+BOP_CONFIG["itodd"] = {
+ "input_resize": (1280, 960),
+ "urdf_ds_name": "itodd",
+ "obj_ds_name": "itodd",
+ "train_pbr_ds_name": ["itodd.pbr"],
+ "inference_ds_name": ["itodd.bop19"],
+ "test_ds_name": [],
+ "val_ds_name": ["itodd.val"],
+}
+
+
+BOP_CONFIG["lmo"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "lm",
+ "obj_ds_name": "lm",
+ "train_pbr_ds_name": ["lm.pbr"],
+ "inference_ds_name": ["lmo.bop19"],
+ "test_ds_name": ["lmo.bop19"],
+}
+
+
+BOP_CONFIG["tless"] = {
+ "input_resize": (720, 540),
+ "urdf_ds_name": "tless.cad",
+ "obj_ds_name": "tless.cad",
+ "train_pbr_ds_name": ["tless.pbr"],
+ "inference_ds_name": ["tless.bop19"],
+ "test_ds_name": ["tless.bop19"],
+ "train_synt_real_ds_names": [("tless.pbr", 4), ("tless.primesense.train", 1)],
+}
+
+BOP_CONFIG["tudl"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "tudl",
+ "obj_ds_name": "tudl",
+ "train_pbr_ds_name": ["tudl.pbr"],
+ "inference_ds_name": ["tudl.bop19"],
+ "test_ds_name": ["tudl.bop19"],
+ "train_synt_real_ds_names": [("tudl.pbr", 10), ("tudl.train.real", 1)],
+}
+
+
+BOP_CONFIG["ycbv"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "ycbv",
+ "obj_ds_name": "ycbv.bop",
+ "train_pbr_ds_name": ["ycbv.pbr"],
+ "train_pbr_real_ds_names": [("ycbv.pbr", 1), ()],
+ "inference_ds_name": ["ycbv.bop19"],
+ "test_ds_name": ["ycbv.bop19"],
+ "train_synt_real_ds_names": [
+ ("ycbv.pbr", 20),
+ ("ycbv.train.synt", 1),
+ ("ycbv.train.real", 3),
+ ],
+}
+
+PBR_DETECTORS = {
+ "hb": "detector-bop-hb-pbr--497808",
+ "icbin": "detector-bop-icbin-pbr--947409",
+ "itodd": "detector-bop-itodd-pbr--509908",
+ "lmo": "detector-bop-lmo-pbr--517542",
+ "tless": "detector-bop-tless-pbr--873074",
+ "tudl": "detector-bop-tudl-pbr--728047",
+ "ycbv": "detector-bop-ycbv-pbr--970850",
+}
+
+PBR_COARSE = {
+ "hb": "coarse-bop-hb-pbr--70752",
+ "icbin": "coarse-bop-icbin-pbr--915044",
+ "itodd": "coarse-bop-itodd-pbr--681884",
+ "lmo": "coarse-bop-lmo-pbr--707448",
+ "tless": "coarse-bop-tless-pbr--506801",
+ "tudl": "coarse-bop-tudl-pbr--373484",
+ "ycbv": "coarse-bop-ycbv-pbr--724183",
+}
+
+PBR_REFINER = {
+ "hb": "refiner-bop-hb-pbr--247731",
+ "icbin": "refiner-bop-icbin-pbr--841882",
+ "itodd": "refiner-bop-itodd-pbr--834427",
+ "lmo": "refiner-bop-lmo-pbr--325214",
+ "tless": "refiner-bop-tless-pbr--233420",
+ "tudl": "refiner-bop-tudl-pbr--487212",
+ "ycbv": "refiner-bop-ycbv-pbr--604090",
+}
+
+SYNT_REAL_DETECTORS = {
+ "tudl": "detector-bop-tudl-synt+real--298779",
+ "tless": "detector-bop-tless-synt+real--452847",
+ "ycbv": "detector-bop-ycbv-synt+real--292971",
+}
+
+SYNT_REAL_COARSE = {
+ "tudl": "coarse-bop-tudl-synt+real--610074",
+ "tless": "coarse-bop-tless-synt+real--160982",
+ "ycbv": "coarse-bop-ycbv-synt+real--822463",
+}
+
+SYNT_REAL_REFINER = {
+ "tudl": "refiner-bop-tudl-synt+real--423239",
+ "tless": "refiner-bop-tless-synt+real--881314",
+ "ycbv": "refiner-bop-ycbv-synt+real--631598",
+}
for k, v in PBR_COARSE.items():
@@ -133,8 +137,8 @@
SYNT_REAL_DETECTORS[k] = v
-PBR_INFERENCE_ID = 'bop-pbr--223026'
-SYNT_REAL_INFERENCE_ID = 'bop-synt+real--815712'
-SYNT_REAL_ICP_INFERENCE_ID = 'bop-synt+real-icp--121351'
-SYNT_REAL_4VIEWS_INFERENCE_ID = 'bop-synt+real-nviews=4--419066'
-SYNT_REAL_8VIEWS_INFERENCE_ID = 'bop-synt+real-nviews=8--763684'
+PBR_INFERENCE_ID = "bop-pbr--223026"
+SYNT_REAL_INFERENCE_ID = "bop-synt+real--815712"
+SYNT_REAL_ICP_INFERENCE_ID = "bop-synt+real-icp--121351"
+SYNT_REAL_4VIEWS_INFERENCE_ID = "bop-synt+real-nviews=4--419066"
+SYNT_REAL_8VIEWS_INFERENCE_ID = "bop-synt+real-nviews=8--763684"
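
Since the registry above is now plain dicts, consumers simply index it. A minimal sketch; the `models_for_dataset` helper is hypothetical, only the dictionary names and keys come from this module:

```python
from happypose.pose_estimators.cosypose.cosypose.bop_config import (
    BOP_CONFIG,
    PBR_COARSE,
    PBR_DETECTORS,
    PBR_REFINER,
)


def models_for_dataset(ds: str) -> dict:
    """Collect the PBR-trained model ids and input size for one BOP dataset."""
    cfg = BOP_CONFIG[ds]
    return {
        "input_resize": cfg["input_resize"],  # e.g. (640, 480) for ycbv
        "detector": PBR_DETECTORS[ds],
        "coarse": PBR_COARSE[ds],
        "refiner": PBR_REFINER[ds],
    }


print(models_for_dataset("ycbv")["detector"])  # detector-bop-ycbv-pbr--970850
```
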
diff --git a/happypose/pose_estimators/cosypose/cosypose/config.py b/happypose/pose_estimators/cosypose/cosypose/config.py
index b745f35c..e1f0a366 100644
--- a/happypose/pose_estimators/cosypose/cosypose/config.py
+++ b/happypose/pose_estimators/cosypose/cosypose/config.py
@@ -1,35 +1,39 @@
-import cosypose
-import happypose
-import os
-import yaml
-from joblib import Memory
-from pathlib import Path
import getpass
+import os
import socket
+from pathlib import Path
+
import torch.multiprocessing
-torch.multiprocessing.set_sharing_strategy('file_system')
+import yaml
+from joblib import Memory
+
+import happypose
+
+torch.multiprocessing.set_sharing_strategy("file_system")
hostname = socket.gethostname()
username = getpass.getuser()
PROJECT_ROOT = Path(happypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
-DATA_DIR = PROJECT_DIR / 'data'
-LOCAL_DATA_DIR = Path(os.environ.get("HAPPYPOSE_DATA_DIR", Path(PROJECT_DIR) / "local_data"))
+DATA_DIR = PROJECT_DIR / "data"
+LOCAL_DATA_DIR = Path(
+ os.environ.get("HAPPYPOSE_DATA_DIR", Path(PROJECT_DIR) / "local_data"),
+)
TEST_DATA_DIR = LOCAL_DATA_DIR
-DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
-SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
-BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
+DASK_LOGS_DIR = LOCAL_DATA_DIR / "dasklogs"
+SYNT_DS_DIR = LOCAL_DATA_DIR / "synt_datasets"
+BOP_DS_DIR = LOCAL_DATA_DIR / "bop_datasets"
-BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
-BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
+BOP_TOOLKIT_DIR = PROJECT_DIR / "deps" / "bop_toolkit_cosypose"
+BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / "deps" / "bop_toolkit_challenge"
-EXP_DIR = LOCAL_DATA_DIR / 'experiments'
-RESULTS_DIR = LOCAL_DATA_DIR / 'results'
-DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
+EXP_DIR = LOCAL_DATA_DIR / "experiments"
+RESULTS_DIR = LOCAL_DATA_DIR / "results"
+DEBUG_DATA_DIR = LOCAL_DATA_DIR / "debug_data"
-DEPS_DIR = PROJECT_DIR / 'deps'
-CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'
+DEPS_DIR = PROJECT_DIR / "deps"
+CACHE_DIR = LOCAL_DATA_DIR / "joblib_cache"
assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
@@ -39,20 +43,24 @@
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)
-ASSET_DIR = DATA_DIR / 'assets'
+ASSET_DIR = DATA_DIR / "assets"
MEMORY = Memory(CACHE_DIR, verbose=2)
-CONDA_PREFIX = os.environ['CONDA_PREFIX']
-if 'CONDA_PREFIX_1' in os.environ:
- CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
- CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
-else:
- CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
- CONDA_ENV = 'base'
+if "CONDA_PREFIX" in os.environ:
+ CONDA_PREFIX = os.environ["CONDA_PREFIX"]
+ if "CONDA_PREFIX_1" in os.environ:
+ CONDA_BASE_DIR = os.environ["CONDA_PREFIX_1"]
+ CONDA_ENV = os.environ["CONDA_DEFAULT_ENV"]
+ else:
+ CONDA_BASE_DIR = os.environ["CONDA_PREFIX"]
+ CONDA_ENV = "base"
-cfg = yaml.load((PROJECT_DIR / 'happypose/pose_estimators/cosypose/config_yann.yaml').read_text(), Loader=yaml.FullLoader)
+cfg = yaml.load(
+ (PROJECT_DIR / "happypose/pose_estimators/cosypose/config_yann.yaml").read_text(),
+ Loader=yaml.FullLoader,
+)
-SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
-SLURM_QOS = cfg['slurm_qos']
-DASK_NETWORK_INTERFACE = cfg['dask_network_interface']
+SLURM_GPU_QUEUE = cfg["slurm_gpu_queue"]
+SLURM_QOS = cfg["slurm_qos"]
+DASK_NETWORK_INTERFACE = cfg["dask_network_interface"]
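
The data-directory resolution introduced above is worth isolating, since the module asserts `LOCAL_DATA_DIR.exists()` at import time. A standalone sketch of the same lookup, outside the config module:

```python
import os
from pathlib import Path


def resolve_local_data_dir(project_dir: Path) -> Path:
    # HAPPYPOSE_DATA_DIR wins; otherwise fall back to <project>/local_data,
    # mirroring the LOCAL_DATA_DIR definition in cosypose/config.py.
    return Path(os.environ.get("HAPPYPOSE_DATA_DIR", project_dir / "local_data"))


# os.environ["HAPPYPOSE_DATA_DIR"] = "/data/happypose"
# resolve_local_data_dir(Path.cwd())  # -> PosixPath('/data/happypose')
```
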
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/augmentations.py b/happypose/pose_estimators/cosypose/cosypose/datasets/augmentations.py
index ad110b77..362b7b52 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/augmentations.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/augmentations.py
@@ -1,14 +1,18 @@
+import random
+from copy import deepcopy
+
import numpy as np
import PIL
import torch
-import random
+import torch.nn.functional as F
from PIL import ImageEnhance, ImageFilter
from torchvision.datasets import VOCSegmentation
-import torch.nn.functional as F
-from copy import deepcopy
-from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import get_K_crop_resize
-from .utils import make_detections_from_segmentation, crop_to_aspect_ratio
+from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import (
+ get_K_crop_resize,
+)
+
+from .utils import crop_to_aspect_ratio, make_detections_from_segmentation
def to_pil(im):
@@ -19,7 +23,8 @@ def to_pil(im):
elif isinstance(im, np.ndarray):
return PIL.Image.fromarray(im)
else:
- raise ValueError('Type not supported', type(im))
+ msg = "Type not supported"
+ raise ValueError(msg, type(im))
def to_torch_uint8(im):
@@ -31,7 +36,8 @@ def to_torch_uint8(im):
assert im.dtype == np.uint8
im = torch.as_tensor(im)
else:
- raise ValueError('Type not supported', type(im))
+ msg = "Type not supported"
+ raise ValueError(msg, type(im))
if im.dim() == 3:
assert im.shape[-1] in {1, 3}
return im
@@ -58,36 +64,46 @@ def __init__(self, pillow_fn, p, factor_interval):
def __call__(self, im, mask, obs):
im = to_pil(im)
if random.random() <= self.p:
- im = self._pillow_fn(im).enhance(factor=random.uniform(*self.factor_interval))
+ im = self._pillow_fn(im).enhance(
+ factor=random.uniform(*self.factor_interval),
+ )
return im, mask, obs
class PillowSharpness(PillowRGBAugmentation):
- def __init__(self, p=0.3, factor_interval=(0., 50.)):
- super().__init__(pillow_fn=ImageEnhance.Sharpness,
- p=p,
- factor_interval=factor_interval)
+ def __init__(self, p=0.3, factor_interval=(0.0, 50.0)):
+ super().__init__(
+ pillow_fn=ImageEnhance.Sharpness,
+ p=p,
+ factor_interval=factor_interval,
+ )
class PillowContrast(PillowRGBAugmentation):
- def __init__(self, p=0.3, factor_interval=(0.2, 50.)):
- super().__init__(pillow_fn=ImageEnhance.Contrast,
- p=p,
- factor_interval=factor_interval)
+ def __init__(self, p=0.3, factor_interval=(0.2, 50.0)):
+ super().__init__(
+ pillow_fn=ImageEnhance.Contrast,
+ p=p,
+ factor_interval=factor_interval,
+ )
class PillowBrightness(PillowRGBAugmentation):
def __init__(self, p=0.5, factor_interval=(0.1, 6.0)):
- super().__init__(pillow_fn=ImageEnhance.Brightness,
- p=p,
- factor_interval=factor_interval)
+ super().__init__(
+ pillow_fn=ImageEnhance.Brightness,
+ p=p,
+ factor_interval=factor_interval,
+ )
class PillowColor(PillowRGBAugmentation):
def __init__(self, p=0.3, factor_interval=(0.0, 20.0)):
- super().__init__(pillow_fn=ImageEnhance.Color,
- p=p,
- factor_interval=factor_interval)
+ super().__init__(
+ pillow_fn=ImageEnhance.Color,
+ p=p,
+ factor_interval=factor_interval,
+ )
class GrayScale(PillowRGBAugmentation):
@@ -128,7 +144,12 @@ def __call__(self, im, mask, obs):
class VOCBackgroundAugmentation(BackgroundAugmentation):
def __init__(self, voc_root, p=0.3):
print("voc_root =", voc_root)
- image_dataset = VOCSegmentation(root=voc_root, year='2012', image_set='trainval', download=False)
+ image_dataset = VOCSegmentation(
+ root=voc_root,
+ year="2012",
+ image_set="trainval",
+ download=False,
+ )
super().__init__(image_dataset=image_dataset, p=p)
def get_bg_image(self, idx):
@@ -143,26 +164,26 @@ def __init__(self, resize=(640, 480)):
def __call__(self, im, mask, obs):
im = to_torch_uint8(im)
mask = to_torch_uint8(mask)
- obs['orig_camera'] = deepcopy(obs['camera'])
+ obs["orig_camera"] = deepcopy(obs["camera"])
assert im.shape[-1] == 3
h, w = im.shape[:2]
if (h, w) == self.resize:
- obs['orig_camera']['crop_resize_bbox'] = (0, 0, w-1, h-1)
+ obs["orig_camera"]["crop_resize_bbox"] = (0, 0, w - 1, h - 1)
return im, mask, obs
images = (torch.as_tensor(im).float() / 255).unsqueeze(0).permute(0, 3, 1, 2)
masks = torch.as_tensor(mask).unsqueeze(0).unsqueeze(0).float()
- K = torch.tensor(obs['camera']['K']).unsqueeze(0)
+ K = torch.tensor(obs["camera"]["K"]).unsqueeze(0)
        # Match the width of the input image with an image of the target aspect ratio.
- if not np.isclose(w/h, self.aspect):
+ if not np.isclose(w / h, self.aspect):
x0, y0 = images.shape[-1] / 2, images.shape[-2] / 2
w = images.shape[-1]
r = self.aspect
- h = w * 1/r
+ h = w * 1 / r
box_size = (h, w)
h, w = min(box_size), max(box_size)
- x1, y1, x2, y2 = x0-w/2, y0-h/2, x0+w/2, y0+h/2
+ x1, y1, x2, y2 = x0 - w / 2, y0 - h / 2, x0 + w / 2, y0 + h / 2
box = torch.tensor([x1, y1, x2, y2])
images, masks, K = crop_to_aspect_ratio(images, box, masks=masks, K=K)
@@ -172,24 +193,34 @@ def __call__(self, im, mask, obs):
h_output, w_output = min(self.resize), max(self.resize)
box_size = (h_input, w_input)
h, w = min(box_size), max(box_size)
- x1, y1, x2, y2 = x0-w/2, y0-h/2, x0+w/2, y0+h/2
+ x1, y1, x2, y2 = x0 - w / 2, y0 - h / 2, x0 + w / 2, y0 + h / 2
box = torch.tensor([x1, y1, x2, y2])
- images = F.interpolate(images, size=(h_output, w_output), mode='bilinear', align_corners=False)
- masks = F.interpolate(masks, size=(h_output, w_output), mode='nearest')
- obs['orig_camera']['crop_resize_bbox'] = tuple(box.tolist())
- K = get_K_crop_resize(K, box.unsqueeze(0), orig_size=(h_input, w_input), crop_resize=(h_output, w_output))
+ images = F.interpolate(
+ images,
+ size=(h_output, w_output),
+ mode="bilinear",
+ align_corners=False,
+ )
+ masks = F.interpolate(masks, size=(h_output, w_output), mode="nearest")
+ obs["orig_camera"]["crop_resize_bbox"] = tuple(box.tolist())
+ K = get_K_crop_resize(
+ K,
+ box.unsqueeze(0),
+ orig_size=(h_input, w_input),
+ crop_resize=(h_output, w_output),
+ )
# Update the bounding box annotations
dets_gt = make_detections_from_segmentation(masks)[0]
- for n, obj in enumerate(obs['objects']):
- if 'bbox' in obj:
- assert 'id_in_segm' in obj
- obj['bbox'] = dets_gt[obj['id_in_segm']]
+ for _n, obj in enumerate(obs["objects"]):
+ if "bbox" in obj:
+ assert "id_in_segm" in obj
+ obj["bbox"] = dets_gt[obj["id_in_segm"]]
im = (images[0].permute(1, 2, 0) * 255).to(torch.uint8)
mask = masks[0, 0].to(torch.uint8)
- obs['camera']['K'] = K.squeeze(0).numpy()
- obs['camera']['resolution'] = (w_output, h_output)
+ obs["camera"]["K"] = K.squeeze(0).numpy()
+ obs["camera"]["resolution"] = (w_output, h_output)
return im, mask, obs
@@ -201,23 +232,23 @@ def __init__(self, resize=(640, 480)):
def __call__(self, im, mask, obs):
im = to_torch_uint8(im)
mask = to_torch_uint8(mask)
- obs['orig_camera'] = deepcopy(obs['camera'])
+ obs["orig_camera"] = deepcopy(obs["camera"])
assert im.shape[-1] == 3
h, w = im.shape[:2]
if (h, w) == self.resize:
- obs['orig_camera']['crop_resize_bbox'] = (0, 0, w-1, h-1)
+ obs["orig_camera"]["crop_resize_bbox"] = (0, 0, w - 1, h - 1)
return im, mask, obs
images = (torch.as_tensor(im).float() / 255).unsqueeze(0).permute(0, 3, 1, 2)
masks = torch.as_tensor(mask).unsqueeze(0).unsqueeze(0).float()
- K = torch.tensor(obs['camera']['K']).unsqueeze(0)
+ K = torch.tensor(obs["camera"]["K"]).unsqueeze(0)
        # Match the width of the input image with an image of the target aspect ratio.
- if not np.isclose(w/h, self.aspect):
+ if not np.isclose(w / h, self.aspect):
x0, y0 = images.shape[-1] / 2, images.shape[-2] / 2
box_size = self.resize
h, w = min(box_size), max(box_size)
- x1, y1, x2, y2 = x0-w/2, y0-h/2, x0+w/2, y0+h/2
+ x1, y1, x2, y2 = x0 - w / 2, y0 - h / 2, x0 + w / 2, y0 + h / 2
box = torch.tensor([x1, y1, x2, y2])
images, masks, K = crop_to_aspect_ratio(images, box, masks=masks, K=K)
@@ -227,22 +258,32 @@ def __call__(self, im, mask, obs):
h_output, w_output = min(self.resize), max(self.resize)
box_size = (h_input, w_input)
h, w = min(box_size), max(box_size)
- x1, y1, x2, y2 = x0-w/2, y0-h/2, x0+w/2, y0+h/2
+ x1, y1, x2, y2 = x0 - w / 2, y0 - h / 2, x0 + w / 2, y0 + h / 2
box = torch.tensor([x1, y1, x2, y2])
- images = F.interpolate(images, size=(h_output, w_output), mode='bilinear', align_corners=False)
- masks = F.interpolate(masks, size=(h_output, w_output), mode='nearest')
- obs['orig_camera']['crop_resize_bbox'] = tuple(box.tolist())
- K = get_K_crop_resize(K, box.unsqueeze(0), orig_size=(h_input, w_input), crop_resize=(h_output, w_output))
+ images = F.interpolate(
+ images,
+ size=(h_output, w_output),
+ mode="bilinear",
+ align_corners=False,
+ )
+ masks = F.interpolate(masks, size=(h_output, w_output), mode="nearest")
+ obs["orig_camera"]["crop_resize_bbox"] = tuple(box.tolist())
+ K = get_K_crop_resize(
+ K,
+ box.unsqueeze(0),
+ orig_size=(h_input, w_input),
+ crop_resize=(h_output, w_output),
+ )
# Update the bounding box annotations
dets_gt = make_detections_from_segmentation(masks)[0]
- for n, obj in enumerate(obs['objects']):
- if 'bbox' in obj:
- assert 'id_in_segm' in obj
- obj['bbox'] = dets_gt[obj['id_in_segm']]
+ for _n, obj in enumerate(obs["objects"]):
+ if "bbox" in obj:
+ assert "id_in_segm" in obj
+ obj["bbox"] = dets_gt[obj["id_in_segm"]]
im = (images[0].permute(1, 2, 0) * 255).to(torch.uint8)
mask = masks[0, 0].to(torch.uint8)
- obs['camera']['K'] = K.squeeze(0).numpy()
- obs['camera']['resolution'] = (w_output, h_output)
+ obs["camera"]["K"] = K.squeeze(0).numpy()
+ obs["camera"]["resolution"] = (w_output, h_output)
return im, mask, obs
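
Each augmentation above is a callable `(im, mask, obs) -> (im, mask, obs)`, so a pipeline is plain sequential application. A minimal sketch with dummy inputs; given the `p` probabilities, any single call may be a no-op:

```python
import numpy as np

from happypose.pose_estimators.cosypose.cosypose.datasets.augmentations import (
    PillowBrightness,
    PillowContrast,
    PillowSharpness,
)

augmentations = [
    PillowSharpness(p=0.3, factor_interval=(0.0, 50.0)),
    PillowContrast(p=0.3, factor_interval=(0.2, 50.0)),
    PillowBrightness(p=0.5, factor_interval=(0.1, 6.0)),
]

im = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy RGB frame
mask = np.zeros((480, 640), dtype=np.uint8)   # dummy segmentation mask
obs = {"camera": {}, "objects": []}           # untouched by these augmentations

for aug in augmentations:
    im, mask, obs = aug(im, mask, obs)
```
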
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/bop.py b/happypose/pose_estimators/cosypose/cosypose/datasets/bop.py
index a57c70ed..e94fcff8 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/bop.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/bop.py
@@ -1,5 +1,6 @@
-import pickle
import json
+import pickle
+import sys
from pathlib import Path
import numpy as np
@@ -7,13 +8,13 @@
import torch
from PIL import Image
-from happypose.pose_estimators.cosypose.cosypose.config import MEMORY, BOP_TOOLKIT_DIR
-from happypose.pose_estimators.cosypose.cosypose.lib3d import Transform
+from happypose.pose_estimators.cosypose.cosypose.config import BOP_TOOLKIT_DIR, MEMORY
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+from happypose.toolbox.lib3d.transform import Transform
-import sys
sys.path.append(str(BOP_TOOLKIT_DIR))
from bop_toolkit_lib import inout # noqa
+
sys.path = sys.path[:-1]
@@ -21,8 +22,8 @@
def remap_bop_targets(targets):
- targets = targets.rename(columns={'im_id': 'view_id'})
- targets['label'] = targets['obj_id'].apply(lambda x: f'obj_{x:06d}')
+ targets = targets.rename(columns={"im_id": "view_id"})
+ targets["label"] = targets["obj_id"].apply(lambda x: f"obj_{x:06d}")
return targets
@@ -30,52 +31,60 @@ def remap_bop_targets(targets):
def build_index(ds_dir, save_file, split, save_file_annotations):
scene_ids, cam_ids, view_ids = [], [], []
- annotations = dict()
+ annotations = {}
base_dir = ds_dir / split
for scene_dir in base_dir.iterdir():
scene_id = scene_dir.name
- annotations_scene = dict()
- for f in ('scene_camera.json', 'scene_gt_info.json', 'scene_gt.json'):
- path = (scene_dir / f)
+ annotations_scene = {}
+ for f in ("scene_camera.json", "scene_gt_info.json", "scene_gt.json"):
+ path = scene_dir / f
if path.exists():
- annotations_scene[f.split('.')[0]] = json.loads(path.read_text())
+ annotations_scene[f.split(".")[0]] = json.loads(path.read_text())
annotations[scene_id] = annotations_scene
# for view_id in annotations_scene['scene_gt_info'].keys():
- for view_id in annotations_scene['scene_camera'].keys():
- cam_id = 'cam'
+ for view_id in annotations_scene["scene_camera"].keys():
+ cam_id = "cam"
scene_ids.append(int(scene_id))
cam_ids.append(cam_id)
view_ids.append(int(view_id))
- frame_index = pd.DataFrame({'scene_id': scene_ids, 'cam_id': cam_ids,
- 'view_id': view_ids, 'cam_name': cam_ids})
+ frame_index = pd.DataFrame(
+ {
+ "scene_id": scene_ids,
+ "cam_id": cam_ids,
+ "view_id": view_ids,
+ "cam_name": cam_ids,
+ },
+ )
frame_index.to_feather(save_file)
save_file_annotations.write_bytes(pickle.dumps(annotations))
return
class BOPDataset:
- def __init__(self, ds_dir, split='train', load_depth=False):
+ def __init__(self, ds_dir, split="train", load_depth=False):
ds_dir = Path(ds_dir)
self.ds_dir = ds_dir
- assert ds_dir.exists(), 'Dataset does not exists.'
+ assert ds_dir.exists(), "Dataset does not exists."
self.split = split
self.base_dir = ds_dir / split
- logger.info(f'Building index and loading annotations...')
- save_file_index = self.ds_dir / f'index_{split}.feather'
- save_file_annotations = self.ds_dir / f'annotations_{split}.pkl'
+ logger.info("Building index and loading annotations...")
+ save_file_index = self.ds_dir / f"index_{split}.feather"
+ save_file_annotations = self.ds_dir / f"annotations_{split}.pkl"
build_index(
- ds_dir=ds_dir, save_file=save_file_index,
+ ds_dir=ds_dir,
+ save_file=save_file_index,
save_file_annotations=save_file_annotations,
- split=split)
+ split=split,
+ )
self.frame_index = pd.read_feather(save_file_index).reset_index(drop=True)
self.annotations = pickle.loads(save_file_annotations.read_bytes())
- models_infos = json.loads((ds_dir / 'models' / 'models_info.json').read_text())
- self.all_labels = [f'obj_{int(obj_id):06d}' for obj_id in models_infos.keys()]
+ models_infos = json.loads((ds_dir / "models" / "models_info.json").read_text())
+ self.all_labels = [f"obj_{int(obj_id):06d}" for obj_id in models_infos.keys()]
self.load_depth = load_depth
def __len__(self):
@@ -85,18 +94,18 @@ def __getitem__(self, frame_id):
row = self.frame_index.iloc[frame_id]
scene_id, view_id = row.scene_id, row.view_id
view_id = int(view_id)
- view_id_str = f'{view_id:06d}'
- scene_id_str = f'{int(scene_id):06d}'
+ view_id_str = f"{view_id:06d}"
+ scene_id_str = f"{int(scene_id):06d}"
scene_dir = self.base_dir / scene_id_str
- rgb_dir = scene_dir / 'rgb'
+ rgb_dir = scene_dir / "rgb"
if not rgb_dir.exists():
- rgb_dir = scene_dir / 'gray'
- rgb_path = rgb_dir / f'{view_id_str}.png'
+ rgb_dir = scene_dir / "gray"
+ rgb_path = rgb_dir / f"{view_id_str}.png"
if not rgb_path.exists():
- rgb_path = rgb_path.with_suffix('.jpg')
+ rgb_path = rgb_path.with_suffix(".jpg")
if not rgb_path.exists():
- rgb_path = rgb_path.with_suffix('.tif')
+ rgb_path = rgb_path.with_suffix(".tif")
rgb = np.array(Image.open(rgb_path))
if rgb.ndim == 2:
@@ -105,65 +114,75 @@ def __getitem__(self, frame_id):
h, w = rgb.shape[:2]
rgb = torch.as_tensor(rgb)
- cam_annotation = self.annotations[scene_id_str]['scene_camera'][str(view_id)]
- if 'cam_R_w2c' in cam_annotation:
- RC0 = np.array(cam_annotation['cam_R_w2c']).reshape(3, 3)
- tC0 = np.array(cam_annotation['cam_t_w2c']) * 0.001
+ cam_annotation = self.annotations[scene_id_str]["scene_camera"][str(view_id)]
+ if "cam_R_w2c" in cam_annotation:
+ RC0 = np.array(cam_annotation["cam_R_w2c"]).reshape(3, 3)
+ tC0 = np.array(cam_annotation["cam_t_w2c"]) * 0.001
TC0 = Transform(RC0, tC0)
else:
TC0 = Transform(np.eye(3), np.zeros(3))
- K = np.array(cam_annotation['cam_K']).reshape(3, 3)
+ K = np.array(cam_annotation["cam_K"]).reshape(3, 3)
T0C = TC0.inverse()
T0C = T0C.toHomogeneousMatrix()
- camera = dict(T0C=T0C, K=K, TWC=T0C, resolution=rgb.shape[:2])
+ camera = {"T0C": T0C, "K": K, "TWC": T0C, "resolution": rgb.shape[:2]}
T0C = TC0.inverse()
objects = []
mask = np.zeros((h, w), dtype=np.uint8)
- if 'scene_gt_info' in self.annotations[scene_id_str]:
- annotation = self.annotations[scene_id_str]['scene_gt'][str(view_id)]
+ if "scene_gt_info" in self.annotations[scene_id_str]:
+ annotation = self.annotations[scene_id_str]["scene_gt"][str(view_id)]
n_objects = len(annotation)
- visib = self.annotations[scene_id_str]['scene_gt_info'][str(view_id)]
+ visib = self.annotations[scene_id_str]["scene_gt_info"][str(view_id)]
for n in range(n_objects):
- RCO = np.array(annotation[n]['cam_R_m2c']).reshape(3, 3)
- tCO = np.array(annotation[n]['cam_t_m2c']) * 0.001
+ RCO = np.array(annotation[n]["cam_R_m2c"]).reshape(3, 3)
+ tCO = np.array(annotation[n]["cam_t_m2c"]) * 0.001
TCO = Transform(RCO, tCO)
T0O = T0C * TCO
T0O = T0O.toHomogeneousMatrix()
- obj_id = annotation[n]['obj_id']
- name = f'obj_{int(obj_id):06d}'
- bbox_visib = np.array(visib[n]['bbox_visib'])
+ obj_id = annotation[n]["obj_id"]
+ name = f"obj_{int(obj_id):06d}"
+ bbox_visib = np.array(visib[n]["bbox_visib"])
x, y, w, h = bbox_visib
x1 = x
y1 = y
x2 = x + w
y2 = y + h
- obj = dict(label=name, name=name, TWO=T0O, T0O=T0O,
- visib_fract=visib[n]['visib_fract'],
- id_in_segm=n+1, bbox=[x1, y1, x2, y2])
+ obj = {
+ "label": name,
+ "name": name,
+ "TWO": T0O,
+ "T0O": T0O,
+ "visib_fract": visib[n]["visib_fract"],
+ "id_in_segm": n + 1,
+ "bbox": [x1, y1, x2, y2],
+ }
objects.append(obj)
- mask_path = scene_dir / 'mask_visib' / f'{view_id_str}_all.png'
+ mask_path = scene_dir / "mask_visib" / f"{view_id_str}_all.png"
if mask_path.exists():
mask = np.array(Image.open(mask_path))
else:
for n in range(n_objects):
- mask_n = np.array(Image.open(scene_dir / 'mask_visib' / f'{view_id_str}_{n:06d}.png'))
+ mask_n = np.array(
+ Image.open(
+ scene_dir / "mask_visib" / f"{view_id_str}_{n:06d}.png",
+ ),
+ )
mask[mask_n == 255] = n + 1
mask = torch.as_tensor(mask)
if self.load_depth:
- depth_path = scene_dir / 'depth' / f'{view_id_str}.png'
+ depth_path = scene_dir / "depth" / f"{view_id_str}.png"
if not depth_path.exists():
- depth_path = depth_path.with_suffix('.tif')
+ depth_path = depth_path.with_suffix(".tif")
depth = np.array(inout.load_depth(depth_path))
- camera['depth'] = depth * cam_annotation['depth_scale'] / 1000
+ camera["depth"] = depth * cam_annotation["depth_scale"] / 1000
- obs = dict(
- objects=objects,
- camera=camera,
- frame_info=row.to_dict(),
- )
+ obs = {
+ "objects": objects,
+ "camera": camera,
+ "frame_info": row.to_dict(),
+ }
return rgb, mask, obs
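
Usage of the refactored `BOPDataset` is unchanged; only the quoting style moved. A sketch, assuming a BOP-format dataset is already present under the path shown:

```python
from happypose.pose_estimators.cosypose.cosypose.datasets.bop import BOPDataset

ds = BOPDataset("local_data/bop_datasets/ycbv", split="test", load_depth=False)
rgb, mask, obs = ds[0]

print(rgb.shape)           # (H, W, 3) uint8 tensor
print(obs["camera"]["K"])  # 3x3 intrinsics; translations are stored in meters
for obj in obs["objects"]:
    print(obj["label"], obj["bbox"], obj["visib_fract"])
```
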
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/bop_object_datasets.py b/happypose/pose_estimators/cosypose/cosypose/datasets/bop_object_datasets.py
index 306050bb..22fa5301 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/bop_object_datasets.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/bop_object_datasets.py
@@ -5,28 +5,28 @@
class BOPObjectDataset:
def __init__(self, ds_dir):
ds_dir = Path(ds_dir)
- infos_file = ds_dir / 'models_info.json'
+ infos_file = ds_dir / "models_info.json"
infos = json.loads(infos_file.read_text())
objects = []
for obj_id, bop_info in infos.items():
obj_id = int(obj_id)
- obj_label = f'obj_{obj_id:06d}'
- mesh_path = (ds_dir / obj_label).with_suffix('.ply').as_posix()
- obj = dict(
- label=obj_label,
- category=None,
- mesh_path=mesh_path,
- mesh_units='mm',
- )
+ obj_label = f"obj_{obj_id:06d}"
+ mesh_path = (ds_dir / obj_label).with_suffix(".ply").as_posix()
+ obj = {
+ "label": obj_label,
+ "category": None,
+ "mesh_path": mesh_path,
+ "mesh_units": "mm",
+ }
is_symmetric = False
- for k in ('symmetries_discrete', 'symmetries_continuous'):
+ for k in ("symmetries_discrete", "symmetries_continuous"):
obj[k] = bop_info.get(k, [])
if len(obj[k]) > 0:
is_symmetric = True
- obj['is_symmetric'] = is_symmetric
- obj['diameter'] = bop_info['diameter']
- scale = 0.001 if obj['mesh_units'] == 'mm' else 1.0
- obj['diameter_m'] = bop_info['diameter'] * scale
+ obj["is_symmetric"] = is_symmetric
+ obj["diameter"] = bop_info["diameter"]
+ scale = 0.001 if obj["mesh_units"] == "mm" else 1.0
+ obj["diameter_m"] = bop_info["diameter"] * scale
objects.append(obj)
self.objects = objects
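
After the rewrite each object record is a plain dict; a quick inspection sketch (the dataset path is an assumption):

```python
from happypose.pose_estimators.cosypose.cosypose.datasets.bop_object_datasets import (
    BOPObjectDataset,
)

ds = BOPObjectDataset("local_data/bop_datasets/tless/models_cad")
for obj in ds.objects[:3]:
    # mesh units are millimeters, so diameter_m == diameter * 0.001
    print(obj["label"], obj["is_symmetric"], obj["diameter_m"])
```
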
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/datasets_cfg.py b/happypose/pose_estimators/cosypose/cosypose/datasets/datasets_cfg.py
index 5d84b75b..618bd37d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/datasets_cfg.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/datasets_cfg.py
@@ -1,29 +1,34 @@
import numpy as np
import pandas as pd
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR, ASSET_DIR, BOP_DS_DIR
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ ASSET_DIR,
+ BOP_DS_DIR,
+ LOCAL_DATA_DIR,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from .bop_object_datasets import BOPObjectDataset
from .bop import BOPDataset, remap_bop_targets
-from .urdf_dataset import BOPUrdfDataset, OneUrdfDataset
+from .bop_object_datasets import BOPObjectDataset
from .texture_dataset import TextureDataset
-
+from .urdf_dataset import BOPUrdfDataset, OneUrdfDataset
logger = get_logger(__name__)
def _make_tless_dataset(split):
- ds_dir = BOP_DS_DIR / 'tless'
+ ds_dir = BOP_DS_DIR / "tless"
ds = BOPDataset(ds_dir, split=split)
return ds
def keep_bop19(ds):
- targets = pd.read_json(ds.ds_dir / 'test_targets_bop19.json')
+ targets = pd.read_json(ds.ds_dir / "test_targets_bop19.json")
targets = remap_bop_targets(targets)
- targets = targets.loc[:, ['scene_id', 'view_id']].drop_duplicates()
- index = ds.frame_index.merge(targets, on=['scene_id', 'view_id']).reset_index(drop=True)
+ targets = targets.loc[:, ["scene_id", "view_id"]].drop_duplicates()
+ index = ds.frame_index.merge(targets, on=["scene_id", "view_id"]).reset_index(
+ drop=True,
+ )
assert len(index) == len(targets)
ds.frame_index = index
return ds
@@ -31,112 +36,118 @@ def keep_bop19(ds):
def make_scene_dataset(ds_name, n_frames=None):
# TLESS
- if ds_name == 'tless.primesense.train':
- ds = _make_tless_dataset('train_primesense')
+ if ds_name == "tless.primesense.train":
+ ds = _make_tless_dataset("train_primesense")
- elif ds_name == 'tless.primesense.test':
- ds = _make_tless_dataset('test_primesense')
+ elif ds_name == "tless.primesense.test":
+ ds = _make_tless_dataset("test_primesense")
- elif ds_name == 'tless.primesense.test.bop19':
- ds = _make_tless_dataset('test_primesense')
+ elif ds_name == "tless.primesense.test.bop19":
+ ds = _make_tless_dataset("test_primesense")
ds = keep_bop19(ds)
# YCBV
- elif ds_name == 'ycbv.train.real':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='train_real')
-
- elif ds_name == 'ycbv.train.synt':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='train_synt')
-
- elif ds_name == 'ycbv.test':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='test')
-
- elif ds_name == 'ycbv.test.keyframes':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='test')
- keyframes_path = ds_dir / 'keyframe.txt'
- ls = keyframes_path.read_text().split('\n')[:-1]
+ elif ds_name == "ycbv.train.real":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="train_real")
+
+ elif ds_name == "ycbv.train.synt":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="train_synt")
+
+ elif ds_name == "ycbv.test":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="test")
+
+ elif ds_name == "ycbv.test.keyframes":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="test")
+ keyframes_path = ds_dir / "keyframe.txt"
+ ls = keyframes_path.read_text().split("\n")[:-1]
frame_index = ds.frame_index
ids = []
for l_n in ls:
- scene_id, view_id = l_n.split('/')
+ scene_id, view_id = l_n.split("/")
scene_id, view_id = int(scene_id), int(view_id)
- mask = (frame_index['scene_id'] == scene_id) & (frame_index['view_id'] == view_id)
+ mask = (frame_index["scene_id"] == scene_id) & (
+ frame_index["view_id"] == view_id
+ )
ids.append(np.where(mask)[0].item())
ds.frame_index = frame_index.iloc[ids].reset_index(drop=True)
# BOP challenge
- elif ds_name == 'hb.bop19':
- ds_dir = BOP_DS_DIR / 'hb'
- ds = BOPDataset(ds_dir, split='test_primesense')
+ elif ds_name == "hb.bop19":
+ ds_dir = BOP_DS_DIR / "hb"
+ ds = BOPDataset(ds_dir, split="test_primesense")
ds = keep_bop19(ds)
- elif ds_name == 'icbin.bop19':
- ds_dir = BOP_DS_DIR / 'icbin'
- ds = BOPDataset(ds_dir, split='test')
+ elif ds_name == "icbin.bop19":
+ ds_dir = BOP_DS_DIR / "icbin"
+ ds = BOPDataset(ds_dir, split="test")
ds = keep_bop19(ds)
- elif ds_name == 'itodd.bop19':
- ds_dir = BOP_DS_DIR / 'itodd'
- ds = BOPDataset(ds_dir, split='test')
+ elif ds_name == "itodd.bop19":
+ ds_dir = BOP_DS_DIR / "itodd"
+ ds = BOPDataset(ds_dir, split="test")
ds = keep_bop19(ds)
- elif ds_name == 'lmo.bop19':
- ds_dir = BOP_DS_DIR / 'lmo'
- ds = BOPDataset(ds_dir, split='test')
+ elif ds_name == "lmo.bop19":
+ ds_dir = BOP_DS_DIR / "lmo"
+ ds = BOPDataset(ds_dir, split="test")
ds = keep_bop19(ds)
- elif ds_name == 'tless.bop19':
- ds_dir = BOP_DS_DIR / 'tless'
- ds = BOPDataset(ds_dir, split='test_primesense')
+ elif ds_name == "tless.bop19":
+ ds_dir = BOP_DS_DIR / "tless"
+ ds = BOPDataset(ds_dir, split="test_primesense")
ds = keep_bop19(ds)
- elif ds_name == 'tudl.bop19':
- ds_dir = BOP_DS_DIR / 'tudl'
- ds = BOPDataset(ds_dir, split='test')
+ elif ds_name == "tudl.bop19":
+ ds_dir = BOP_DS_DIR / "tudl"
+ ds = BOPDataset(ds_dir, split="test")
ds = keep_bop19(ds)
- elif ds_name == 'ycbv.bop19':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='test')
+ elif ds_name == "ycbv.bop19":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="test")
ds = keep_bop19(ds)
- elif ds_name == 'hb.pbr':
- ds_dir = BOP_DS_DIR / 'hb'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'icbin.pbr':
- ds_dir = BOP_DS_DIR / 'icbin'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'itodd.pbr':
- ds_dir = BOP_DS_DIR / 'itodd'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'lm.pbr':
- ds_dir = BOP_DS_DIR / 'lm'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'tless.pbr':
- ds_dir = BOP_DS_DIR / 'tless'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'tudl.pbr':
- ds_dir = BOP_DS_DIR / 'tudl'
- ds = BOPDataset(ds_dir, split='train_pbr')
- elif ds_name == 'ycbv.pbr':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='train_pbr')
-
- elif ds_name == 'hb.val':
- ds_dir = BOP_DS_DIR / 'hb'
- ds = BOPDataset(ds_dir, split='val_primesense')
- elif ds_name == 'itodd.val':
- ds_dir = BOP_DS_DIR / 'itodd'
- ds = BOPDataset(ds_dir, split='val')
- elif ds_name == 'tudl.train.real':
- ds_dir = BOP_DS_DIR / 'tudl'
- ds = BOPDataset(ds_dir, split='train_real')
+ elif ds_name == "hb.pbr":
+ ds_dir = BOP_DS_DIR / "hb"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "icbin.pbr":
+ ds_dir = BOP_DS_DIR / "icbin"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "itodd.pbr":
+ ds_dir = BOP_DS_DIR / "itodd"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "lm.pbr":
+ ds_dir = BOP_DS_DIR / "lm"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "tless.pbr":
+ ds_dir = BOP_DS_DIR / "tless"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "tudl.pbr":
+ ds_dir = BOP_DS_DIR / "tudl"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+ elif ds_name == "ycbv.pbr":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="train_pbr")
+
+ elif ds_name == "hb.val":
+ ds_dir = BOP_DS_DIR / "hb"
+ ds = BOPDataset(ds_dir, split="val_primesense")
+ elif ds_name == "itodd.val":
+ ds_dir = BOP_DS_DIR / "itodd"
+ ds = BOPDataset(ds_dir, split="val")
+ elif ds_name == "tudl.train.real":
+ ds_dir = BOP_DS_DIR / "tudl"
+ ds = BOPDataset(ds_dir, split="train_real")
# Synthetic datasets
- elif 'synthetic.' in ds_name:
+ elif "synthetic." in ds_name:
from .synthetic_dataset import SyntheticSceneDataset
- assert '.train' in ds_name or '.val' in ds_name
- is_train = 'train' in ds_name.split('.')[-1]
- ds_name = ds_name.split('.')[1]
- ds = SyntheticSceneDataset(ds_dir=LOCAL_DATA_DIR / 'synt_datasets' / ds_name, train=is_train)
+
+ assert ".train" in ds_name or ".val" in ds_name
+ is_train = "train" in ds_name.split(".")[-1]
+ ds_name = ds_name.split(".")[1]
+ ds = SyntheticSceneDataset(
+ ds_dir=LOCAL_DATA_DIR / "synt_datasets" / ds_name,
+ train=is_train,
+ )
else:
raise ValueError(ds_name)
@@ -149,33 +160,33 @@ def make_scene_dataset(ds_name, n_frames=None):
def make_object_dataset(ds_name):
ds = None
- if ds_name == 'tless.cad':
- ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_cad')
- elif ds_name == 'tless.eval' or ds_name == 'tless.bop':
- ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_eval')
+ if ds_name == "tless.cad":
+ ds = BOPObjectDataset(BOP_DS_DIR / "tless/models_cad")
+ elif ds_name == "tless.eval" or ds_name == "tless.bop":
+ ds = BOPObjectDataset(BOP_DS_DIR / "tless/models_eval")
# YCBV
- elif ds_name == 'ycbv.bop':
- ds = BOPObjectDataset(BOP_DS_DIR / 'ycbv/models')
- elif ds_name == 'ycbv.bop-compat':
+ elif ds_name == "ycbv.bop":
+ ds = BOPObjectDataset(BOP_DS_DIR / "ycbv/models")
+ elif ds_name == "ycbv.bop-compat":
# BOP meshes (with their offsets) and symmetries
# Replace symmetries of objects not considered symmetric in PoseCNN
- ds = BOPObjectDataset(BOP_DS_DIR / 'ycbv/models_bop-compat')
- elif ds_name == 'ycbv.bop-compat.eval':
+ ds = BOPObjectDataset(BOP_DS_DIR / "ycbv/models_bop-compat")
+ elif ds_name == "ycbv.bop-compat.eval":
# PoseCNN eval meshes and symmetries, WITH bop offsets
- ds = BOPObjectDataset(BOP_DS_DIR / 'ycbv/models_bop-compat_eval')
+ ds = BOPObjectDataset(BOP_DS_DIR / "ycbv/models_bop-compat_eval")
# Other BOP
- elif ds_name == 'hb':
- ds = BOPObjectDataset(BOP_DS_DIR / 'hb/models')
- elif ds_name == 'icbin':
- ds = BOPObjectDataset(BOP_DS_DIR / 'icbin/models')
- elif ds_name == 'itodd':
- ds = BOPObjectDataset(BOP_DS_DIR / 'itodd/models')
- elif ds_name == 'lm':
- ds = BOPObjectDataset(BOP_DS_DIR / 'lm/models')
- elif ds_name == 'tudl':
- ds = BOPObjectDataset(BOP_DS_DIR / 'tudl/models')
+ elif ds_name == "hb":
+ ds = BOPObjectDataset(BOP_DS_DIR / "hb/models")
+ elif ds_name == "icbin":
+ ds = BOPObjectDataset(BOP_DS_DIR / "icbin/models")
+ elif ds_name == "itodd":
+ ds = BOPObjectDataset(BOP_DS_DIR / "itodd/models")
+ elif ds_name == "lm":
+ ds = BOPObjectDataset(BOP_DS_DIR / "lm/models")
+ elif ds_name == "tudl":
+ ds = BOPObjectDataset(BOP_DS_DIR / "tudl/models")
else:
raise ValueError(ds_name)
@@ -192,38 +203,38 @@ def make_urdf_dataset(ds_name):
return dataset
# BOP
- if ds_name == 'tless.cad':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'tless.cad')
- elif ds_name == 'tless.reconst':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'tless.reconst')
- elif ds_name == 'ycbv':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'ycbv')
- elif ds_name == 'hb':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'hb')
- elif ds_name == 'icbin':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'icbin')
- elif ds_name == 'itodd':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'itodd')
- elif ds_name == 'lm':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'lm')
- elif ds_name == 'tudl':
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'urdfs' / 'tudl')
+ if ds_name == "tless.cad":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "tless.cad")
+ elif ds_name == "tless.reconst":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "tless.reconst")
+ elif ds_name == "ycbv":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "ycbv")
+ elif ds_name == "hb":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "hb")
+ elif ds_name == "icbin":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "icbin")
+ elif ds_name == "itodd":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "itodd")
+ elif ds_name == "lm":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "lm")
+ elif ds_name == "tudl":
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "urdfs" / "tudl")
# Custom scenario
- elif 'custom' in ds_name:
- scenario = ds_name.split('.')[1]
- ds = BOPUrdfDataset(LOCAL_DATA_DIR / 'scenarios' / scenario / 'urdfs')
+ elif "custom" in ds_name:
+ scenario = ds_name.split(".")[1]
+ ds = BOPUrdfDataset(LOCAL_DATA_DIR / "scenarios" / scenario / "urdfs")
- elif ds_name == 'camera':
- ds = OneUrdfDataset(ASSET_DIR / 'camera/model.urdf', 'camera')
+ elif ds_name == "camera":
+ ds = OneUrdfDataset(ASSET_DIR / "camera/model.urdf", "camera")
else:
raise ValueError(ds_name)
return ds
def make_texture_dataset(ds_name):
- if ds_name == 'shapenet':
- ds = TextureDataset(LOCAL_DATA_DIR / 'texture_datasets' / 'shapenet')
+ if ds_name == "shapenet":
+ ds = TextureDataset(LOCAL_DATA_DIR / "texture_datasets" / "shapenet")
else:
raise ValueError(ds_name)
return ds
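
The factories above dispatch on dot-separated dataset names. A sketch of typical use, assuming the corresponding BOP data has been downloaded locally:

```python
from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
    make_object_dataset,
    make_scene_dataset,
)

scene_ds = make_scene_dataset("ycbv.bop19")  # test split, BOP19 target images only
object_ds = make_object_dataset("ycbv.bop")  # BOP meshes for ycbv

print(len(scene_ds.frame_index), len(object_ds.objects))
```
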
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py
index 5b12aaf5..04acc89e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py
@@ -1,41 +1,51 @@
-import torch
-import numpy as np
import random
+
+import numpy as np
+import torch
+
from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
-from .wrappers.visibility_wrapper import VisibilityWrapper
from .augmentations import (
- CropResizeToAspectAugmentation, VOCBackgroundAugmentation,
- PillowBlur, PillowSharpness, PillowContrast, PillowBrightness, PillowColor, to_torch_uint8,
- GrayScale
+ CropResizeToAspectAugmentation,
+ PillowBlur,
+ PillowBrightness,
+ PillowColor,
+ PillowContrast,
+ PillowSharpness,
+ VOCBackgroundAugmentation,
+ to_torch_uint8,
)
+from .wrappers.visibility_wrapper import VisibilityWrapper
class DetectionDataset(torch.utils.data.Dataset):
- def __init__(self,
- scene_ds,
- label_to_category_id,
- min_area=50,
- resize=(640, 480),
- gray_augmentation=False,
- rgb_augmentation=False,
- background_augmentation=False):
-
+ def __init__(
+ self,
+ scene_ds,
+ label_to_category_id,
+ min_area=50,
+ resize=(640, 480),
+ gray_augmentation=False,
+ rgb_augmentation=False,
+ background_augmentation=False,
+ ):
self.scene_ds = VisibilityWrapper(scene_ds)
self.resize_augmentation = CropResizeToAspectAugmentation(resize=resize)
self.background_augmentation = background_augmentation
self.background_augmentations = VOCBackgroundAugmentation(
- voc_root=LOCAL_DATA_DIR / 'VOCdevkit/VOC2012', p=0.3)
+ voc_root=LOCAL_DATA_DIR / "VOCdevkit/VOC2012",
+ p=0.3,
+ )
self.rgb_augmentation = rgb_augmentation
self.rgb_augmentations = [
PillowBlur(p=0.4, factor_interval=(1, 3)),
- PillowSharpness(p=0.3, factor_interval=(0., 50.)),
- PillowContrast(p=0.3, factor_interval=(0.2, 50.)),
+ PillowSharpness(p=0.3, factor_interval=(0.0, 50.0)),
+ PillowContrast(p=0.3, factor_interval=(0.2, 50.0)),
PillowBrightness(p=0.5, factor_interval=(0.1, 6.0)),
- PillowColor(p=0.3, factor_interval=(0., 20.))
+ PillowColor(p=0.3, factor_interval=(0.0, 20.0)),
]
self.label_to_category_id = label_to_category_id
@@ -58,16 +68,22 @@ def get_data(self, idx):
rgb, mask = to_torch_uint8(rgb), to_torch_uint8(mask)
- categories = torch.tensor([self.label_to_category_id[obj['name']] for obj in state['objects']])
- obj_ids = np.array([obj['id_in_segm'] for obj in state['objects']])
- boxes = np.array([torch.as_tensor(obj['bbox']).tolist() for obj in state['objects']])
+ categories = torch.tensor(
+ [self.label_to_category_id[obj["name"]] for obj in state["objects"]],
+ )
+ obj_ids = np.array([obj["id_in_segm"] for obj in state["objects"]])
+ boxes = np.array(
+ [torch.as_tensor(obj["bbox"]).tolist() for obj in state["objects"]],
+ )
boxes = torch.as_tensor(boxes, dtype=torch.float32).view(-1, 4)
- area = torch.as_tensor((boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]))
+ area = torch.as_tensor(
+ (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]),
+ )
mask = np.array(mask)
masks = mask == obj_ids[:, None, None]
masks = torch.as_tensor(masks)
- keep = (area > self.min_area)
+ keep = area > self.min_area
boxes = boxes[keep]
area = area[keep]
categories = categories[keep]
@@ -96,9 +112,10 @@ def __getitem__(self, index):
n_attempts = 0
while not valid:
if n_attempts > 10:
- raise ValueError('Cannot find valid image in the dataset')
+ msg = "Cannot find valid image in the dataset"
+ raise ValueError(msg)
im, target = self.get_data(try_index)
- valid = len(target['boxes']) > 0
+ valid = len(target["boxes"]) > 0
if not valid:
try_index = random.randint(0, len(self.scene_ds) - 1)
n_attempts += 1
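
Wiring the refactored `DetectionDataset` looks roughly like the sketch below. The label-to-category mapping is an assumption about the caller (category 0 reserved for background); note that the VOC background augmentation is constructed unconditionally in `__init__`, so `VOCdevkit/VOC2012` must exist under the local data directory:

```python
from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
    make_scene_dataset,
)
from happypose.pose_estimators.cosypose.cosypose.datasets.detection_dataset import (
    DetectionDataset,
)

scene_ds = make_scene_dataset("ycbv.pbr")
label_to_category_id = {"background": 0}
for i, label in enumerate(scene_ds.all_labels, start=1):
    label_to_category_id[label] = i

ds = DetectionDataset(
    scene_ds,
    label_to_category_id,
    resize=(640, 480),
    rgb_augmentation=True,  # the Pillow augmentations listed above
)
im, target = ds[0]
print(target["boxes"].shape)  # (n_objects, 4) after the min_area filter
```
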
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/pose_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/pose_dataset.py
index 06568697..2ceb6722 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/pose_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/pose_dataset.py
@@ -1,16 +1,25 @@
-import torch
import random
-import numpy as np
from dataclasses import dataclass
-from happypose.pose_estimators.cosypose.cosypose.lib3d import invert_T
+
+import numpy as np
+import torch
+
from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
-from .wrappers.visibility_wrapper import VisibilityWrapper
from .augmentations import (
- CropResizeToAspectAugmentation, VOCBackgroundAugmentation,
- PillowBlur, PillowSharpness, PillowContrast, PillowBrightness, PillowColor, to_torch_uint8,
- GrayScale
+ CropResizeToAspectAugmentation,
+ GrayScale,
+ PillowBlur,
+ PillowBrightness,
+ PillowColor,
+ PillowContrast,
+ PillowSharpness,
+ VOCBackgroundAugmentation,
+ to_torch_uint8,
)
+from .wrappers.visibility_wrapper import VisibilityWrapper
+
@dataclass
class PoseData:
@@ -33,14 +42,15 @@ class NoObjectError(Exception):
class PoseDataset(torch.utils.data.Dataset):
- def __init__(self,
- scene_ds,
- resize=(640, 480),
- min_area=None,
- rgb_augmentation=False,
- gray_augmentation=False,
- background_augmentation=False):
-
+ def __init__(
+ self,
+ scene_ds,
+ resize=(640, 480),
+ min_area=None,
+ rgb_augmentation=False,
+ gray_augmentation=False,
+ background_augmentation=False,
+ ):
self.scene_ds = VisibilityWrapper(scene_ds)
self.resize_augmentation = CropResizeToAspectAugmentation(resize=resize)
@@ -48,15 +58,17 @@ def __init__(self,
self.background_augmentation = background_augmentation
self.background_augmentations = VOCBackgroundAugmentation(
- voc_root=LOCAL_DATA_DIR, p=0.3)
+ voc_root=LOCAL_DATA_DIR,
+ p=0.3,
+ )
self.rgb_augmentation = rgb_augmentation
self.rgb_augmentations = [
PillowBlur(p=0.4, factor_interval=(1, 3)),
- PillowSharpness(p=0.3, factor_interval=(0., 50.)),
- PillowContrast(p=0.3, factor_interval=(0.2, 50.)),
+ PillowSharpness(p=0.3, factor_interval=(0.0, 50.0)),
+ PillowContrast(p=0.3, factor_interval=(0.2, 50.0)),
PillowBrightness(p=0.5, factor_interval=(0.1, 6.0)),
- PillowColor(p=0.3, factor_interval=(0., 20.)),
+ PillowColor(p=0.3, factor_interval=(0.0, 20.0)),
]
if gray_augmentation:
self.rgb_augmentations.append(GrayScale(p=0.5))
@@ -65,10 +77,10 @@ def __len__(self):
return len(self.scene_ds)
def collate_fn(self, batch):
- data = dict()
+ data = {}
for k in batch[0].__annotations__:
v = [getattr(x, k) for x in batch]
- if k in ('images', 'bboxes', 'TCO', 'K'):
+ if k in ("images", "bboxes", "TCO", "K"):
v = torch.as_tensor(np.stack(v))
data[k] = v
data = PoseData(**data)
@@ -89,13 +101,13 @@ def get_data(self, idx):
rgb, mask = to_torch_uint8(rgb), to_torch_uint8(mask)
mask_uniqs = set(np.unique(mask))
objects_visible = []
- for obj in state['objects']:
+ for obj in state["objects"]:
add = False
- if obj['id_in_segm'] in mask_uniqs and np.all(np.array(obj['bbox']) >= 0):
+ if obj["id_in_segm"] in mask_uniqs and np.all(np.array(obj["bbox"]) >= 0):
add = True
if add and self.min_area is not None:
- bbox = np.array(obj['bbox'])
+ bbox = np.array(obj["bbox"])
area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
if area >= self.min_area:
add = True
@@ -111,15 +123,15 @@ def get_data(self, idx):
assert rgb.shape[0] == 3
obj = random.sample(objects_visible, k=1)[0]
- TWO = torch.as_tensor(obj['TWO'])
- TWC = torch.as_tensor(state['camera']['TWC'])
+ TWO = torch.as_tensor(obj["TWO"])
+ TWC = torch.as_tensor(state["camera"]["TWC"])
TCO = invert_T(TWC) @ TWO
data = PoseData(
images=np.asarray(rgb),
- bboxes=np.asarray(obj['bbox']),
+ bboxes=np.asarray(obj["bbox"]),
TCO=np.asarray(TCO),
- K=np.asarray(state['camera']['K']),
+ K=np.asarray(state["camera"]["K"]),
objects=obj,
)
return data
@@ -130,7 +142,8 @@ def __getitem__(self, index):
n_attempts = 0
while not valid:
if n_attempts > 10:
- raise ValueError('Cannot find valid image in the dataset')
+ msg = "Cannot find valid image in the dataset"
+ raise ValueError(msg)
try:
data = self.get_data(try_index)
valid = True
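
Batching uses the dataset's own `collate_fn`, which stacks the `images`, `bboxes`, `TCO` and `K` fields of `PoseData` into tensors. A sketch, with the same caveat as above about the VOC background data being present:

```python
import torch

from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
    make_scene_dataset,
)
from happypose.pose_estimators.cosypose.cosypose.datasets.pose_dataset import (
    PoseDataset,
)

scene_ds = make_scene_dataset("ycbv.pbr")
ds = PoseDataset(scene_ds, resize=(640, 480), rgb_augmentation=True)
loader = torch.utils.data.DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)

batch = next(iter(loader))
print(batch.images.shape, batch.TCO.shape)  # (8, 3, 480, 640), (8, 4, 4)
```
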
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/samplers.py b/happypose/pose_estimators/cosypose/cosypose/datasets/samplers.py
index 100c8307..cf524067 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/samplers.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/samplers.py
@@ -1,6 +1,7 @@
-import torch
import numpy as np
+import torch
from torch.utils.data import Sampler
+
from happypose.pose_estimators.cosypose.cosypose.utils.random import temp_numpy_seed
@@ -14,7 +15,7 @@ def __len__(self):
return self.epoch_size
def __iter__(self):
- return (i.item() for i in torch.randperm(self.n_items)[:len(self)])
+ return (i.item() for i in torch.randperm(self.n_items)[: len(self)])
class DistributedSceneSampler(Sampler):
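
The sampler change is cosmetic, but the pattern deserves a line: a fresh random permutation truncated to the epoch size yields distinct indices on every epoch. The same idea in isolation:

```python
import torch


def partial_epoch_indices(n_items: int, epoch_size: int):
    """Yield epoch_size distinct indices drawn uniformly from range(n_items)."""
    return (i.item() for i in torch.randperm(n_items)[:epoch_size])


print(list(partial_epoch_indices(10, 4)))  # e.g. [7, 2, 9, 0]
```
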
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py
index 6be06194..3ec3adc2 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py
@@ -1,15 +1,19 @@
-import torch
-import pandas as pd
-import numpy as np
import pickle as pkl
-import yaml
-import cv2
from io import BytesIO
-from .utils import make_detections_from_segmentation
-from happypose.toolbox.datasets.datasets_cfg import make_urdf_dataset
from pathlib import Path
+
+import cv2
+import numpy as np
+import pandas as pd
+import torch
import torch.multiprocessing
-torch.multiprocessing.set_sharing_strategy('file_system')
+import yaml
+
+from happypose.toolbox.datasets.datasets_cfg import make_urdf_dataset
+
+from .utils import make_detections_from_segmentation
+
+torch.multiprocessing.set_sharing_strategy("file_system")
class SyntheticSceneDataset:
@@ -17,16 +21,18 @@ def __init__(self, ds_dir, train=True):
self.ds_dir = Path(ds_dir)
assert self.ds_dir.exists()
- keys_path = ds_dir / (('train' if train else 'val') + '_keys.pkl')
+ keys_path = ds_dir / (("train" if train else "val") + "_keys.pkl")
keys = pkl.loads(keys_path.read_bytes())
- self.cfg = yaml.load((ds_dir / 'config.yaml').read_text(), Loader=yaml.Loader)
- self.object_set = self.cfg.scene_kwargs['urdf_ds']
+ self.cfg = yaml.load((ds_dir / "config.yaml").read_text(), Loader=yaml.Loader)
+ self.object_set = self.cfg.scene_kwargs["urdf_ds"]
self.keys = keys
- urdf_ds_name = self.cfg.scene_kwargs['urdf_ds']
+ urdf_ds_name = self.cfg.scene_kwargs["urdf_ds"]
urdf_ds = make_urdf_dataset(urdf_ds_name)
self.all_labels = [obj.label for obj in urdf_ds]
- self.frame_index = pd.DataFrame(dict(scene_id=np.arange(len(keys)), view_id=np.arange(len(keys))))
+ self.frame_index = pd.DataFrame(
+ {"scene_id": np.arange(len(keys)), "view_id": np.arange(len(keys))},
+ )
def __len__(self):
return len(self.frame_index)
@@ -45,20 +51,29 @@ def _deserialize_im_cv2(im_buf, rgb=True):
def __getitem__(self, idx):
key = self.keys[idx]
- pkl_path = (self.ds_dir / 'dumps' / key).with_suffix('.pkl')
+ pkl_path = (self.ds_dir / "dumps" / key).with_suffix(".pkl")
dic = pkl.loads(pkl_path.read_bytes())
- cam = dic['camera']
- rgb = self._deserialize_im_cv2(cam['rgb'])
- mask = self._deserialize_im_cv2(cam['mask'], rgb=False)
- cam = {k: v for k, v in cam.items() if k not in {'rgb', 'mask'}}
- objects = dic['objects']
+ cam = dic["camera"]
+ rgb = self._deserialize_im_cv2(cam["rgb"])
+ mask = self._deserialize_im_cv2(cam["mask"], rgb=False)
+ cam = {k: v for k, v in cam.items() if k not in {"rgb", "mask"}}
+ objects = dic["objects"]
for object in objects:
- object['name'] = '{}-{}'.format(self.cfg.scene_kwargs['urdf_ds'], object['name'])
- dets_gt = make_detections_from_segmentation(torch.as_tensor(mask).unsqueeze(0))[0]
+ object["name"] = "{}-{}".format(
+ self.cfg.scene_kwargs["urdf_ds"],
+ object["name"],
+ )
+ dets_gt = make_detections_from_segmentation(torch.as_tensor(mask).unsqueeze(0))[
+ 0
+ ]
mask_uniqs = set(np.unique(mask[mask > 0]))
for obj in objects:
- if obj['id_in_segm'] in mask_uniqs:
- obj['bbox'] = dets_gt[obj['id_in_segm']].numpy()
- state = dict(camera=cam, objects=objects, frame_info=self.frame_index.iloc[idx].to_dict())
+ if obj["id_in_segm"] in mask_uniqs:
+ obj["bbox"] = dets_gt[obj["id_in_segm"]].numpy()
+ state = {
+ "camera": cam,
+ "objects": objects,
+ "frame_info": self.frame_index.iloc[idx].to_dict(),
+ }
return rgb, mask, state
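
A usage sketch for the refactored `SyntheticSceneDataset`; the dataset name below is hypothetical, but any directory under `local_data/synt_datasets` containing a `config.yaml` and `train_keys.pkl`/`val_keys.pkl` follows this layout. Note the constructor expects a `Path`, as in `datasets_cfg`:

```python
from pathlib import Path

from happypose.pose_estimators.cosypose.cosypose.datasets.synthetic_dataset import (
    SyntheticSceneDataset,
)

ds = SyntheticSceneDataset(Path("local_data/synt_datasets/my-synt-ds"), train=True)
rgb, mask, state = ds[0]
print(rgb.shape, len(state["objects"]), state["frame_info"])
```
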
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/texture_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/texture_dataset.py
index 60d3d8bb..77d2662d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/texture_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/texture_dataset.py
@@ -1,5 +1,7 @@
-import pandas as pd
from pathlib import Path
+
+import pandas as pd
+
from happypose.pose_estimators.cosypose.cosypose.config import MEMORY
@@ -13,9 +15,9 @@ def __init__(self, ds_dir):
def parse_image_dir(ds_dir):
ds_dir = Path(ds_dir)
index = []
- for im_path in ds_dir.glob('*'):
- if im_path.suffix in {'.png', '.jpg'}:
- index.append(dict(texture_path=im_path))
+ for im_path in ds_dir.glob("*"):
+ if im_path.suffix in {".png", ".jpg"}:
+ index.append({"texture_path": im_path})
index = pd.DataFrame(index)
return index
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/urdf_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/urdf_dataset.py
index 8a3dceb6..499bd975 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/urdf_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/urdf_dataset.py
@@ -1,20 +1,21 @@
-import pandas as pd
from pathlib import Path
+import pandas as pd
+
class UrdfDataset:
def __init__(self, ds_dir):
ds_dir = Path(ds_dir)
index = []
for urdf_dir in Path(ds_dir).iterdir():
- urdf_paths = list(urdf_dir.glob('*.urdf'))
+ urdf_paths = list(urdf_dir.glob("*.urdf"))
if len(urdf_paths) == 1:
urdf_path = urdf_paths[0]
- infos = dict(
- label=urdf_dir.name,
- urdf_path=urdf_path.as_posix(),
- scale=1.0,
- )
+ infos = {
+ "label": urdf_dir.name,
+ "urdf_path": urdf_path.as_posix(),
+ "scale": 1.0,
+ }
index.append(infos)
self.index = pd.DataFrame(index)
@@ -28,15 +29,13 @@ def __len__(self):
class BOPUrdfDataset(UrdfDataset):
def __init__(self, ds_dir):
super().__init__(ds_dir)
- self.index['scale'] = 0.001
+ self.index["scale"] = 0.001
class OneUrdfDataset:
def __init__(self, urdf_path, label, scale=1.0):
index = [
- dict(urdf_path=urdf_path,
- label=label,
- scale=scale)
+ {"urdf_path": urdf_path, "label": label, "scale": scale},
]
self.index = pd.DataFrame(index)
@@ -51,7 +50,11 @@ class UrdfMultiScaleDataset(UrdfDataset):
def __init__(self, urdf_path, label, scales=[]):
index = []
for scale in scales:
- index.append(dict(urdf_path=urdf_path,
- label=label+f'scale={scale:.3f}',
- scale=scale))
+ index.append(
+ {
+ "urdf_path": urdf_path,
+ "label": label + f"scale={scale:.3f}",
+ "scale": scale,
+ },
+ )
self.index = pd.DataFrame(index)
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/utils.py b/happypose/pose_estimators/cosypose/cosypose/datasets/utils.py
index dd38932f..01fa1ee3 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/utils.py
@@ -1,7 +1,10 @@
+import numpy as np
import torch
import torchvision
-import numpy as np
-from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import get_K_crop_resize
+
+from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import (
+ get_K_crop_resize,
+)
def crop_to_aspect_ratio(images, box, masks=None, K=None):
@@ -11,8 +14,12 @@ def crop_to_aspect_ratio(images, box, masks=None, K=None):
assert box.shape[0] == 4
w_output, h_output = box[[2, 3]] - box[[0, 1]]
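+    # torchvision.ops.roi_pool expects rois as (batch_index, x1, y1, x2, y2); prepend each image's index to the shared box.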
boxes = torch.cat(
- (torch.arange(bsz).unsqueeze(1).to(box.device).float(), box.unsqueeze(0).repeat(bsz, 1).float()),
- dim=1).to(images.device)
+ (
+ torch.arange(bsz).unsqueeze(1).to(box.device).float(),
+ box.unsqueeze(0).repeat(bsz, 1).float(),
+ ),
+ dim=1,
+ ).to(images.device)
images = torchvision.ops.roi_pool(images, boxes, output_size=(h_output, w_output))
if masks is not None:
assert masks.dim() == 4
@@ -20,7 +27,12 @@ def crop_to_aspect_ratio(images, box, masks=None, K=None):
if K is not None:
assert K.dim() == 3
assert K.shape[0] == bsz
- K = get_K_crop_resize(K, boxes[:, 1:], orig_size=(h, w), crop_resize=(h_output, w_output))
+ K = get_K_crop_resize(
+ K,
+ boxes[:, 1:],
+ orig_size=(h, w),
+ crop_resize=(h_output, w_output),
+ )
return images, masks, K
@@ -31,10 +43,15 @@ def make_detections_from_segmentation(masks):
masks = masks.squeeze(0)
for mask_n in masks:
- dets_n = dict()
+ dets_n = {}
for uniq in torch.unique(mask_n, sorted=True):
ids = np.where((mask_n == uniq).cpu().numpy())
- x1, y1, x2, y2 = np.min(ids[1]), np.min(ids[0]), np.max(ids[1]), np.max(ids[0])
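+            # np.where returns (row_indices, col_indices): ids[1] are x coordinates, ids[0] are y.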
+ x1, y1, x2, y2 = (
+ np.min(ids[1]),
+ np.min(ids[0]),
+ np.max(ids[1]),
+ np.max(ids[0]),
+ )
dets_n[int(uniq.item())] = torch.tensor([x1, y1, x2, y2]).to(mask_n.device)
detections.append(dets_n)
return detections
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/__init__.py b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/__init__.py
index 16ae25a2..e69de29b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/__init__.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/__init__.py
@@ -1 +0,0 @@
-from .augmentation_wrapper import AugmentationWrapper
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/multiview_wrapper.py b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/multiview_wrapper.py
index 995c2ff2..513d6f0a 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/multiview_wrapper.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/multiview_wrapper.py
@@ -1,16 +1,15 @@
+import numpy as np
import pandas as pd
import torch
-import numpy as np
from .base import SceneDatasetWrapper
class MultiViewWrapper(SceneDatasetWrapper):
def __init__(self, scene_ds, n_views=4):
-
n_max_views = n_views
frame_index = scene_ds.frame_index.copy().reset_index(drop=True)
- groups = frame_index.groupby(['scene_id']).groups
+ groups = frame_index.groupby(["scene_id"]).groups
random_state = np.random.RandomState(0)
self.frame_index = []
@@ -18,30 +17,32 @@ def __init__(self, scene_ds, n_views=4):
n_max_views = n_views
group_ids = random_state.permutation(group_ids)
len_group = len(group_ids)
- for k, m in enumerate(np.arange(len_group)[::n_max_views]):
- ids_k = np.arange(len(group_ids))[m:m+n_max_views].tolist()
+ for _k, m in enumerate(np.arange(len_group)[::n_max_views]):
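+                # Stride through the permuted frames in chunks of n_max_views, one multi-view group per chunk.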
+ ids_k = np.arange(len(group_ids))[m : m + n_max_views].tolist()
ds_ids = group_ids[ids_k]
df_group = frame_index.loc[ds_ids]
- self.frame_index.append(dict(
- scene_id=scene_id,
- view_ids=df_group['view_id'].values.tolist(),
- n_views=len(df_group),
- scene_ds_ids=ds_ids,
- ))
+ self.frame_index.append(
+ {
+ "scene_id": scene_id,
+ "view_ids": df_group["view_id"].values.tolist(),
+ "n_views": len(df_group),
+ "scene_ds_ids": ds_ids,
+ },
+ )
self.frame_index = pd.DataFrame(self.frame_index)
- self.frame_index['group_id'] = np.arange(len(self.frame_index))
+ self.frame_index["group_id"] = np.arange(len(self.frame_index))
self.scene_ds = scene_ds
def __getitem__(self, idx):
row = self.frame_index.iloc[idx]
- ds_ids = row['scene_ds_ids']
+ ds_ids = row["scene_ds_ids"]
rgbs, masks, obss = [], [], []
for ds_id in ds_ids:
rgb, mask, obs = self.scene_ds[ds_id]
rgbs.append(rgb)
masks.append(mask)
- obs['frame_info']['group_id'] = row['group_id']
+ obs["frame_info"]["group_id"] = row["group_id"]
obss.append(obs)
rgbs = torch.stack(rgbs)
diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/visibility_wrapper.py b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/visibility_wrapper.py
index e41ac6a0..97095775 100644
--- a/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/visibility_wrapper.py
+++ b/happypose/pose_estimators/cosypose/cosypose/datasets/wrappers/visibility_wrapper.py
@@ -1,4 +1,5 @@
import numpy as np
+
from .base import SceneDatasetWrapper
@@ -8,8 +9,8 @@ def process_data(self, data):
ids_visible = np.unique(mask)
ids_visible = set(ids_visible[ids_visible > 0])
visib_objects = []
- for obj in state['objects']:
- if obj['id_in_segm'] in ids_visible:
+ for obj in state["objects"]:
+ if obj["id_in_segm"] in ids_visible:
visib_objects.append(obj)
- state['objects'] = visib_objects
+ state["objects"] = visib_objects
return rgb, mask, state
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/data_utils.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/data_utils.py
index 37e18a3e..d3f0d029 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/data_utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/data_utils.py
@@ -1,35 +1,39 @@
+from collections import defaultdict
+
import pandas as pd
import torch
-from collections import defaultdict
-import cosypose.utils.tensor_collection as tc
+
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
def parse_obs_data(obs):
data = defaultdict(list)
- frame_info = obs['frame_info']
- TWC = torch.as_tensor(obs['camera']['TWC']).float()
- for n, obj in enumerate(obs['objects']):
- info = dict(frame_obj_id=n,
- label=obj['name'],
- visib_fract=obj.get('visib_fract', 1),
- scene_id=frame_info['scene_id'],
- view_id=frame_info['view_id'])
- data['infos'].append(info)
- data['TWO'].append(obj['TWO'])
- data['bboxes'].append(obj['bbox'])
+ frame_info = obs["frame_info"]
+ TWC = torch.as_tensor(obs["camera"]["TWC"]).float()
+ for n, obj in enumerate(obs["objects"]):
+ info = {
+ "frame_obj_id": n,
+ "label": obj["name"],
+ "visib_fract": obj.get("visib_fract", 1),
+ "scene_id": frame_info["scene_id"],
+ "view_id": frame_info["view_id"],
+ }
+ data["infos"].append(info)
+ data["TWO"].append(obj["TWO"])
+ data["bboxes"].append(obj["bbox"])
for k, v in data.items():
- if k != 'infos':
- data[k] = torch.stack([torch.as_tensor(x) .float()for x in v])
+ if k != "infos":
+ data[k] = torch.stack([torch.as_tensor(x).float() for x in v])
- data['infos'] = pd.DataFrame(data['infos'])
- TCO = invert_T(TWC).unsqueeze(0) @ data['TWO']
+ data["infos"] = pd.DataFrame(data["infos"])
+ TCO = invert_T(TWC).unsqueeze(0) @ data["TWO"]
data = tc.PandasTensorCollection(
- infos=data['infos'],
+ infos=data["infos"],
TCO=TCO,
- bboxes=data['bboxes'],
+ bboxes=data["bboxes"],
poses=TCO,
)
return data
@@ -39,6 +43,6 @@ def data_to_pose_model_inputs(data):
TXO = data.poses
obj_infos = []
for n in range(len(data)):
- obj_info = dict(name=data.infos.loc[n, 'label'])
+ obj_info = {"name": data.infos.loc[n, "label"]}
obj_infos.append(obj_info)
return TXO, obj_infos
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/detection_eval.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/detection_eval.py
index 2858ba43..24b7d9e7 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/detection_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/detection_eval.py
@@ -1,36 +1,54 @@
-from tqdm import tqdm
-import numpy as np
-import torch
-import pandas as pd
-
from collections import OrderedDict
+import numpy as np
+import pandas as pd
+import torch
from torch.utils.data import DataLoader
+from tqdm import tqdm
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, get_tmp_dir
-
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.evaluation.data_utils import parse_obs_data
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import DistributedSceneSampler
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import (
+ DistributedSceneSampler,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.data_utils import (
+ parse_obs_data,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+)
class DetectionEvaluation:
- def __init__(self, scene_ds, meters, batch_size=64,
- cache_data=True, n_workers=4, sampler=None):
-
+ def __init__(
+ self,
+ scene_ds,
+ meters,
+ batch_size=64,
+ cache_data=True,
+ n_workers=4,
+ sampler=None,
+ ):
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
self.scene_ds = scene_ds
if sampler is None:
- sampler = DistributedSceneSampler(scene_ds,
- num_replicas=self.world_size,
- rank=self.rank,
- shuffle=True)
- dataloader = DataLoader(scene_ds, batch_size=batch_size,
- num_workers=n_workers,
- sampler=sampler, collate_fn=self.collate_fn)
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ shuffle=True,
+ )
+ dataloader = DataLoader(
+ scene_ds,
+ batch_size=batch_size,
+ num_workers=n_workers,
+ sampler=sampler,
+ collate_fn=self.collate_fn,
+ )
if cache_data:
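+            # Cache every batch in memory up front so repeated evaluations skip dataset loading.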
self.dataloader = list(tqdm(dataloader))
@@ -38,15 +56,18 @@ def __init__(self, scene_ds, meters, batch_size=64,
self.dataloader = dataloader
self.meters = meters
- self.meters = OrderedDict({k: v for k, v in sorted(self.meters.items(), key=lambda item: item[0])})
-
+ self.meters = OrderedDict(
+ dict(sorted(self.meters.items(), key=lambda item: item[0])),
+ )
@staticmethod
def make_empty_predictions():
- infos = dict(view_id=np.empty(0, dtype=np.int),
- scene_id=np.empty(0, dtype=np.int),
- label=np.empty(0, dtype=np.object),
- score=np.empty(0, dtype=np.float))
+    infos = {
+        "view_id": np.empty(0, dtype=int),
+        "scene_id": np.empty(0, dtype=int),
+        "label": np.empty(0, dtype=object),
+        "score": np.empty(0, dtype=float),
+    }
bboxes = torch.empty(0, 4, dtype=torch.float)
return tc.PandasTensorCollection(infos=pd.DataFrame(infos), bboxes=bboxes)
@@ -59,22 +80,22 @@ def collate_fn(self, batch):
obj_data = tc.concatenate(obj_data)
return obj_data
- def evaluate(self, obj_predictions, device='cuda'):
+ def evaluate(self, obj_predictions, device="cuda"):
for meter in self.meters.values():
meter.reset()
obj_predictions = obj_predictions.to(device)
for obj_data_gt in tqdm(self.dataloader):
- for k, meter in self.meters.items():
+ for _k, meter in self.meters.items():
meter.add(obj_predictions, obj_data_gt.to(device))
return self.summary()
def summary(self):
- summary, dfs = dict(), dict()
+ summary, dfs = {}, {}
for meter_k, meter in sorted(self.meters.items()):
meter.gather_distributed(tmp_dir=self.tmp_dir)
if get_rank() == 0 and len(meter.datas) > 0:
summary_, df_ = meter.summary()
dfs[meter_k] = df_
for k, v in summary_.items():
- summary[meter_k + '/' + k] = v
+ summary[meter_k + "/" + k] = v
return summary, dfs
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/pose_eval.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/pose_eval.py
index 0d8b3d0c..971b7125 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/pose_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/eval_runner/pose_eval.py
@@ -1,35 +1,54 @@
-from tqdm import tqdm
-import numpy as np
-import torch
-import pandas as pd
-
from collections import OrderedDict
+import numpy as np
+import pandas as pd
+import torch
from torch.utils.data import DataLoader
+from tqdm import tqdm
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, get_tmp_dir
-
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.evaluation.data_utils import parse_obs_data
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import DistributedSceneSampler
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import (
+ DistributedSceneSampler,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.data_utils import (
+ parse_obs_data,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+)
class PoseEvaluation:
- def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4, sampler=None):
-
+ def __init__(
+ self,
+ scene_ds,
+ meters,
+ batch_size=64,
+ cache_data=True,
+ n_workers=4,
+ sampler=None,
+ ):
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
self.scene_ds = scene_ds
if sampler is None:
- sampler = DistributedSceneSampler(scene_ds,
- num_replicas=self.world_size,
- rank=self.rank,
- shuffle=True)
- dataloader = DataLoader(scene_ds, batch_size=batch_size,
- num_workers=n_workers,
- sampler=sampler, collate_fn=self.collate_fn)
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ shuffle=True,
+ )
+ dataloader = DataLoader(
+ scene_ds,
+ batch_size=batch_size,
+ num_workers=n_workers,
+ sampler=sampler,
+ collate_fn=self.collate_fn,
+ )
if cache_data:
self.dataloader = list(tqdm(dataloader))
@@ -37,15 +56,18 @@ def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4
self.dataloader = dataloader
self.meters = meters
- self.meters = OrderedDict({k: v for k, v in sorted(self.meters.items(), key=lambda item: item[0])})
-
+ self.meters = OrderedDict(
+ dict(sorted(self.meters.items(), key=lambda item: item[0])),
+ )
@staticmethod
def make_empty_predictions():
- infos = dict(view_id=np.empty(0, dtype=np.int),
- scene_id=np.empty(0, dtype=np.int),
- label=np.empty(0, dtype=np.object),
- score=np.empty(0, dtype=np.float))
+    infos = {
+        "view_id": np.empty(0, dtype=int),
+        "scene_id": np.empty(0, dtype=int),
+        "label": np.empty(0, dtype=object),
+        "score": np.empty(0, dtype=float),
+    }
poses = torch.empty(0, 4, 4, dtype=torch.float)
return tc.PandasTensorCollection(infos=pd.DataFrame(infos), poses=poses)
@@ -58,22 +80,22 @@ def collate_fn(self, batch):
obj_data = tc.concatenate(obj_data)
return obj_data
- def evaluate(self, obj_predictions, device='cuda'):
+ def evaluate(self, obj_predictions, device="cuda"):
for meter in self.meters.values():
meter.reset()
obj_predictions = obj_predictions.to(device)
for obj_data_gt in tqdm(self.dataloader):
- for k, meter in self.meters.items():
+ for _k, meter in self.meters.items():
meter.add(obj_predictions, obj_data_gt.to(device))
return self.summary()
def summary(self):
- summary, dfs = dict(), dict()
+ summary, dfs = {}, {}
for meter_k, meter in sorted(self.meters.items()):
meter.gather_distributed(tmp_dir=self.tmp_dir)
if get_rank() == 0 and len(meter.datas) > 0:
summary_, df_ = meter.summary()
dfs[meter_k] = df_
for k, v in summary_.items():
- summary[meter_k + '/' + k] = v
+ summary[meter_k + "/" + k] = v
return summary, dfs
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py
index b77b174e..f5dab43e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py
@@ -1,84 +1,77 @@
-
# Standard Library
from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Optional
# Third Party
import torch
+import yaml
from omegaconf import OmegaConf
# MegaPose
import happypose
-import happypose.pose_estimators.megapose as megapose
import happypose.pose_estimators.megapose.evaluation.evaluation_runner
import happypose.toolbox.datasets.datasets_cfg
import happypose.toolbox.inference.utils
-from happypose.pose_estimators.megapose.evaluation.eval_config import (
- EvalConfig,
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
+from happypose.pose_estimators.cosypose.cosypose.evaluation.prediction_runner import (
+ PredictionRunner,
)
-from happypose.pose_estimators.megapose.evaluation.evaluation_runner import (
- EvaluationRunner,
+from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
+from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
+ PoseEstimator,
)
-from happypose.pose_estimators.megapose.evaluation.meters.modelnet_meters import (
- ModelNetErrorMeter,
+
+# Detection
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ check_update_config as check_update_config_detector,
)
-from happypose.pose_estimators.cosypose.cosypose.evaluation.prediction_runner import (
- PredictionRunner,
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ create_model_detector,
)
-from happypose.pose_estimators.megapose.evaluation.runner_utils import (
- format_results,
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ check_update_config as check_update_config_pose,
)
-from happypose.pose_estimators.megapose.inference.depth_refiner import (
- DepthRefiner,
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ create_model_coarse,
+ create_model_refiner,
)
-from happypose.pose_estimators.megapose.inference.icp_refiner import (
- ICPRefiner,
+from happypose.pose_estimators.megapose.evaluation.eval_config import EvalConfig
+from happypose.pose_estimators.megapose.evaluation.evaluation_runner import (
+ EvaluationRunner,
)
-from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
- PoseEstimator,
+from happypose.pose_estimators.megapose.evaluation.meters.modelnet_meters import (
+ ModelNetErrorMeter,
)
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
+from happypose.pose_estimators.megapose.evaluation.runner_utils import format_results
+from happypose.pose_estimators.megapose.inference.icp_refiner import ICPRefiner
-# from happypose.pose_estimators.megapose.inference.teaserpp_refiner import TeaserppRefiner
+# Pose estimator
+from happypose.pose_estimators.megapose.inference.teaserpp_refiner import (
+ TeaserppRefiner,
+)
+from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
+from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir
from happypose.toolbox.utils.logging import get_logger
# """" Temporary imports
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR
-# Pose estimator
-from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import create_model_refiner, create_model_coarse
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import check_update_config as check_update_config_pose
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import BulletBatchRenderer
-from happypose.pose_estimators.cosypose.cosypose.integrated.pose_predictor import CoarseRefinePosePredictor
-from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import MultiviewScenePredictor
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
-import cosypose.utils.tensor_collection as tc
-# Detection
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import create_model_detector
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import check_update_config as check_update_config_detector
-from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
-
-from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
-
-import yaml
-
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger = get_logger(__name__)
+
def load_detector(run_id, ds_name):
run_dir = EXP_DIR / run_id
# cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_detector(cfg)
label_to_category_id = cfg.label_to_category_id
model = create_model_detector(cfg, len(label_to_category_id))
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar', map_location=device)
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=device)
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.to(device).eval()
model.cfg = cfg
@@ -86,45 +79,55 @@ def load_detector(run_id, ds_name):
model = Detector(model, ds_name)
return model
+
def load_pose_models(coarse_run_id, refiner_run_id, n_workers):
run_dir = EXP_DIR / coarse_run_id
# cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_pose(cfg)
# object_ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_cad')
- #object_ds = make_object_dataset(cfg.object_ds_name)
- #mesh_db = MeshDataBase.from_object_ds(object_ds)
- #renderer = BulletBatchRenderer(object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer)
+ # object_ds = make_object_dataset(cfg.object_ds_name)
+ # mesh_db = MeshDataBase.from_object_ds(object_ds)
+ # renderer = BulletBatchRenderer(
+ # object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer
+ # )
#
-
+
object_dataset = make_object_dataset("ycbv")
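+    # NOTE: the mesh database is built from the hard-coded "ycbv" object set.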
mesh_db = MeshDataBase.from_object_ds(object_dataset)
- renderer = Panda3dBatchRenderer(object_dataset, n_workers=n_workers, preload_cache=False)
+ renderer = Panda3dBatchRenderer(
+ object_dataset,
+ n_workers=n_workers,
+ preload_cache=False,
+ )
mesh_db_batched = mesh_db.batched().to(device)
def load_model(run_id):
run_dir = EXP_DIR / run_id
# cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_pose(cfg)
if cfg.train_refiner:
- model = create_model_refiner(cfg, renderer=renderer, mesh_db=mesh_db_batched)
+ model = create_model_refiner(
+ cfg,
+ renderer=renderer,
+ mesh_db=mesh_db_batched,
+ )
else:
model = create_model_coarse(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar', map_location=device)
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=device)
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.to(device).eval()
model.cfg = cfg
model.config = cfg
return model
-
+
coarse_model = load_model(coarse_run_id)
refiner_model = load_model(refiner_run_id)
return coarse_model, refiner_model, mesh_db
-
def generate_save_key(detection_type: str, coarse_estimation_type: str) -> str:
return f"{detection_type}+{coarse_estimation_type}"
@@ -132,15 +135,18 @@ def generate_save_key(detection_type: str, coarse_estimation_type: str) -> str:
def get_save_dir(cfg: EvalConfig) -> Path:
"""Returns a save dir.
- Example
-
+ Example:
+ -------
.../ycbv.bop19/gt+SO3_grid
You must remove the '.bop19' from the name in order for the
bop_toolkit_lib to process it correctly.
"""
- save_key = generate_save_key(cfg.inference.detection_type, cfg.inference.coarse_estimation_type)
+ save_key = generate_save_key(
+ cfg.inference.detection_type,
+ cfg.inference.coarse_estimation_type,
+ )
assert cfg.save_dir is not None
assert cfg.ds_name is not None
@@ -151,7 +157,7 @@ def get_save_dir(cfg: EvalConfig) -> Path:
def run_eval(
cfg: EvalConfig,
save_dir: Optional[Path] = None,
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
"""Run eval for a single setting on a single dataset.
A single setting is a (detection_type, coarse_estimation_type) such
@@ -161,12 +167,15 @@ def run_eval(
cfg.save_dir / ds_name / eval_key / results.pth.tar
- Returns:
+ Returns
+ -------
         dict: the results if this is the rank-0 process, otherwise None
"""
-
- save_key = generate_save_key(cfg.inference.detection_type, cfg.inference.coarse_estimation_type)
+ save_key = generate_save_key(
+ cfg.inference.detection_type,
+ cfg.inference.coarse_estimation_type,
+ )
if save_dir is None:
save_dir = get_save_dir(cfg)
@@ -175,13 +184,20 @@ def run_eval(
logger.info(f"Running eval on ds_name={cfg.ds_name} with setting={save_key}")
# Load the dataset
- ds_kwargs = dict(load_depth=False)
- scene_ds = happypose.toolbox.datasets.datasets_cfg.make_scene_dataset(cfg.ds_name, **ds_kwargs)
- urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(cfg.ds_name)
+ ds_kwargs = {"load_depth": False}
+ scene_ds = happypose.toolbox.datasets.datasets_cfg.make_scene_dataset(
+ cfg.ds_name,
+ **ds_kwargs,
+ )
+ urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(
+ cfg.ds_name,
+ )
# drop frames if this was specified
if cfg.n_frames is not None:
- scene_ds.frame_index = scene_ds.frame_index[: cfg.n_frames].reset_index(drop=True)
+ scene_ds.frame_index = scene_ds.frame_index[: cfg.n_frames].reset_index(
+ drop=True,
+ )
# Load detector model
if cfg.inference.detection_type == "detector":
@@ -190,16 +206,22 @@ def run_eval(
elif cfg.inference.detection_type == "gt":
detector_model = None
else:
- raise ValueError(f"Unknown detection_type={cfg.inference.detection_type}")
+ msg = f"Unknown detection_type={cfg.inference.detection_type}"
+ raise ValueError(msg)
     # Load the coarse and refiner models
# Needed to deal with the fact that str and Optional[str] are incompatible types.
# See https://stackoverflow.com/a/53287330
assert cfg.coarse_run_id is not None
assert cfg.refiner_run_id is not None
- # TODO (emaitre): This fuction seems to take the wrong parameters. Trying to fix this
+    # TODO (emaitre): This function seems to take the wrong parameters. Trying to fix
+    # this.
"""
- coarse_model, refiner_model, mesh_db = happypose.toolbox.inference.utils.load_pose_models(
+ (
+ coarse_model,
+ refiner_model,
+ mesh_db,
+ ) = happypose.toolbox.inference.utils.load_pose_models(
coarse_run_id=cfg.coarse_run_id,
refiner_run_id=cfg.refiner_run_id,
n_workers=cfg.n_rendering_workers,
@@ -210,25 +232,23 @@ def run_eval(
"""
object_ds = make_object_dataset(obj_ds_name)
-
coarse_model, refiner_model, mesh_db = load_pose_models(
coarse_run_id=cfg.coarse_run_id,
refiner_run_id=cfg.refiner_run_id,
n_workers=8,
)
-
renderer = refiner_model.renderer
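+    # The refiner instances below are constructed but not retained, so depth refinement appears unused in this path.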
if cfg.inference.run_depth_refiner:
if cfg.inference.depth_refiner == "icp":
- depth_refiner: Optional[DepthRefiner] = ICPRefiner(mesh_db, renderer)
+ ICPRefiner(mesh_db, renderer)
elif cfg.inference.depth_refiner == "teaserpp":
- depth_refiner = TeaserppRefiner(mesh_db, renderer)
+ TeaserppRefiner(mesh_db, renderer)
else:
- depth_refiner = None
+ pass
else:
- depth_refiner = None
+ pass
pose_estimator = PoseEstimator(
refiner_model=refiner_model,
@@ -260,7 +280,7 @@ def run_eval(
# Compute eval metrics
# TODO (lmanuelli): Fix this up.
# TODO (ylabbe): Clean this.
- eval_metrics, eval_dfs = dict(), dict()
+ eval_metrics, eval_dfs = {}, {}
if not cfg.skip_evaluation:
assert "modelnet" in cfg.ds_name
object_ds = make_object_dataset(obj_ds_name)
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/base.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/base.py
index d4160d04..5e1bcc09 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/base.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/base.py
@@ -1,8 +1,12 @@
+from collections import defaultdict
from pathlib import Path
+
import torch
-from collections import defaultdict
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_world_size,
+)
class Meter:
@@ -22,7 +26,7 @@ def gather_distributed(self, tmp_dir):
tmp_dir = Path(tmp_dir)
tmp_dir.mkdir(exist_ok=True, parents=True)
rank, world_size = get_rank(), get_world_size()
- tmp_file_template = (tmp_dir / 'rank={rank}.pth.tar').as_posix()
+ tmp_file_template = (tmp_dir / "rank={rank}.pth.tar").as_posix()
if rank > 0:
tmp_file = tmp_file_template.format(rank=rank)
torch.save(self.datas, tmp_file)
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/detection_meters.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/detection_meters.py
index 4fb77ddb..02fadf1b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/detection_meters.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/detection_meters.py
@@ -1,25 +1,32 @@
import numpy as np
-from sklearn.metrics import average_precision_score
-import xarray as xr
-import torchvision
import torch
-from torch.utils.data import TensorDataset, DataLoader
-from .base import Meter
+import torchvision
+import xarray as xr
+from sklearn.metrics import average_precision_score
+from torch.utils.data import DataLoader, TensorDataset
-from .utils import (match_poses, get_top_n_ids,
- add_valid_gt, get_candidate_matches, add_inst_num)
from happypose.pose_estimators.cosypose.cosypose.utils.xarray import xr_merge
+from .base import Meter
+from .utils import (
+ add_inst_num,
+ add_valid_gt,
+ get_candidate_matches,
+ get_top_n_ids,
+ match_poses,
+)
-class DetectionMeter(Meter):
- def __init__(self,
- iou_threshold=0.5,
- errors_bsz=512,
- consider_all_predictions=False,
- targets=None,
- visib_gt_min=-1,
- n_top=-1):
+class DetectionMeter(Meter):
+ def __init__(
+ self,
+ iou_threshold=0.5,
+ errors_bsz=512,
+ consider_all_predictions=False,
+ targets=None,
+ visib_gt_min=-1,
+ n_top=-1,
+ ):
self.iou_threshold = iou_threshold
self.consider_all_predictions = consider_all_predictions
self.targets = targets
@@ -32,7 +39,7 @@ def compute_metrics(self, bbox_pred, bbox_gt):
iou_all = torchvision.ops.box_iou(bbox_pred, bbox_gt)
arange_n = torch.arange(len(bbox_pred))
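+        # box_iou returns the full pairwise matrix; candidate pairs are aligned index-to-index, so keep the diagonal.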
iou = iou_all[arange_n, arange_n]
- return dict(iou=iou)
+ return {"iou": iou}
def compute_metrics_batch(self, bbox_pred, bbox_gt):
metrics = []
@@ -40,59 +47,82 @@ def compute_metrics_batch(self, bbox_pred, bbox_gt):
ds = TensorDataset(bbox_pred, bbox_gt, ids)
dl = DataLoader(ds, batch_size=self.errors_bsz)
- for (bbox_pred_, bbox_gt_, ids_) in dl:
+ for bbox_pred_, bbox_gt_, ids_ in dl:
metrics.append(self.compute_metrics(bbox_pred_, bbox_gt_))
if len(metrics) == 0:
- metrics.append(dict(
- iou=torch.empty(0, dtype=torch.float),
- ))
+ metrics.append(
+ {
+ "iou": torch.empty(0, dtype=torch.float),
+ },
+ )
- metricsd = dict()
+ metricsd = {}
for k in metrics[0].keys():
metricsd[k] = torch.cat([metrics_n[k] for metrics_n in metrics], dim=0)
return metricsd
def add(self, pred_data, gt_data):
- group_keys = ['scene_id', 'view_id', 'label']
+ group_keys = ["scene_id", "view_id", "label"]
pred_data = pred_data.float()
gt_data = gt_data.float()
# Only keep predictions relevant to gt scene and images.
- gt_infos = gt_data.infos.loc[:, ['scene_id', 'view_id']].drop_duplicates().reset_index(drop=True)
+ gt_infos = (
+ gt_data.infos.loc[:, ["scene_id", "view_id"]]
+ .drop_duplicates()
+ .reset_index(drop=True)
+ )
targets = self.targets
if targets is not None:
targets = gt_infos.merge(targets)
- pred_data.infos['batch_pred_id'] = np.arange(len(pred_data))
- keep_ids = gt_infos.merge(pred_data.infos)['batch_pred_id']
+ pred_data.infos["batch_pred_id"] = np.arange(len(pred_data))
+ keep_ids = gt_infos.merge(pred_data.infos)["batch_pred_id"]
pred_data = pred_data[keep_ids]
# Add inst id to the dataframes
- pred_data.infos = add_inst_num(pred_data.infos, key='pred_inst_id', group_keys=group_keys)
- gt_data.infos = add_inst_num(gt_data.infos, key='gt_inst_id', group_keys=group_keys)
+ pred_data.infos = add_inst_num(
+ pred_data.infos,
+ key="pred_inst_id",
+ group_keys=group_keys,
+ )
+ gt_data.infos = add_inst_num(
+ gt_data.infos,
+ key="gt_inst_id",
+ group_keys=group_keys,
+ )
# Filter predictions according to BOP evaluation.
if not self.consider_all_predictions:
- ids_top_n_pred = get_top_n_ids(pred_data.infos,
- group_keys=group_keys, top_key='score',
- targets=targets, n_top=self.n_top)
+ ids_top_n_pred = get_top_n_ids(
+ pred_data.infos,
+ group_keys=group_keys,
+ top_key="score",
+ targets=targets,
+ n_top=self.n_top,
+ )
pred_data_filtered = pred_data.clone()[ids_top_n_pred]
else:
pred_data_filtered = pred_data.clone()
# Compute valid targets according to BOP evaluation.
- gt_data.infos = add_valid_gt(gt_data.infos,
- group_keys=group_keys,
- targets=targets,
- visib_gt_min=self.visib_gt_min)
+ gt_data.infos = add_valid_gt(
+ gt_data.infos,
+ group_keys=group_keys,
+ targets=targets,
+ visib_gt_min=self.visib_gt_min,
+ )
# Compute tentative candidates
- cand_infos = get_candidate_matches(pred_data_filtered.infos, gt_data.infos,
- group_keys=group_keys,
- only_valids=True)
- pred_ids = cand_infos['pred_id'].values.tolist()
- gt_ids = cand_infos['gt_id'].values.tolist()
+ cand_infos = get_candidate_matches(
+ pred_data_filtered.infos,
+ gt_data.infos,
+ group_keys=group_keys,
+ only_valids=True,
+ )
+ pred_ids = cand_infos["pred_id"].values.tolist()
+ gt_ids = cand_infos["gt_id"].values.tolist()
cand_bbox_gt = gt_data.bboxes[gt_ids]
cand_bbox_pred = pred_data_filtered.bboxes[pred_ids]
@@ -100,82 +130,110 @@ def add(self, pred_data, gt_data):
metrics = self.compute_metrics_batch(cand_bbox_pred, cand_bbox_gt)
# Matches can only be candidates within thresholds
- cand_infos['iou'] = metrics['iou'].cpu().numpy()
- keep = cand_infos['iou'] >= self.iou_threshold
+ cand_infos["iou"] = metrics["iou"].cpu().numpy()
+ keep = cand_infos["iou"] >= self.iou_threshold
cand_infos = cand_infos[keep].reset_index(drop=True)
# Match predictions to ground truth detections
- cand_infos['error'] = - cand_infos['iou']
+ cand_infos["error"] = -cand_infos["iou"]
matches = match_poses(cand_infos, group_keys=group_keys)
         # Save all information in xarray datasets
- gt_keys = group_keys + ['gt_inst_id', 'valid'] + (['visib_fract'] if 'visib_fract' in gt_infos else [])
+ gt_keys = [*group_keys, "gt_inst_id", "valid"] + (
+ ["visib_fract"] if "visib_fract" in gt_infos else []
+ )
gt = gt_data.infos.loc[:, gt_keys]
- preds = pred_data.infos.loc[:, group_keys + ['pred_inst_id', 'score']]
- matches = matches.loc[:, group_keys + ['pred_inst_id', 'gt_inst_id', 'cand_id']]
+ preds = pred_data.infos.loc[:, [*group_keys, "pred_inst_id", "score"]]
+ matches = matches.loc[:, [*group_keys, "pred_inst_id", "gt_inst_id", "cand_id"]]
- gt = xr.Dataset(gt).rename({'dim_0': 'gt_id'})
- matches = xr.Dataset(matches).rename({'dim_0': 'match_id'})
- preds = xr.Dataset(preds).rename({'dim_0': 'pred_id'})
+ gt = xr.Dataset(gt).rename({"dim_0": "gt_id"})
+ matches = xr.Dataset(matches).rename({"dim_0": "match_id"})
+ preds = xr.Dataset(preds).rename({"dim_0": "pred_id"})
- ious = metrics['iou'].cpu().numpy()[matches['cand_id'].values]
- matches['iou'] = 'match_id', ious
- matches['iou_valid'] = 'match_id', ious >= self.iou_threshold
+ ious = metrics["iou"].cpu().numpy()[matches["cand_id"].values]
+ matches["iou"] = "match_id", ious
+ matches["iou_valid"] = "match_id", ious >= self.iou_threshold
fill_values = {
- 'iou': np.nan,
- 'iou_valid': False,
- 'score': np.nan,
+ "iou": np.nan,
+ "iou_valid": False,
+ "score": np.nan,
}
- matches = xr_merge(matches, preds, on=group_keys + ['pred_inst_id'],
- dim1='match_id', dim2='pred_id', fill_value=fill_values)
- gt = xr_merge(gt, matches, on=group_keys + ['gt_inst_id'],
- dim1='gt_id', dim2='match_id', fill_value=fill_values)
-
- preds_match_merge = xr_merge(preds, matches, on=group_keys+['pred_inst_id'],
- dim1='pred_id', dim2='match_id', fill_value=fill_values)
- preds['iou_valid'] = 'pred_id', preds_match_merge['iou_valid']
-
- self.datas['gt_df'].append(gt)
- self.datas['pred_df'].append(preds)
- self.datas['matches_df'].append(matches)
+ matches = xr_merge(
+ matches,
+ preds,
+ on=[*group_keys, "pred_inst_id"],
+ dim1="match_id",
+ dim2="pred_id",
+ fill_value=fill_values,
+ )
+ gt = xr_merge(
+ gt,
+ matches,
+ on=[*group_keys, "gt_inst_id"],
+ dim1="gt_id",
+ dim2="match_id",
+ fill_value=fill_values,
+ )
+
+ preds_match_merge = xr_merge(
+ preds,
+ matches,
+ on=[*group_keys, "pred_inst_id"],
+ dim1="pred_id",
+ dim2="match_id",
+ fill_value=fill_values,
+ )
+ preds["iou_valid"] = "pred_id", preds_match_merge["iou_valid"]
+
+ self.datas["gt_df"].append(gt)
+ self.datas["pred_df"].append(preds)
+ self.datas["matches_df"].append(matches)
def summary(self):
- gt_df = xr.concat(self.datas['gt_df'], dim='gt_id')
- matches_df = xr.concat(self.datas['matches_df'], dim='match_id')
- pred_df = xr.concat(self.datas['pred_df'], dim='pred_id')
- valid_df = gt_df.sel(gt_id=gt_df['valid'])
+ gt_df = xr.concat(self.datas["gt_df"], dim="gt_id")
+ matches_df = xr.concat(self.datas["matches_df"], dim="match_id")
+ pred_df = xr.concat(self.datas["pred_df"], dim="pred_id")
+ valid_df = gt_df.sel(gt_id=gt_df["valid"])
# AP/mAP @ IoU < threshold
- valid_k = 'iou_valid'
- n_gts = dict()
+ valid_k = "iou_valid"
+ n_gts = {}
if self.n_top > 0:
- group_keys = ['scene_id', 'view_id', 'label']
- subdf = gt_df[[*group_keys, 'valid']].to_dataframe().groupby(group_keys).sum().reset_index()
- subdf['gt_count'] = np.minimum(self.n_top, subdf['valid'])
- for label, group in subdf.groupby('label'):
- n_gts[label] = group['gt_count'].sum()
+ group_keys = ["scene_id", "view_id", "label"]
+ subdf = (
+ gt_df[[*group_keys, "valid"]]
+ .to_dataframe()
+ .groupby(group_keys)
+ .sum()
+ .reset_index()
+ )
+ subdf["gt_count"] = np.minimum(self.n_top, subdf["valid"])
+ for label, group in subdf.groupby("label"):
+ n_gts[label] = group["gt_count"].sum()
else:
- subdf = gt_df[['label', 'valid']].groupby('label').sum()
- for label in subdf['label'].values:
- n_gts[label] = subdf.sel(label=label)['valid'].item()
+ subdf = gt_df[["label", "valid"]].groupby("label").sum()
+ for label in subdf["label"].values:
+ n_gts[label] = subdf.sel(label=label)["valid"].item()
- ap_dfs = dict()
+ ap_dfs = {}
def compute_ap(label_df, label_n_gt):
- label_df = label_df.sort_values('score', ascending=False).reset_index(drop=True)
- label_df['n_tp'] = np.cumsum(label_df[valid_k].values.astype(np.float))
- label_df['prec'] = label_df['n_tp'] / (np.arange(len(label_df)) + 1)
- label_df['recall'] = label_df['n_tp'] / label_n_gt
+ label_df = label_df.sort_values("score", ascending=False).reset_index(
+ drop=True,
+ )
+ label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(np.float))
+ label_df["prec"] = label_df["n_tp"] / (np.arange(len(label_df)) + 1)
+ label_df["recall"] = label_df["n_tp"] / label_n_gt
y_true = label_df[valid_k]
- y_score = label_df['score']
+ y_score = label_df["score"]
ap = average_precision_score(y_true, y_score) * y_true.sum() / label_n_gt
- label_df['AP'] = ap
- label_df['n_gt'] = label_n_gt
+ label_df["AP"] = ap
+ label_df["n_gt"] = label_n_gt
return ap, label_df
- df = pred_df[['label', valid_k, 'score']].to_dataframe().set_index(['label'])
+ df = pred_df[["label", valid_k, "score"]].to_dataframe().set_index(["label"])
for label, label_n_gt in n_gts.items():
-            if df.index.contains(label):
+            if label in df.index:
label_df = df.loc[[label]]
@@ -184,26 +242,29 @@ def compute_ap(label_df, label_n_gt):
ap_dfs[label] = label_df
if len(ap_dfs) > 0:
- mAP = np.mean([np.unique(ap_df['AP']).item() for ap_df in ap_dfs.values()])
- AP, ap_dfs['all'] = compute_ap(df.reset_index(), sum(list(n_gts.values())))
+ mAP = np.mean([np.unique(ap_df["AP"]).item() for ap_df in ap_dfs.values()])
+ AP, ap_dfs["all"] = compute_ap(df.reset_index(), sum(list(n_gts.values())))
else:
- AP, mAP = 0., 0.
+ AP, mAP = 0.0, 0.0
n_gt_valid = int(sum(list(n_gts.values())))
summary = {
- 'n_gt': len(gt_df['gt_id']),
- 'n_gt_valid': n_gt_valid,
- 'n_pred': len(pred_df['pred_id']),
- 'n_matched': len(matches_df['match_id']),
- 'matched_gt_ratio': len(matches_df['match_id']) / n_gt_valid,
- 'pred_matched_ratio': len(pred_df['pred_id']) / max(len(matches_df['match_id']), 1),
- 'iou_valid_recall': valid_df['iou_valid'].sum('gt_id').item() / n_gt_valid,
+ "n_gt": len(gt_df["gt_id"]),
+ "n_gt_valid": n_gt_valid,
+ "n_pred": len(pred_df["pred_id"]),
+ "n_matched": len(matches_df["match_id"]),
+ "matched_gt_ratio": len(matches_df["match_id"]) / n_gt_valid,
+ "pred_matched_ratio": len(pred_df["pred_id"])
+ / max(len(matches_df["match_id"]), 1),
+ "iou_valid_recall": valid_df["iou_valid"].sum("gt_id").item() / n_gt_valid,
}
- summary.update({
- 'AP': AP,
- 'mAP': mAP,
- })
+ summary.update(
+ {
+ "AP": AP,
+ "mAP": mAP,
+ },
+ )
- dfs = dict(gt=gt_df, matches=matches_df, preds=pred_df, ap=ap_dfs)
+ dfs = {"gt": gt_df, "matches": matches_df, "preds": pred_df, "ap": ap_dfs}
return summary, dfs
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/pose_meters.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/pose_meters.py
index dca34b5e..9365672a 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/pose_meters.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/pose_meters.py
@@ -1,36 +1,46 @@
-from sklearn.metrics import average_precision_score
-import numpy as np
-import xarray as xr
from collections import OrderedDict
+
+import numpy as np
import torch
-from torch.utils.data import TensorDataset, DataLoader
+import xarray as xr
+from sklearn.metrics import average_precision_score
+from torch.utils.data import DataLoader, TensorDataset
-from happypose.pose_estimators.cosypose.cosypose.lib3d.distances import dists_add, dists_add_symmetric
+from happypose.pose_estimators.cosypose.cosypose.lib3d.distances import (
+ dists_add,
+ dists_add_symmetric,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.xarray import xr_merge
-from .utils import (match_poses, get_top_n_ids,
- add_valid_gt, get_candidate_matches, add_inst_num,
- compute_auc_posecnn)
from .base import Meter
+from .utils import (
+ add_inst_num,
+ add_valid_gt,
+ compute_auc_posecnn,
+ get_candidate_matches,
+ get_top_n_ids,
+ match_poses,
+)
class PoseErrorMeter(Meter):
- def __init__(self,
- mesh_db,
- error_type='ADD',
- report_AP=False,
- report_error_AUC=False,
- report_error_stats=False,
- sample_n_points=None,
- errors_bsz=1,
- match_threshold=0.1,
- exact_meshes=True,
- spheres_overlap_check=True,
- consider_all_predictions=False,
- targets=None,
- visib_gt_min=-1,
- n_top=-1):
-
+ def __init__(
+ self,
+ mesh_db,
+ error_type="ADD",
+ report_AP=False,
+ report_error_AUC=False,
+ report_error_stats=False,
+ sample_n_points=None,
+ errors_bsz=1,
+ match_threshold=0.1,
+ exact_meshes=True,
+ spheres_overlap_check=True,
+ consider_all_predictions=False,
+ targets=None,
+ visib_gt_min=-1,
+ n_top=-1,
+ ):
self.sample_n_points = sample_n_points
self.mesh_db = mesh_db.batched().cuda().float()
self.error_type = error_type.upper()
@@ -55,7 +65,7 @@ def compute_errors(self, TXO_pred, TXO_gt, labels):
if self.exact_meshes:
assert len(labels) == 1
- n_points = self.mesh_db.infos[labels[0]]['n_points']
+ n_points = self.mesh_db.infos[labels[0]]["n_points"]
points = meshes.points[:, :n_points]
else:
if self.sample_n_points is not None:
@@ -63,32 +73,49 @@ def compute_errors(self, TXO_pred, TXO_gt, labels):
else:
points = meshes.points
- if self.error_type.upper() == 'ADD':
+ if self.error_type.upper() == "ADD":
dists = dists_add(TXO_pred, TXO_gt, points)
- elif self.error_type.upper() == 'ADD-S':
+ elif self.error_type.upper() == "ADD-S":
dists = dists_add_symmetric(TXO_pred, TXO_gt, points)
- elif self.error_type.upper() == 'ADD(-S)':
+ elif self.error_type.upper() == "ADD(-S)":
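+            # Mixed metric: symmetric objects are scored with ADD-S, all others with plain ADD.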
ids_nosym, ids_sym = [], []
for n, label in enumerate(labels):
- if self.mesh_db.infos[label]['is_symmetric']:
+ if self.mesh_db.infos[label]["is_symmetric"]:
ids_sym.append(n)
else:
ids_nosym.append(n)
- dists = torch.empty((len(TXO_pred), points.shape[1], 3), dtype=TXO_pred.dtype, device=TXO_pred.device)
+ dists = torch.empty(
+ (len(TXO_pred), points.shape[1], 3),
+ dtype=TXO_pred.dtype,
+ device=TXO_pred.device,
+ )
if len(ids_nosym) > 0:
- dists[ids_nosym] = dists_add(TXO_pred[ids_nosym], TXO_gt[ids_nosym], points[ids_nosym])
+ dists[ids_nosym] = dists_add(
+ TXO_pred[ids_nosym],
+ TXO_gt[ids_nosym],
+ points[ids_nosym],
+ )
if len(ids_sym) > 0:
- dists[ids_sym] = dists_add_symmetric(TXO_pred[ids_sym], TXO_gt[ids_sym], points[ids_sym])
+ dists[ids_sym] = dists_add_symmetric(
+ TXO_pred[ids_sym],
+ TXO_gt[ids_sym],
+ points[ids_sym],
+ )
else:
- raise ValueError("Error not supported", self.error_type)
-
- errors = dict()
- errors['norm_avg'] = torch.norm(dists, dim=-1, p=2).mean(-1)
- errors['xyz_avg'] = dists.abs().mean(dim=-2)
- errors['TCO_xyz'] = (TXO_pred[:, :3, -1] - TXO_gt[:, :3, -1]).abs()
- errors['TCO_norm'] = torch.norm(TXO_pred[:, :3, -1] - TXO_gt[:, :3, -1], dim=-1, p=2)
+ msg = "Error not supported"
+ raise ValueError(msg, self.error_type)
+
+ errors = {}
+ errors["norm_avg"] = torch.norm(dists, dim=-1, p=2).mean(-1)
+ errors["xyz_avg"] = dists.abs().mean(dim=-2)
+ errors["TCO_xyz"] = (TXO_pred[:, :3, -1] - TXO_gt[:, :3, -1]).abs()
+ errors["TCO_norm"] = torch.norm(
+ TXO_pred[:, :3, -1] - TXO_gt[:, :3, -1],
+ dim=-1,
+ p=2,
+ )
return errors
def compute_errors_batch(self, TXO_pred, TXO_gt, labels):
@@ -96,184 +123,255 @@ def compute_errors_batch(self, TXO_pred, TXO_gt, labels):
ids = torch.arange(len(labels))
ds = TensorDataset(TXO_pred, TXO_gt, ids)
dl = DataLoader(ds, batch_size=self.errors_bsz)
- for (TXO_pred_, TXO_gt_, ids_) in dl:
+ for TXO_pred_, TXO_gt_, ids_ in dl:
labels_ = labels[ids_.numpy()]
errors.append(self.compute_errors(TXO_pred_, TXO_gt_, labels_))
if len(errors) == 0:
- errors.append(dict(
- norm_avg=torch.empty(0, dtype=torch.float),
- xyz_avg=torch.empty(0, 3, dtype=torch.float),
- TCO_xyz=torch.empty((0, 3), dtype=torch.float),
- TCO_norm=torch.empty(0, dtype=torch.float),
- ))
-
- errorsd = dict()
+ errors.append(
+ {
+ "norm_avg": torch.empty(0, dtype=torch.float),
+ "xyz_avg": torch.empty(0, 3, dtype=torch.float),
+ "TCO_xyz": torch.empty((0, 3), dtype=torch.float),
+ "TCO_norm": torch.empty(0, dtype=torch.float),
+ },
+ )
+
+ errorsd = {}
for k in errors[0].keys():
errorsd[k] = torch.cat([errors_n[k] for errors_n in errors], dim=0)
return errorsd
def add(self, pred_data, gt_data):
- group_keys = ['scene_id', 'view_id', 'label']
+ group_keys = ["scene_id", "view_id", "label"]
pred_data = pred_data.float()
gt_data = gt_data.float()
# Only keep predictions relevant to gt scene and images.
- gt_infos = gt_data.infos.loc[:, ['scene_id', 'view_id']].drop_duplicates().reset_index(drop=True)
+ gt_infos = (
+ gt_data.infos.loc[:, ["scene_id", "view_id"]]
+ .drop_duplicates()
+ .reset_index(drop=True)
+ )
targets = self.targets
if targets is not None:
targets = gt_infos.merge(targets)
- pred_data.infos['batch_pred_id'] = np.arange(len(pred_data))
- keep_ids = gt_infos.merge(pred_data.infos)['batch_pred_id']
+ pred_data.infos["batch_pred_id"] = np.arange(len(pred_data))
+ keep_ids = gt_infos.merge(pred_data.infos)["batch_pred_id"]
pred_data = pred_data[keep_ids]
# Add inst id to the dataframes
- pred_data.infos = add_inst_num(pred_data.infos, key='pred_inst_id', group_keys=group_keys)
- gt_data.infos = add_inst_num(gt_data.infos, key='gt_inst_id', group_keys=group_keys)
+ pred_data.infos = add_inst_num(
+ pred_data.infos,
+ key="pred_inst_id",
+ group_keys=group_keys,
+ )
+ gt_data.infos = add_inst_num(
+ gt_data.infos,
+ key="gt_inst_id",
+ group_keys=group_keys,
+ )
# Filter predictions according to BOP evaluation.
if not self.consider_all_predictions:
- ids_top_n_pred = get_top_n_ids(pred_data.infos,
- group_keys=group_keys, top_key='score',
- targets=targets, n_top=self.n_top)
+ ids_top_n_pred = get_top_n_ids(
+ pred_data.infos,
+ group_keys=group_keys,
+ top_key="score",
+ targets=targets,
+ n_top=self.n_top,
+ )
pred_data_filtered = pred_data.clone()[ids_top_n_pred]
else:
pred_data_filtered = pred_data.clone()
# Compute valid targets according to BOP evaluation.
- gt_data.infos = add_valid_gt(gt_data.infos,
- group_keys=group_keys,
- targets=targets,
- visib_gt_min=self.visib_gt_min)
+ gt_data.infos = add_valid_gt(
+ gt_data.infos,
+ group_keys=group_keys,
+ targets=targets,
+ visib_gt_min=self.visib_gt_min,
+ )
# Compute tentative candidates
- cand_infos = get_candidate_matches(pred_data_filtered.infos, gt_data.infos,
- group_keys=group_keys,
- only_valids=True)
+ cand_infos = get_candidate_matches(
+ pred_data_filtered.infos,
+ gt_data.infos,
+ group_keys=group_keys,
+ only_valids=True,
+ )
# Filter out tentative matches that are too far.
if self.spheres_overlap_check:
- diameters = [self.mesh_db.infos[k]['diameter_m'] for k in cand_infos['label']]
- dists = pred_data_filtered[cand_infos['pred_id'].values.tolist()].poses[:, :3, -1] - \
- gt_data[cand_infos['gt_id'].values.tolist()].poses[:, :3, -1]
- spheres_overlap = torch.norm(dists, dim=-1) < torch.as_tensor(diameters).to(dists.dtype).to(dists.device)
+ diameters = [
+ self.mesh_db.infos[k]["diameter_m"] for k in cand_infos["label"]
+ ]
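+            # Keep only candidate pairs whose centers lie closer than the object diameter, i.e. whose bounding spheres overlap.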
+ dists = (
+ pred_data_filtered[cand_infos["pred_id"].values.tolist()].poses[
+ :,
+ :3,
+ -1,
+ ]
+ - gt_data[cand_infos["gt_id"].values.tolist()].poses[:, :3, -1]
+ )
+ spheres_overlap = torch.norm(dists, dim=-1) < torch.as_tensor(diameters).to(
+ dists.dtype,
+ ).to(dists.device)
keep_ids = np.where(spheres_overlap.cpu().numpy())[0]
cand_infos = cand_infos.iloc[keep_ids].reset_index(drop=True)
- cand_infos['cand_id'] = np.arange(len(cand_infos))
+ cand_infos["cand_id"] = np.arange(len(cand_infos))
- pred_ids = cand_infos['pred_id'].values.tolist()
- gt_ids = cand_infos['gt_id'].values.tolist()
+ pred_ids = cand_infos["pred_id"].values.tolist()
+ gt_ids = cand_infos["gt_id"].values.tolist()
cand_TXO_gt = gt_data.poses[gt_ids]
cand_TXO_pred = pred_data_filtered.poses[pred_ids]
# Compute errors for tentative matches
- errors = self.compute_errors_batch(cand_TXO_pred, cand_TXO_gt,
- cand_infos['label'].values)
+ errors = self.compute_errors_batch(
+ cand_TXO_pred,
+ cand_TXO_gt,
+ cand_infos["label"].values,
+ )
# Matches can only be objects within thresholds (following BOP).
- cand_infos['error'] = errors['norm_avg'].cpu().numpy()
- cand_infos['obj_diameter'] = [self.mesh_db.infos[k]['diameter_m'] for k in cand_infos['label']]
- keep = cand_infos['error'] <= self.match_threshold * cand_infos['obj_diameter']
+ cand_infos["error"] = errors["norm_avg"].cpu().numpy()
+ cand_infos["obj_diameter"] = [
+ self.mesh_db.infos[k]["diameter_m"] for k in cand_infos["label"]
+ ]
+ keep = cand_infos["error"] <= self.match_threshold * cand_infos["obj_diameter"]
cand_infos = cand_infos[keep].reset_index(drop=True)
# Match predictions to ground truth poses
matches = match_poses(cand_infos, group_keys=group_keys)
         # Save all information in xarray datasets
- gt_keys = group_keys + ['gt_inst_id', 'valid'] + (['visib_fract'] if 'visib_fract' in gt_infos else [])
+ gt_keys = [*group_keys, "gt_inst_id", "valid"] + (
+ ["visib_fract"] if "visib_fract" in gt_infos else []
+ )
gt = gt_data.infos.loc[:, gt_keys]
- preds = pred_data.infos.loc[:, group_keys + ['pred_inst_id', 'score']]
- matches = matches.loc[:, group_keys + ['pred_inst_id', 'gt_inst_id', 'cand_id']]
+ preds = pred_data.infos.loc[:, [*group_keys, "pred_inst_id", "score"]]
+ matches = matches.loc[:, [*group_keys, "pred_inst_id", "gt_inst_id", "cand_id"]]
- gt = xr.Dataset(gt).rename({'dim_0': 'gt_id'})
- matches = xr.Dataset(matches).rename({'dim_0': 'match_id'})
- preds = xr.Dataset(preds).rename({'dim_0': 'pred_id'})
+ gt = xr.Dataset(gt).rename({"dim_0": "gt_id"})
+ matches = xr.Dataset(matches).rename({"dim_0": "match_id"})
+ preds = xr.Dataset(preds).rename({"dim_0": "pred_id"})
- errors_norm = errors['norm_avg'].cpu().numpy()[matches['cand_id'].values]
- errors_xyz = errors['xyz_avg'].cpu().numpy()[matches['cand_id'].values]
- errors_TCO_xyz = errors['TCO_xyz'].cpu().numpy()[matches['cand_id'].values]
- errors_TCO_norm = errors['TCO_norm'].cpu().numpy()[matches['cand_id'].values]
+ errors_norm = errors["norm_avg"].cpu().numpy()[matches["cand_id"].values]
+ errors_xyz = errors["xyz_avg"].cpu().numpy()[matches["cand_id"].values]
+ errors_TCO_xyz = errors["TCO_xyz"].cpu().numpy()[matches["cand_id"].values]
+ errors_TCO_norm = errors["TCO_norm"].cpu().numpy()[matches["cand_id"].values]
- matches['obj_diameter'] = 'match_id', [self.mesh_db.infos[k.item()]['diameter_m'] for k in matches['label']]
- matches['norm'] = 'match_id', errors_norm
- matches['0.1d'] = 'match_id', errors_norm < 0.1 * matches['obj_diameter']
- matches['xyz'] = ('match_id', 'dim3'), errors_xyz
- matches['TCO_xyz'] = ('match_id', 'dim3'), errors_TCO_xyz
- matches['TCO_norm'] = 'match_id', errors_TCO_norm
+ matches["obj_diameter"] = "match_id", [
+ self.mesh_db.infos[k.item()]["diameter_m"] for k in matches["label"]
+ ]
+ matches["norm"] = "match_id", errors_norm
+ matches["0.1d"] = "match_id", errors_norm < 0.1 * matches["obj_diameter"]
+ matches["xyz"] = ("match_id", "dim3"), errors_xyz
+ matches["TCO_xyz"] = ("match_id", "dim3"), errors_TCO_xyz
+ matches["TCO_norm"] = "match_id", errors_TCO_norm
- preds['TXO_pred'] = ('pred_id', 'Trow', 'Tcol'), pred_data.poses.cpu().numpy()
+ preds["TXO_pred"] = ("pred_id", "Trow", "Tcol"), pred_data.poses.cpu().numpy()
fill_values = {
- 'norm': np.inf,
- '0.1d': False,
- 'xyz': np.inf,
- 'TCO_xyz': np.inf,
- 'TCO_norm': np.inf,
- 'obj_diameter': np.nan,
- 'TXO_pred': np.nan,
- 'score': np.nan,
+ "norm": np.inf,
+ "0.1d": False,
+ "xyz": np.inf,
+ "TCO_xyz": np.inf,
+ "TCO_norm": np.inf,
+ "obj_diameter": np.nan,
+ "TXO_pred": np.nan,
+ "score": np.nan,
}
- matches = xr_merge(matches, preds, on=group_keys + ['pred_inst_id'],
- dim1='match_id', dim2='pred_id', fill_value=fill_values)
- gt = xr_merge(gt, matches, on=group_keys + ['gt_inst_id'],
- dim1='gt_id', dim2='match_id', fill_value=fill_values)
-
- preds_match_merge = xr_merge(preds, matches, on=group_keys+['pred_inst_id'],
- dim1='pred_id', dim2='match_id', fill_value=fill_values)
- preds['0.1d'] = 'pred_id', preds_match_merge['0.1d']
-
- self.datas['gt_df'].append(gt)
- self.datas['pred_df'].append(preds)
- self.datas['matches_df'].append(matches)
+ matches = xr_merge(
+ matches,
+ preds,
+ on=[*group_keys, "pred_inst_id"],
+ dim1="match_id",
+ dim2="pred_id",
+ fill_value=fill_values,
+ )
+ gt = xr_merge(
+ gt,
+ matches,
+ on=[*group_keys, "gt_inst_id"],
+ dim1="gt_id",
+ dim2="match_id",
+ fill_value=fill_values,
+ )
+
+ preds_match_merge = xr_merge(
+ preds,
+ matches,
+ on=[*group_keys, "pred_inst_id"],
+ dim1="pred_id",
+ dim2="match_id",
+ fill_value=fill_values,
+ )
+ preds["0.1d"] = "pred_id", preds_match_merge["0.1d"]
+
+ self.datas["gt_df"].append(gt)
+ self.datas["pred_df"].append(preds)
+ self.datas["matches_df"].append(matches)
def summary(self):
- gt_df = xr.concat(self.datas['gt_df'], dim='gt_id')
- matches_df = xr.concat(self.datas['matches_df'], dim='match_id')
- pred_df = xr.concat(self.datas['pred_df'], dim='pred_id')
+ gt_df = xr.concat(self.datas["gt_df"], dim="gt_id")
+ matches_df = xr.concat(self.datas["matches_df"], dim="match_id")
+ pred_df = xr.concat(self.datas["pred_df"], dim="pred_id")
# ADD-S AUC
- valid_df = gt_df.sel(gt_id=gt_df['valid'])
+ valid_df = gt_df.sel(gt_id=gt_df["valid"])
AUC = OrderedDict()
- for (label, ids) in valid_df.groupby('label').groups.items():
- errors = valid_df['norm'].values[ids]
+ for label, ids in valid_df.groupby("label").groups.items():
+ errors = valid_df["norm"].values[ids]
assert np.all(~np.isnan(errors))
AUC[label] = compute_auc_posecnn(errors)
- gt_df['AUC/objects'] = xr.DataArray(
- list(AUC.values()), [('objects', list(AUC.keys()))], dims=['objects'])
- gt_df['AUC/objects/mean'] = gt_df['AUC/objects'].mean('objects')
- gt_df['AUC'] = compute_auc_posecnn(valid_df['norm'])
+ gt_df["AUC/objects"] = xr.DataArray(
+ list(AUC.values()),
+ [("objects", list(AUC.keys()))],
+ dims=["objects"],
+ )
+ gt_df["AUC/objects/mean"] = gt_df["AUC/objects"].mean("objects")
+ gt_df["AUC"] = compute_auc_posecnn(valid_df["norm"])
# AP/mAP@0.1d
- valid_k = '0.1d'
- n_gts = dict()
+ valid_k = "0.1d"
+ n_gts = {}
if self.n_top > 0:
- group_keys = ['scene_id', 'view_id', 'label']
- subdf = gt_df[[*group_keys, 'valid']].to_dataframe().groupby(group_keys).sum().reset_index()
- subdf['gt_count'] = np.minimum(self.n_top, subdf['valid'])
- for label, group in subdf.groupby('label'):
- n_gts[label] = group['gt_count'].sum()
+ group_keys = ["scene_id", "view_id", "label"]
+ subdf = (
+ gt_df[[*group_keys, "valid"]]
+ .to_dataframe()
+ .groupby(group_keys)
+ .sum()
+ .reset_index()
+ )
+ subdf["gt_count"] = np.minimum(self.n_top, subdf["valid"])
+ for label, group in subdf.groupby("label"):
+ n_gts[label] = group["gt_count"].sum()
else:
- subdf = gt_df[['label', 'valid']].groupby('label').sum()
- for label in subdf['label'].values:
- n_gts[label] = subdf.sel(label=label)['valid'].item()
+ subdf = gt_df[["label", "valid"]].groupby("label").sum()
+ for label in subdf["label"].values:
+ n_gts[label] = subdf.sel(label=label)["valid"].item()
- ap_dfs = dict()
+ ap_dfs = {}
def compute_ap(label_df, label_n_gt):
- label_df = label_df.sort_values('score', ascending=False).reset_index(drop=True)
- label_df['n_tp'] = np.cumsum(label_df[valid_k].values.astype(np.float))
- label_df['prec'] = label_df['n_tp'] / (np.arange(len(label_df)) + 1)
- label_df['recall'] = label_df['n_tp'] / label_n_gt
+ label_df = label_df.sort_values("score", ascending=False).reset_index(
+ drop=True,
+ )
+ label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(np.float))
+ label_df["prec"] = label_df["n_tp"] / (np.arange(len(label_df)) + 1)
+ label_df["recall"] = label_df["n_tp"] / label_n_gt
y_true = label_df[valid_k]
- y_score = label_df['score']
+ y_score = label_df["score"]
ap = average_precision_score(y_true, y_score) * y_true.sum() / label_n_gt
- label_df['AP'] = ap
- label_df['n_gt'] = label_n_gt
+ label_df["AP"] = ap
+ label_df["n_gt"] = label_n_gt
return ap, label_df
- df = pred_df[['label', valid_k, 'score']].to_dataframe().set_index(['label'])
+ df = pred_df[["label", valid_k, "score"]].to_dataframe().set_index(["label"])
for label, label_n_gt in n_gts.items():
if label in df.index:
label_df = df.loc[[label]]
@@ -282,41 +380,48 @@ def compute_ap(label_df, label_n_gt):
ap_dfs[label] = label_df
if len(ap_dfs) > 0:
- mAP = np.mean([np.unique(ap_df['AP']).item() for ap_df in ap_dfs.values()])
- AP, ap_dfs['all'] = compute_ap(df.reset_index(), sum(list(n_gts.values())))
+ mAP = np.mean([np.unique(ap_df["AP"]).item() for ap_df in ap_dfs.values()])
+ AP, ap_dfs["all"] = compute_ap(df.reset_index(), sum(list(n_gts.values())))
else:
- AP, mAP = 0., 0.
+ AP, mAP = 0.0, 0.0
n_gt_valid = int(sum(list(n_gts.values())))
summary = {
- 'n_gt': len(gt_df['gt_id']),
- 'n_gt_valid': n_gt_valid,
- 'n_pred': len(pred_df['pred_id']),
- 'n_matched': len(matches_df['match_id']),
- 'matched_gt_ratio': len(matches_df['match_id']) / n_gt_valid,
- 'pred_matched_ratio': len(pred_df['pred_id']) / max(len(matches_df['match_id']), 1),
- '0.1d': valid_df['0.1d'].sum('gt_id').item() / n_gt_valid,
+ "n_gt": len(gt_df["gt_id"]),
+ "n_gt_valid": n_gt_valid,
+ "n_pred": len(pred_df["pred_id"]),
+ "n_matched": len(matches_df["match_id"]),
+ "matched_gt_ratio": len(matches_df["match_id"]) / n_gt_valid,
+ "pred_matched_ratio": len(pred_df["pred_id"])
+ / max(len(matches_df["match_id"]), 1),
+ "0.1d": valid_df["0.1d"].sum("gt_id").item() / n_gt_valid,
}
if self.report_error_stats:
- summary.update({
- 'norm': matches_df['norm'].mean('match_id').item(),
- 'xyz': matches_df['xyz'].mean('match_id').values.tolist(),
- 'TCO_xyz': matches_df['TCO_xyz'].mean('match_id').values.tolist(),
- 'TCO_norm': matches_df['TCO_norm'].mean('match_id').values.tolist(),
- })
+ summary.update(
+ {
+ "norm": matches_df["norm"].mean("match_id").item(),
+ "xyz": matches_df["xyz"].mean("match_id").values.tolist(),
+ "TCO_xyz": matches_df["TCO_xyz"].mean("match_id").values.tolist(),
+ "TCO_norm": matches_df["TCO_norm"].mean("match_id").values.tolist(),
+ },
+ )
if self.report_AP:
- summary.update({
- 'AP': AP,
- 'mAP': mAP,
- })
+ summary.update(
+ {
+ "AP": AP,
+ "mAP": mAP,
+ },
+ )
if self.report_error_AUC:
- summary.update({
- 'AUC/objects/mean': gt_df['AUC/objects/mean'].item(),
- 'AUC': gt_df['AUC'].item(),
- })
-
- dfs = dict(gt=gt_df, matches=matches_df, preds=pred_df, ap=ap_dfs)
+ summary.update(
+ {
+ "AUC/objects/mean": gt_df["AUC/objects/mean"].item(),
+ "AUC": gt_df["AUC"].item(),
+ },
+ )
+
+ dfs = {"gt": gt_df, "matches": matches_df, "preds": pred_df, "ap": ap_dfs}
return summary, dfs
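
For reference, the AP returned by `compute_ap` above is scikit-learn's average precision rescaled by the recall ceiling: when only part of the ground-truth instances receive any prediction, the score is multiplied by `y_true.sum() / label_n_gt`. A self-contained sketch with synthetic values:

    import numpy as np
    from sklearn.metrics import average_precision_score

    # Hypothetical label: 5 predictions (sorted by score), 4 GT instances.
    y_true = np.array([1, 1, 0, 1, 0])    # 0.1d hits
    y_score = np.array([0.9, 0.8, 0.7, 0.6, 0.5])
    label_n_gt = 4

    ap = average_precision_score(y_true, y_score) * y_true.sum() / label_n_gt
    print(f"AP rescaled for unreachable recall: {ap:.3f}")
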
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/utils.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/utils.py
index cba83283..18fc197e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/meters/utils.py
@@ -1,46 +1,51 @@
import numpy as np
import pandas as pd
-from collections import OrderedDict
-def one_to_one_matching(pred_infos, gt_infos,
- keys=('scene_id', 'view_id'),
- allow_pred_missing=False):
+def one_to_one_matching(
+ pred_infos,
+ gt_infos,
+ keys=("scene_id", "view_id"),
+ allow_pred_missing=False,
+):
keys = list(keys)
- pred_infos['pred_id'] = np.arange(len(pred_infos))
- gt_infos['gt_id'] = np.arange(len(gt_infos))
+ pred_infos["pred_id"] = np.arange(len(pred_infos))
+ gt_infos["gt_id"] = np.arange(len(gt_infos))
matches = pred_infos.merge(gt_infos, on=keys)
matches_gb = matches.groupby(keys).groups
- assert all([len(v) == 1 for v in matches_gb.values()])
+ assert all(len(v) == 1 for v in matches_gb.values())
if not allow_pred_missing:
assert len(matches) == len(gt_infos)
return matches
-def add_inst_num(infos,
- group_keys=['scene_id', 'view_id', 'label'],
- key='pred_inst_num'):
-
+def add_inst_num(
+ infos,
+ group_keys=["scene_id", "view_id", "label"],
+ key="pred_inst_num",
+):
inst_num = np.empty(len(infos), dtype=int)
- for group_name, group_ids in infos.groupby(group_keys).groups.items():
+ for _group_name, group_ids in infos.groupby(group_keys).groups.items():
inst_num[group_ids.values] = np.arange(len(group_ids))
infos[key] = inst_num
return infos
-def get_top_n_ids(infos,
- group_keys=('scene_id', 'view_id', 'label'),
- top_key='score',
- n_top=-1, targets=None):
-
- infos['id_before_top_n'] = np.arange(len(infos))
+def get_top_n_ids(
+ infos,
+ group_keys=("scene_id", "view_id", "label"),
+ top_key="score",
+ n_top=-1,
+ targets=None,
+):
+ infos["id_before_top_n"] = np.arange(len(infos))
group_keys = list(group_keys)
if targets is not None:
- targets_inst_count = dict()
+ targets_inst_count = {}
for k, ids in targets.groupby(group_keys).groups.items():
- targets_inst_count[k] = targets.loc[ids[0], 'inst_count']
+ targets_inst_count[k] = targets.loc[ids[0], "inst_count"]
def get_top_n(group_k):
if n_top > 0:
@@ -56,69 +61,83 @@ def get_top_n(group_k):
top_n = get_top_n(k)
if top_n is None:
top_n = len(group)
- keep_ids.append(group['id_before_top_n'].values[:top_n])
+ keep_ids.append(group["id_before_top_n"].values[:top_n])
if len(keep_ids) > 0:
keep_ids = np.concatenate(keep_ids)
else:
keep_ids = []
- del infos['id_before_top_n']
+ del infos["id_before_top_n"]
return keep_ids
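
`get_top_n_ids` keeps, per `(scene_id, view_id, label)` group, at most `n_top` rows ranked by `top_key` (or by the per-target instance count when BOP targets are supplied). The core pattern is a grouped head-after-sort; a minimal sketch with made-up data:

    import pandas as pd

    infos = pd.DataFrame({
        "scene_id": [0, 0, 0, 1],
        "view_id": [0, 0, 0, 0],
        "label": ["mug", "mug", "mug", "mug"],
        "score": [0.9, 0.5, 0.7, 0.8],
    })
    n_top = 2
    top = (
        infos.sort_values("score", ascending=False)
        .groupby(["scene_id", "view_id", "label"], sort=False)
        .head(n_top)
    )
    print(top.index.to_numpy())  # positional ids to keep, like get_top_n_ids
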
-def add_valid_gt(gt_infos,
- group_keys=('scene_id', 'view_id', 'label'),
- visib_gt_min=-1, targets=None):
-
+def add_valid_gt(
+ gt_infos,
+ group_keys=("scene_id", "view_id", "label"),
+ visib_gt_min=-1,
+ targets=None,
+):
if visib_gt_min > 0:
- gt_infos['valid'] = gt_infos['visib_fract'] >= visib_gt_min
+ gt_infos["valid"] = gt_infos["visib_fract"] >= visib_gt_min
if targets is not None:
- gt_infos['valid'] = np.logical_and(gt_infos['valid'], np.isin(gt_infos['label'], targets['label']))
+ gt_infos["valid"] = np.logical_and(
+ gt_infos["valid"],
+ np.isin(gt_infos["label"], targets["label"]),
+ )
elif targets is not None:
- valid_ids = get_top_n_ids(gt_infos, group_keys=group_keys,
- top_key='visib_fract', targets=targets)
- gt_infos['valid'] = False
- gt_infos.loc[valid_ids, 'valid'] = True
+ valid_ids = get_top_n_ids(
+ gt_infos,
+ group_keys=group_keys,
+ top_key="visib_fract",
+ targets=targets,
+ )
+ gt_infos["valid"] = False
+ gt_infos.loc[valid_ids, "valid"] = True
else:
- gt_infos['valid'] = True
+ gt_infos["valid"] = True
return gt_infos
-def get_candidate_matches(pred_infos, gt_infos,
- group_keys=['scene_id', 'view_id', 'label'],
- only_valids=True):
- pred_infos['pred_id'] = np.arange(len(pred_infos))
- gt_infos['gt_id'] = np.arange(len(gt_infos))
+def get_candidate_matches(
+ pred_infos,
+ gt_infos,
+ group_keys=["scene_id", "view_id", "label"],
+ only_valids=True,
+):
+ pred_infos["pred_id"] = np.arange(len(pred_infos))
+ gt_infos["gt_id"] = np.arange(len(gt_infos))
group_keys = list(group_keys)
cand_infos = pred_infos.merge(gt_infos, on=group_keys)
if only_valids:
- cand_infos = cand_infos[cand_infos['valid']].reset_index(drop=True)
- cand_infos['cand_id'] = np.arange(len(cand_infos))
+ cand_infos = cand_infos[cand_infos["valid"]].reset_index(drop=True)
+ cand_infos["cand_id"] = np.arange(len(cand_infos))
return cand_infos
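
`get_candidate_matches` enumerates every tentative (prediction, ground truth) pair sharing the same scene, view, and label: an inner merge on the group keys yields the cross product within each group, and invalid GT rows are dropped before candidate ids are assigned. Sketch:

    import numpy as np
    import pandas as pd

    pred = pd.DataFrame({"scene_id": [0, 0], "label": ["mug", "mug"]})
    gt = pd.DataFrame({"scene_id": [0], "label": ["mug"], "valid": [True]})
    pred["pred_id"] = np.arange(len(pred))
    gt["gt_id"] = np.arange(len(gt))

    cand = pred.merge(gt, on=["scene_id", "label"])  # 2 preds x 1 gt -> 2 pairs
    cand = cand[cand["valid"]].reset_index(drop=True)
    cand["cand_id"] = np.arange(len(cand))
    print(cand)
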
-def match_poses(cand_infos, group_keys=['scene_id', 'view_id', 'label']):
- assert 'error' in cand_infos
+def match_poses(cand_infos, group_keys=["scene_id", "view_id", "label"]):
+ assert "error" in cand_infos
matches = []
def match_label_preds(group):
gt_ids_matched = set()
group = group.reset_index(drop=True)
- gb_pred = group.groupby('pred_id', sort=False)
- ids_sorted = gb_pred.first().sort_values('score', ascending=False)
+ gb_pred = group.groupby("pred_id", sort=False)
+ ids_sorted = gb_pred.first().sort_values("score", ascending=False)
gb_pred_groups = gb_pred.groups
for idx, _ in ids_sorted.iterrows():
pred_group = group.iloc[gb_pred_groups[idx]]
best_error = np.inf
best_match = None
for _, tentative_match in pred_group.iterrows():
- if tentative_match['error'] < best_error and \
- tentative_match['gt_id'] not in gt_ids_matched:
+ if (
+ tentative_match["error"] < best_error
+ and tentative_match["gt_id"] not in gt_ids_matched
+ ):
best_match = tentative_match
- best_error = tentative_match['error']
+ best_error = tentative_match["error"]
if best_match is not None:
- gt_ids_matched.add(best_match['gt_id'])
+ gt_ids_matched.add(best_match["gt_id"])
matches.append(best_match)
if len(cand_infos) > 0:
@@ -145,8 +164,8 @@ def compute_auc_posecnn(errors):
mrec = np.concatenate(([0], rec, [0.1]))
mpre = np.concatenate(([0], prec, [prec[-1]]))
for i in np.arange(1, len(mpre)):
- mpre[i] = max(mpre[i], mpre[i-1])
+ mpre[i] = max(mpre[i], mpre[i - 1])
i = np.arange(1, len(mpre))
ids = np.where(mrec[1:] != mrec[:-1])[0] + 1
- ap = ((mrec[ids] - mrec[ids-1]) * mpre[ids]).sum() * 10
+ ap = ((mrec[ids] - mrec[ids - 1]) * mpre[ids]).sum() * 10
return ap
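
The tail of `compute_auc_posecnn` shown in this hunk is the usual VOC-style integration: force the precision curve to be monotone, then sum rectangle areas wherever recall changes, with the factor 10 normalizing by the 0.1 m error cutoff. A hedged reconstruction of the full computation (the hunk only shows the tail, so the setup below is an assumption based on the published PoseCNN evaluation):

    import numpy as np

    def auc_posecnn_sketch(errors, max_err=0.1):
        # ADD(-S) AUC: area under the accuracy-vs-threshold curve up to max_err.
        d = np.sort(np.asarray(errors, dtype=np.float64))
        n = len(d)
        prec = np.arange(1, n + 1) / n       # accuracy at threshold d[i]
        rec, prec = d[d < max_err], prec[d < max_err]
        if len(rec) == 0:
            return 0.0
        mrec = np.concatenate(([0], rec, [max_err]))
        mpre = np.concatenate(([0], prec, [prec[-1]]))
        for i in range(1, len(mpre)):        # monotone precision envelope
            mpre[i] = max(mpre[i], mpre[i - 1])
        ids = np.where(mrec[1:] != mrec[:-1])[0] + 1
        return ((mrec[ids] - mrec[ids - 1]) * mpre[ids]).sum() / max_err
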
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
index 6cebf9a0..475f4cc0 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
@@ -1,36 +1,46 @@
-import pandas as pd
import time
+from collections import defaultdict
+
import numpy as np
-from tqdm import tqdm
+import pandas as pd
import torch
-from collections import defaultdict
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import (
+ DistributedSceneSampler,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import DistributedSceneSampler
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, get_tmp_dir
-
-from torch.utils.data import DataLoader
logger = get_logger(__name__)
class BopPredictionRunner:
def __init__(self, scene_ds, batch_size=1, cache_data=False, n_workers=4):
-
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
assert batch_size == 1
- sampler = DistributedSceneSampler(scene_ds,
- num_replicas=self.world_size,
- rank=self.rank)
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ )
self.sampler = sampler
- dataloader = DataLoader(scene_ds, batch_size=batch_size,
- num_workers=n_workers,
- sampler=sampler,
- collate_fn=self.collate_fn)
+ dataloader = DataLoader(
+ scene_ds,
+ batch_size=batch_size,
+ num_workers=n_workers,
+ sampler=sampler,
+ collate_fn=self.collate_fn,
+ )
if cache_data:
self.dataloader = list(tqdm(dataloader))
@@ -46,50 +56,53 @@ def collate_fn(self, batch):
for n, data in enumerate(batch):
assert n == 0
images, masks, obss = data
- for c, obs in enumerate(obss):
+ for _c, obs in enumerate(obss):
batch_im_id += 1
- frame_info = obs['frame_info']
- im_info = {k: frame_info[k] for k in ('scene_id', 'view_id', 'group_id')}
+ frame_info = obs["frame_info"]
+ im_info = {
+ k: frame_info[k] for k in ("scene_id", "view_id", "group_id")
+ }
im_info.update(batch_im_id=batch_im_id)
im_infos.append(im_info)
cam_info = im_info.copy()
- K.append(obs['camera']['K'])
+ K.append(obs["camera"]["K"])
cam_infos.append(cam_info)
if self.load_depth:
- depth.append(torch.tensor(obs['camera']['depth']))
+ depth.append(torch.tensor(obs["camera"]["depth"]))
cameras = tc.PandasTensorCollection(
infos=pd.DataFrame(cam_infos),
K=torch.as_tensor(np.stack(K)),
)
- data = dict(
- cameras=cameras,
- images=images,
- im_infos=im_infos,
- )
+ data = {
+ "cameras": cameras,
+ "images": images,
+ "im_infos": im_infos,
+ }
if self.load_depth:
- data['depth'] = torch.stack(depth)
+ data["depth"] = torch.stack(depth)
return data
- def get_predictions(self,
- detector,
- pose_predictor,
- icp_refiner=None,
- mv_predictor=None,
- n_coarse_iterations=1,
- n_refiner_iterations=1,
- detection_th=0.0):
-
+ def get_predictions(
+ self,
+ detector,
+ pose_predictor,
+ icp_refiner=None,
+ mv_predictor=None,
+ n_coarse_iterations=1,
+ n_refiner_iterations=1,
+ detection_th=0.0,
+ ):
predictions = defaultdict(list)
use_icp = icp_refiner is not None
for n, data in enumerate(tqdm(self.dataloader)):
- images = data['images'].cuda().float().permute(0, 3, 1, 2) / 255
- cameras = data['cameras'].cuda().float()
- im_infos = data['im_infos']
+ images = data["images"].cuda().float().permute(0, 3, 1, 2) / 255
+ cameras = data["cameras"].cuda().float()
+ im_infos = data["im_infos"]
depth = None
if self.load_depth:
- depth = data['depth'].cuda().float()
+ depth = data["depth"].cuda().float()
logger.debug(f"{'-'*80}")
logger.debug(f"Predictions on {data['im_infos']}")
@@ -97,35 +110,51 @@ def get_preds():
torch.cuda.synchronize()
start = time.time()
this_batch_detections = detector.get_detections(
- images=images, one_instance_per_class=False, detection_th=detection_th,
- output_masks=use_icp, mask_th=0.9
+ images=images,
+ one_instance_per_class=False,
+ detection_th=detection_th,
+ output_masks=use_icp,
+ mask_th=0.9,
)
- for key in ('scene_id', 'view_id', 'group_id'):
- this_batch_detections.infos[key] = this_batch_detections.infos['batch_im_id'].apply(lambda idx: im_infos[idx][key])
+ for key in ("scene_id", "view_id", "group_id"):
+ this_batch_detections.infos[key] = this_batch_detections.infos[
+ "batch_im_id"
+ ].apply(lambda idx: im_infos[idx][key])
- all_preds = dict()
+ all_preds = {}
if len(this_batch_detections) > 0:
final_preds, all_preds = pose_predictor.get_predictions(
- images, cameras.K, detections=this_batch_detections,
+ images,
+ cameras.K,
+ detections=this_batch_detections,
n_coarse_iterations=n_coarse_iterations,
n_refiner_iterations=n_refiner_iterations,
)
if len(images) > 1:
mv_preds = mv_predictor.predict_scene_state(
- final_preds, cameras,
+ final_preds,
+ cameras,
)
- all_preds['multiview'] = mv_preds['ba_output+all_cand']
- final_preds = all_preds['multiview']
+ all_preds["multiview"] = mv_preds["ba_output+all_cand"]
+ final_preds = all_preds["multiview"]
if use_icp:
- all_preds['icp'] = icp_refiner.refine_poses(final_preds, this_batch_detections.masks, depth, cameras)
+ all_preds["icp"] = icp_refiner.refine_poses(
+ final_preds,
+ this_batch_detections.masks,
+ depth,
+ cameras,
+ )
torch.cuda.synchronize()
duration = time.time() - start
n_dets = len(this_batch_detections)
- logger.debug(f'Full predictions: {n_dets} detections + pose estimation in {duration:.3f} s')
+ logger.debug(
+ f"Full predictions: {n_dets} detections + pose estimation "
+ f"in {duration:.3f} s",
+ )
logger.debug(f"{'-'*80}")
return this_batch_detections, all_preds, duration
@@ -136,14 +165,14 @@ def get_preds():
duration = duration / len(images) # Divide by number of views in multi-view
if use_icp:
- this_batch_detections.delete_tensor('masks') # Saves memory when saving
+ this_batch_detections.delete_tensor("masks") # Saves memory when saving
# NOTE: time isn't correct for n iterations < max number of iterations
for k, v in all_preds.items():
- v.infos = v.infos.loc[:, ['scene_id', 'view_id', 'label', 'score']]
- v.infos['time'] = duration
+ v.infos = v.infos.loc[:, ["scene_id", "view_id", "label", "score"]]
+ v.infos["time"] = duration
predictions[k].append(v.cpu())
- predictions['detections'].append(this_batch_detections.cpu())
+ predictions["detections"].append(this_batch_detections.cpu())
predictions = dict(predictions)
for k, v in predictions.items():
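
Across these runners the images arrive as uint8 HWC batches and are normalized with `.cuda().float().permute(0, 3, 1, 2) / 255` before being handed to the detector. A device-agnostic sketch of that conversion:

    import torch

    def to_model_input(images_hwc_u8: torch.Tensor) -> torch.Tensor:
        # [B, H, W, 3] uint8 in [0, 255] -> [B, 3, H, W] float in [0, 1].
        device = "cuda" if torch.cuda.is_available() else "cpu"
        return images_hwc_u8.to(device).float().permute(0, 3, 1, 2) / 255

    batch = torch.randint(0, 256, (2, 480, 640, 3), dtype=torch.uint8)
    x = to_model_input(batch)
    print(x.shape, x.dtype, float(x.max()) <= 1.0)
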
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/detections.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/detections.py
index f4222010..e35ef461 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/detections.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/detections.py
@@ -1,14 +1,20 @@
-import pandas as pd
-import numpy as np
-from tqdm import tqdm
-import torch
from collections import defaultdict
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import DistributedSceneSampler
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, get_tmp_dir
-
+import numpy as np
+import pandas as pd
+import torch
from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import (
+ DistributedSceneSampler,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+)
class DetectionRunner:
@@ -17,11 +23,19 @@ def __init__(self, scene_ds, batch_size=8, cache_data=False, n_workers=4):
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
- sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ )
self.sampler = sampler
- dataloader = DataLoader(scene_ds, batch_size=batch_size,
- num_workers=n_workers,
- sampler=sampler, collate_fn=self.collate_fn)
+ dataloader = DataLoader(
+ scene_ds,
+ batch_size=batch_size,
+ num_workers=n_workers,
+ sampler=sampler,
+ collate_fn=self.collate_fn,
+ )
if cache_data:
self.dataloader = list(tqdm(dataloader))
@@ -33,58 +47,57 @@ def collate_fn(self, batch):
det_infos, bboxes = [], []
images = []
im_infos = []
- for n, data in enumerate(batch):
+ for _n, data in enumerate(batch):
rgb, masks, obs = data
batch_im_id += 1
- frame_info = obs['frame_info']
- im_info = {k: frame_info[k] for k in ('scene_id', 'view_id')}
+ frame_info = obs["frame_info"]
+ im_info = {k: frame_info[k] for k in ("scene_id", "view_id")}
im_info.update(batch_im_id=batch_im_id)
im_infos.append(im_info)
images.append(rgb)
- for o, obj in enumerate(obs['objects']):
- obj_info = dict(
- label=obj['name'],
- score=1.0,
- )
+ for _o, obj in enumerate(obs["objects"]):
+ obj_info = {
+ "label": obj["name"],
+ "score": 1.0,
+ }
obj_info.update(im_info)
- bboxes.append(obj['bbox'])
+ bboxes.append(obj["bbox"])
det_infos.append(obj_info)
gt_detections = tc.PandasTensorCollection(
infos=pd.DataFrame(det_infos),
bboxes=torch.as_tensor(np.stack(bboxes)).float(),
)
- data = dict(
- images=torch.stack(images),
- gt_detections=gt_detections,
- im_infos=im_infos,
- )
+ data = {
+ "images": torch.stack(images),
+ "gt_detections": gt_detections,
+ "im_infos": im_infos,
+ }
return data
- def get_predictions(self,
- detector,
- gt_detections=False):
-
+ def get_predictions(self, detector, gt_detections=False):
predictions = defaultdict(list)
for data in tqdm(self.dataloader):
- images = data['images'].cuda().float().permute(0, 3, 1, 2) / 255
+ images = data["images"].cuda().float().permute(0, 3, 1, 2) / 255
if gt_detections:
- preds = data['gt_detections']
+ preds = data["gt_detections"]
else:
preds = detector.get_detections(
images=images,
one_instance_per_class=False,
)
- im_infos = data['im_infos']
- for k in ('scene_id', 'view_id'):
- preds.infos[k] = preds.infos['batch_im_id'].apply(lambda idx: im_infos[idx][k])
+ im_infos = data["im_infos"]
+ for k in ("scene_id", "view_id"):
+ preds.infos[k] = preds.infos["batch_im_id"].apply(
+ lambda idx: im_infos[idx][k],
+ )
- predictions['detections'].append(preds)
+ predictions["detections"].append(preds)
- for k, v in predictions.items():
+ for k, _v in predictions.items():
predictions[k] = tc.concatenate(predictions[k])
return predictions
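
A recurring pattern here: detections only carry a `batch_im_id`, and per-frame metadata is re-attached by indexing into the collated `im_infos` list with a pandas `apply`. Minimal illustration:

    import pandas as pd

    im_infos = [{"scene_id": 3, "view_id": 10}, {"scene_id": 3, "view_id": 11}]
    infos = pd.DataFrame({"batch_im_id": [0, 0, 1], "score": [0.9, 0.4, 0.7]})

    for k in ("scene_id", "view_id"):
        infos[k] = infos["batch_im_id"].apply(lambda idx: im_infos[idx][k])
    print(infos)
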
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/multiview_predictions.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/multiview_predictions.py
index b0e8fe95..92ed174d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/multiview_predictions.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/multiview_predictions.py
@@ -1,33 +1,45 @@
-import pandas as pd
+from collections import defaultdict
+
import numpy as np
-from tqdm import tqdm
+import pandas as pd
import torch
-from collections import defaultdict
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import (
+ DistributedSceneSampler,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import DistributedSceneSampler
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, get_tmp_dir
-
-from torch.utils.data import DataLoader
logger = get_logger(__name__)
class MultiviewPredictionRunner:
def __init__(self, scene_ds, batch_size=1, cache_data=False, n_workers=4):
-
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
- assert batch_size == 1, 'Multiple view groups not supported for now.'
- sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
+ assert batch_size == 1, "Multiple view groups not supported for now."
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ )
self.sampler = sampler
- dataloader = DataLoader(scene_ds, batch_size=batch_size,
- num_workers=n_workers,
- sampler=sampler,
- collate_fn=self.collate_fn)
+ dataloader = DataLoader(
+ scene_ds,
+ batch_size=batch_size,
+ num_workers=n_workers,
+ sampler=sampler,
+ collate_fn=self.collate_fn,
+ )
if cache_data:
self.dataloader = list(tqdm(dataloader))
@@ -42,23 +54,25 @@ def collate_fn(self, batch):
for n, data in enumerate(batch):
assert n == 0
images, masks, obss = data
- for c, obs in enumerate(obss):
+ for _c, obs in enumerate(obss):
batch_im_id += 1
- frame_info = obs['frame_info']
- im_info = {k: frame_info[k] for k in ('scene_id', 'view_id', 'group_id')}
+ frame_info = obs["frame_info"]
+ im_info = {
+ k: frame_info[k] for k in ("scene_id", "view_id", "group_id")
+ }
im_info.update(batch_im_id=batch_im_id)
cam_info = im_info.copy()
- K.append(obs['camera']['K'])
+ K.append(obs["camera"]["K"])
cam_infos.append(cam_info)
- for o, obj in enumerate(obs['objects']):
- obj_info = dict(
- label=obj['name'],
- score=1.0,
- )
+ for _o, obj in enumerate(obs["objects"]):
+ obj_info = {
+ "label": obj["name"],
+ "score": 1.0,
+ }
obj_info.update(im_info)
- bboxes.append(obj['bbox'])
+ bboxes.append(obj["bbox"])
det_infos.append(obj_info)
gt_detections = tc.PandasTensorCollection(
@@ -69,76 +83,85 @@ def collate_fn(self, batch):
infos=pd.DataFrame(cam_infos),
K=torch.as_tensor(np.stack(K)),
)
- data = dict(
- images=images,
- cameras=cameras,
- gt_detections=gt_detections,
- )
+ data = {
+ "images": images,
+ "cameras": cameras,
+ "gt_detections": gt_detections,
+ }
return data
- def get_predictions(self, pose_predictor, mv_predictor,
- detections=None,
- n_coarse_iterations=1, n_refiner_iterations=1,
- sv_score_th=0.0, skip_mv=True,
- use_detections_TCO=False):
-
+ def get_predictions(
+ self,
+ pose_predictor,
+ mv_predictor,
+ detections=None,
+ n_coarse_iterations=1,
+ n_refiner_iterations=1,
+ sv_score_th=0.0,
+ skip_mv=True,
+ use_detections_TCO=False,
+ ):
assert detections is not None
if detections is not None:
- mask = (detections.infos['score'] >= sv_score_th)
+ mask = detections.infos["score"] >= sv_score_th
detections = detections[np.where(mask)[0]]
- detections.infos['det_id'] = np.arange(len(detections))
- det_index = detections.infos.set_index(['scene_id', 'view_id']).sort_index()
+ detections.infos["det_id"] = np.arange(len(detections))
+ det_index = detections.infos.set_index(["scene_id", "view_id"]).sort_index()
predictions = defaultdict(list)
for data in tqdm(self.dataloader):
- images = data['images'].cuda().float().permute(0, 3, 1, 2) / 255
- cameras = data['cameras'].cuda().float()
- gt_detections = data['gt_detections'].cuda().float()
+ images = data["images"].cuda().float().permute(0, 3, 1, 2) / 255
+ cameras = data["cameras"].cuda().float()
+ gt_detections = data["gt_detections"].cuda().float()
- scene_id = np.unique(gt_detections.infos['scene_id'])
- view_ids = np.unique(gt_detections.infos['view_id'])
- group_id = np.unique(gt_detections.infos['group_id'])
+ scene_id = np.unique(gt_detections.infos["scene_id"])
+ view_ids = np.unique(gt_detections.infos["view_id"])
+ group_id = np.unique(gt_detections.infos["group_id"])
n_gt_dets = len(gt_detections)
logger.debug(f"{'-'*80}")
- logger.debug(f'Scene: {scene_id}')
- logger.debug(f'Views: {view_ids}')
- logger.debug(f'Group: {group_id}')
- logger.debug(f'Image has {n_gt_dets} gt detections. (not used)')
+ logger.debug(f"Scene: {scene_id}")
+ logger.debug(f"Views: {view_ids}")
+ logger.debug(f"Group: {group_id}")
+ logger.debug(f"Image has {n_gt_dets} gt detections. (not used)")
if detections is not None:
keep_ids, batch_im_ids = [], []
- for group_name, group in cameras.infos.groupby(['scene_id', 'view_id']):
+ for group_name, group in cameras.infos.groupby(["scene_id", "view_id"]):
if group_name in det_index.index:
other_group = det_index.loc[group_name]
- keep_ids_ = other_group['det_id']
- batch_im_id = np.unique(group['batch_im_id']).item()
+ keep_ids_ = other_group["det_id"]
+ batch_im_id = np.unique(group["batch_im_id"]).item()
batch_im_ids.append(np.ones(len(keep_ids_)) * batch_im_id)
keep_ids.append(keep_ids_)
if len(keep_ids) > 0:
keep_ids = np.concatenate(keep_ids)
batch_im_ids = np.concatenate(batch_im_ids)
detections_ = detections[keep_ids]
- detections_.infos['batch_im_id'] = np.array(batch_im_ids).astype(np.int)
+ detections_.infos["batch_im_id"] = np.array(batch_im_ids).astype(np.int)
else:
- raise ValueError('No detections')
+ msg = "No detections"
+ raise ValueError(msg)
detections_ = detections_.cuda().float()
- detections_.infos['group_id'] = group_id.item()
+ detections_.infos["group_id"] = group_id.item()
- sv_preds, mv_preds = dict(), dict()
+ sv_preds, mv_preds = {}, {}
if len(detections_) > 0:
data_TCO_init = detections_ if use_detections_TCO else None
detections__ = detections_ if not use_detections_TCO else None
candidates, sv_preds = pose_predictor.get_predictions(
- images, cameras.K, detections=detections__,
+ images,
+ cameras.K,
+ detections=detections__,
n_coarse_iterations=n_coarse_iterations,
data_TCO_init=data_TCO_init,
n_refiner_iterations=n_refiner_iterations,
)
- candidates.register_tensor('initial_bboxes', detections_.bboxes)
+ candidates.register_tensor("initial_bboxes", detections_.bboxes)
if not skip_mv:
mv_preds = mv_predictor.predict_scene_state(
- candidates, cameras,
+ candidates,
+ cameras,
)
logger.debug(f"{'-'*80}")
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
index 7c950b98..e1178393 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,7 +17,7 @@
# Standard Library
import time
from collections import defaultdict
-from typing import Dict, Optional
+from typing import Optional
# Third Party
import numpy as np
@@ -55,13 +54,16 @@ def __init__(
batch_size: int = 1,
n_workers: int = 4,
) -> None:
-
self.inference_cfg = inference_cfg
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
- sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
+ sampler = DistributedSceneSampler(
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ )
self.sampler = sampler
self.scene_ds = scene_ds
dataloader = DataLoader(
@@ -82,17 +84,17 @@ def run_inference_pipeline(
obs_tensor: ObservationTensor,
gt_detections: DetectionsType,
initial_estimates: Optional[PoseEstimatesType] = None,
- ) -> Dict[str, PoseEstimatesType]:
+ ) -> dict[str, PoseEstimatesType]:
"""Runs inference pipeline, extracts the results.
Returns: A dict with keys
- 'final': final preds
- - 'refiner/final': preds at final refiner iteration (before depth refinement)
+ - 'refiner/final': preds at final refiner iteration (before depth
+ refinement)
- 'depth_refinement': preds after depth refinement.
"""
-
if self.inference_cfg.detection_type == "gt":
detections = gt_detections
run_detector = False
@@ -100,13 +102,16 @@ def run_inference_pipeline(
detections = None
run_detector = True
else:
- raise ValueError(f"Unknown detection type {self.inference_cfg.detection_type}")
+ msg = f"Unknown detection type {self.inference_cfg.detection_type}"
+ raise ValueError(msg)
coarse_estimates = None
if self.inference_cfg.coarse_estimation_type == "external":
# TODO (ylabbe): This is hacky, clean this for modelnet eval.
coarse_estimates = initial_estimates
- coarse_estimates = happypose.toolbox.inference.utils.add_instance_id(coarse_estimates)
+ coarse_estimates = happypose.toolbox.inference.utils.add_instance_id(
+ coarse_estimates,
+ )
coarse_estimates.infos["instance_id"] = 0
run_detector = False
@@ -117,32 +122,34 @@ def run_inference_pipeline(
run_detector=run_detector,
data_TCO_init=None,
n_coarse_iterations=1,
- n_refiner_iterations=4
+ n_refiner_iterations=4,
)
- elapsed = time.time() - t
+ time.time() - t
# TODO (lmanuelli): Process this into a dict with keys like
# - 'refiner/iteration=1`
# - 'refiner/iteration=5`
# - `depth_refiner`
# Note: Since we support multi-hypotheses we need to potentially
- # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5` things for the ones that were actually the highest scoring at the end.
+ # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5`
+ # things for the ones that were actually the highest scoring at the end.
- all_preds = dict()
+ all_preds = {}
data_TCO_refiner = extra_data["refiner"]["preds"]
+ k_0 = f"refiner/iteration={self.inference_cfg.n_refiner_iterations}"
all_preds = {
"final": preds,
- f"refiner/iteration={self.inference_cfg.n_refiner_iterations}": data_TCO_refiner,
+ k_0: data_TCO_refiner,
"refiner/final": data_TCO_refiner,
"coarse": extra_data["coarse"]["preds"],
}
if self.inference_cfg.run_depth_refiner:
- all_preds[f"depth_refiner"] = extra_data["depth_refiner"]["preds"]
+ all_preds["depth_refiner"] = extra_data["depth_refiner"]["preds"]
# Remove any mask tensors
- for k, v in all_preds.items():
+ for _k, v in all_preds.items():
v.infos["scene_id"] = np.unique(gt_detections.infos["scene_id"]).item()
v.infos["view_id"] = np.unique(gt_detections.infos["view_id"]).item()
if "mask" in v.tensors:
@@ -150,8 +157,11 @@ def run_inference_pipeline(
return all_preds
- def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstimatesType]:
- """Runs predictions
+ def get_predictions(
+ self,
+ pose_estimator: PoseEstimator,
+ ) -> dict[str, PoseEstimatesType]:
+ """Runs predictions.
Returns: A dict with keys
- 'refiner/iteration=1`
@@ -162,10 +172,8 @@ def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstima
"""
-
predictions_list = defaultdict(list)
for n, data in enumerate(tqdm(self.dataloader)):
-
# data is a dict
rgb = data["rgb"]
depth = None
@@ -183,23 +191,29 @@ def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstima
if n == 0:
with torch.no_grad():
self.run_inference_pipeline(
- pose_estimator, obs_tensor, gt_detections, initial_estimates=initial_data
+ pose_estimator,
+ obs_tensor,
+ gt_detections,
+ initial_estimates=initial_data,
)
cuda_timer = CudaTimer()
cuda_timer.start()
with torch.no_grad():
all_preds = self.run_inference_pipeline(
- pose_estimator, obs_tensor, gt_detections, initial_estimates=initial_data
+ pose_estimator,
+ obs_tensor,
+ gt_detections,
+ initial_estimates=initial_data,
)
cuda_timer.end()
- duration = cuda_timer.elapsed()
+ cuda_timer.elapsed()
for k, v in all_preds.items():
predictions_list[k].append(v)
# Concatenate the lists of PandasTensorCollections
- predictions = dict()
+ predictions = {}
for k, v in predictions_list.items():
predictions[k] = tc.concatenate(v)
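
`CudaTimer` is a project helper; the reliable way to time GPU work is with CUDA events rather than `time.time()`, because kernel launches are asynchronous. A sketch of what such a timer plausibly wraps (an assumption, not the helper's actual implementation):

    import torch

    class CudaTimerSketch:
        def __init__(self):
            self._start = torch.cuda.Event(enable_timing=True)
            self._end = torch.cuda.Event(enable_timing=True)

        def start(self):
            self._start.record()

        def end(self):
            self._end.record()

        def elapsed(self) -> float:
            torch.cuda.synchronize()  # wait until both events are recorded
            return self._start.elapsed_time(self._end) / 1000.0  # ms -> s
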
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py
index 8ecebce9..fb48bfff 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py
@@ -1,23 +1,28 @@
-from collections import OrderedDict
+from collections import OrderedDict, defaultdict
+
import pandas as pd
-from collections import defaultdict
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_tmp_dir, get_rank
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
logger = get_logger(__name__)
def run_pred_eval(pred_runner, pred_kwargs, eval_runner, eval_preds=None):
- all_predictions = dict()
+ all_predictions = {}
for pred_prefix, pred_kwargs_n in pred_kwargs.items():
print("Prediction :", pred_prefix)
preds = pred_runner.get_predictions(**pred_kwargs_n)
for preds_name, preds_n in preds.items():
- all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n
+ all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n
- all_predictions = OrderedDict({k: v for k, v in sorted(all_predictions.items(), key=lambda item: item[0])})
- eval_metrics, eval_dfs = dict(), dict()
+ all_predictions = OrderedDict(
+ dict(sorted(all_predictions.items(), key=lambda item: item[0])),
+ )
+ eval_metrics, eval_dfs = {}, {}
for preds_k, preds in all_predictions.items():
print("Evaluation :", preds_k)
@@ -27,9 +32,7 @@ def run_pred_eval(pred_runner, pred_kwargs, eval_runner, eval_preds=None):
all_predictions = gather_predictions(all_predictions)
if get_rank() == 0:
- results = format_results(all_predictions,
- eval_metrics,
- eval_dfs)
+ results = format_results(all_predictions, eval_metrics, eval_dfs)
else:
results = None
return results
@@ -41,32 +44,29 @@ def gather_predictions(all_predictions):
return all_predictions
-def format_results(predictions,
- eval_metrics,
- eval_dfs,
- print_metrics=True):
- summary = dict()
+def format_results(predictions, eval_metrics, eval_dfs, print_metrics=True):
+ summary = {}
df = defaultdict(list)
- summary_txt = ''
+ summary_txt = ""
for k, v in eval_metrics.items():
summary_txt += f"\n{k}\n{'-'*80}\n"
for k_, v_ in v.items():
- summary[f'{k}/{k_}'] = v_
- df['method'].append(k)
- df['metric'].append(k_)
- df['value'].append(v_)
- summary_txt += f'{k}/{k_}: {v_}\n'
+ summary[f"{k}/{k_}"] = v_
+ df["method"].append(k)
+ df["metric"].append(k_)
+ df["value"].append(v_)
+ summary_txt += f"{k}/{k_}: {v_}\n"
summary_txt += f"{'-'*80}"
if print_metrics:
logger.info(summary_txt)
df = pd.DataFrame(df)
- results = dict(
- summary=summary,
- summary_txt=summary_txt,
- predictions=predictions,
- metrics=eval_metrics,
- summary_df=df,
- dfs=eval_dfs,
- )
+ results = {
+ "summary": summary,
+ "summary_txt": summary_txt,
+ "predictions": predictions,
+ "metrics": eval_metrics,
+ "summary_df": df,
+ "dfs": eval_dfs,
+ }
return results
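
`format_results` flattens the two-level metrics mapping into both a flat `method/metric` dict and a long-format DataFrame, which pivots nicely for side-by-side comparison. Condensed sketch with made-up numbers:

    import pandas as pd

    eval_metrics = {
        "coarse": {"AP": 0.61, "AUC": 0.72},
        "refiner": {"AP": 0.74, "AUC": 0.81},
    }
    rows = [
        {"method": method, "metric": metric, "value": value}
        for method, metrics in eval_metrics.items()
        for metric, value in metrics.items()
    ]
    df = pd.DataFrame(rows)
    print(df.pivot(index="metric", columns="method", values="value"))
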
diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py b/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py
index dca85850..4b9a36ac 100644
--- a/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py
+++ b/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py
@@ -1,16 +1,14 @@
-from typing import Any, Optional
+from typing import Optional
-import cosypose.utils.tensor_collection as tc
import numpy as np
import pandas as pd
import torch
# MegaPose
-import happypose.pose_estimators.megapose
import happypose.toolbox.utils.tensor_collection as tc
from happypose.toolbox.inference.detector import DetectorModule
from happypose.toolbox.inference.types import DetectionsType, ObservationTensor
-from happypose.toolbox.inference.utils import filter_detections, add_instance_id
+from happypose.toolbox.inference.utils import add_instance_id, filter_detections
class Detector(DetectorModule):
@@ -19,14 +17,16 @@ def __init__(self, model, ds_name):
self.model = model
self.model.eval()
self.config = model.config
- self.category_id_to_label = {v: k for k, v in self.config.label_to_category_id.items()}
+ self.category_id_to_label = {
+ v: k for k, v in self.config.label_to_category_id.items()
+ }
if ds_name == "ycbv.bop19":
- ds_name="ycbv"
+ ds_name = "ycbv"
for k, v in self.category_id_to_label.items():
- if k ==0:
+ if k == 0:
continue
- self.category_id_to_label[k] = '{}-'.format(ds_name) + v
- self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ self.category_id_to_label[k] = f"{ds_name}-" + v
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@torch.no_grad()
def get_detections(
@@ -40,6 +40,7 @@ def get_detections(
"""Runs the detector on the given images.
Args:
+ ----
detection_th: If specified only keep detections above this
threshold.
mask_th: Threshold to use when computing masks
@@ -48,28 +49,28 @@ def get_detections(
"""
-
# [B,3,H,W]
RGB_DIMS = [0, 1, 2]
images = observation.images[:, RGB_DIMS]
# TODO (lmanuelli): Why are we splitting this up into a list of tensors?
- outputs_ = self.model([image_n for image_n in images])
+ outputs_ = self.model(list(images))
infos = []
bboxes = []
masks = []
for n, outputs_n in enumerate(outputs_):
outputs_n["labels"] = [
- self.category_id_to_label[category_id.item()] for category_id in outputs_n["labels"]
+ self.category_id_to_label[category_id.item()]
+ for category_id in outputs_n["labels"]
]
for obj_id in range(len(outputs_n["boxes"])):
bbox = outputs_n["boxes"][obj_id]
- info = dict(
- batch_im_id=n,
- label=outputs_n["labels"][obj_id],
- score=outputs_n["scores"][obj_id].item(),
- )
+ info = {
+ "batch_im_id": n,
+ "label": outputs_n["labels"][obj_id],
+ "score": outputs_n["scores"][obj_id].item(),
+ }
mask = outputs_n["masks"][obj_id, 0] > mask_th
bboxes.append(torch.as_tensor(bbox))
masks.append(torch.as_tensor(mask))
@@ -83,13 +84,23 @@ def get_detections(
bboxes = torch.stack(bboxes).float()
masks = torch.stack(masks)
else:
- infos = dict(score=[], label=[], batch_im_id=[])
+ infos = {"score": [], "label": [], "batch_im_id": []}
if torch.cuda.is_available():
bboxes = torch.empty(0, 4).cuda().float()
- masks = torch.empty(0, images.shape[1], images.shape[2], dtype=torch.bool).cuda()
+ masks = torch.empty(
+ 0,
+ images.shape[1],
+ images.shape[2],
+ dtype=torch.bool,
+ ).cuda()
else:
bboxes = torch.empty(0, 4).float()
- masks = torch.empty(0, images.shape[1], images.shape[2], dtype=torch.bool)
+ masks = torch.empty(
+ 0,
+ images.shape[1],
+ images.shape[2],
+ dtype=torch.bool,
+ )
outputs = tc.PandasTensorCollection(
infos=pd.DataFrame(infos),
@@ -104,7 +115,8 @@ def get_detections(
# Keep only the top-detection for each class label
if one_instance_per_class:
outputs = filter_detections(
- outputs, one_instance_per_class=True
+ outputs,
+ one_instance_per_class=True,
)
# Add instance_id column to dataframe
@@ -112,6 +124,6 @@ def get_detections(
# identifies multiple instances of the same object
outputs = add_instance_id(outputs)
return outputs
-
+
def __call__(self, *args, **kwargs):
return self.get_detections(*args, **kwargs)
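
The constructor remaps Mask R-CNN category ids to dataset-prefixed labels so that detections can later be joined against the mesh database. The same logic with hypothetical label names:

    label_to_category_id = {"background": 0, "obj_000001": 1, "obj_000002": 2}
    category_id_to_label = {v: k for k, v in label_to_category_id.items()}

    ds_name = "ycbv"
    for k, v in category_id_to_label.items():
        if k == 0:  # keep the background entry unprefixed
            continue
        category_id_to_label[k] = f"{ds_name}-{v}"
    print(category_id_to_label)  # {0: 'background', 1: 'ycbv-obj_000001', ...}
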
diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py b/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py
index 326b138b..d2180d27 100644
--- a/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py
+++ b/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py
@@ -1,110 +1,166 @@
-from happypose.pose_estimators.cosypose.cosypose.config import DEBUG_DATA_DIR
-import torch
import cv2
import numpy as np
+import torch
from scipy import ndimage
-def get_normal(depth_refine,fx=-1,fy=-1,cx=-1,cy=-1,bbox=np.array([0]),refine=True):
+def get_normal(
+ depth_refine,
+ fx=-1,
+ fy=-1,
+ cx=-1,
+ cy=-1,
+ bbox=np.array([0]),
+ refine=True,
+):
# Copied from https://github.com/kirumang/Pix2Pose/blob/master/pix2pose_util/common_util.py
- '''
- fast normal computation
- '''
+ """Fast normal computation."""
res_y = depth_refine.shape[0]
res_x = depth_refine.shape[1]
- centerX=cx
- centerY=cy
- constant_x = 1/fx
- constant_y = 1/fy
+ centerX = cx
+ centerY = cy
+ constant_x = 1 / fx
+ constant_y = 1 / fy
- if(refine):
+ if refine:
depth_refine = np.nan_to_num(depth_refine)
mask = np.zeros_like(depth_refine).astype(np.uint8)
- mask[depth_refine==0]=1
+ mask[depth_refine == 0] = 1
depth_refine = depth_refine.astype(np.float32)
- depth_refine = cv2.inpaint(depth_refine,mask,2,cv2.INPAINT_NS)
+ depth_refine = cv2.inpaint(depth_refine, mask, 2, cv2.INPAINT_NS)
+ depth_refine = depth_refine.astype(np.float64)
- depth_refine = ndimage.gaussian_filter(depth_refine,2)
-
- uv_table = np.zeros((res_y,res_x,2),dtype=np.int16)
- column = np.arange(0,res_y)
- uv_table[:,:,1] = np.arange(0,res_x) - centerX #x-c_x (u)
- uv_table[:,:,0] = column[:,np.newaxis] - centerY #y-c_y (v)
-
- if(bbox.shape[0]==4):
- uv_table = uv_table[bbox[0]:bbox[2],bbox[1]:bbox[3]]
- v_x = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
- v_y = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
- normals = np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
- depth_refine=depth_refine[bbox[0]:bbox[2],bbox[1]:bbox[3]]
+ depth_refine = ndimage.gaussian_filter(depth_refine, 2)
+
+ uv_table = np.zeros((res_y, res_x, 2), dtype=np.int16)
+ column = np.arange(0, res_y)
+ uv_table[:, :, 1] = np.arange(0, res_x) - centerX # x-c_x (u)
+ uv_table[:, :, 0] = column[:, np.newaxis] - centerY # y-c_y (v)
+
+ if bbox.shape[0] == 4:
+ uv_table = uv_table[bbox[0] : bbox[2], bbox[1] : bbox[3]]
+ v_x = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
+ v_y = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
+ np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
+ depth_refine = depth_refine[bbox[0] : bbox[2], bbox[1] : bbox[3]]
else:
- v_x = np.zeros((res_y,res_x,3))
- v_y = np.zeros((res_y,res_x,3))
- normals = np.zeros((res_y,res_x,3))
-
- uv_table_sign= np.copy(uv_table)
- uv_table=np.abs(np.copy(uv_table))
-
-
- dig=np.gradient(depth_refine,2,edge_order=2)
- v_y[:,:,0]=uv_table_sign[:,:,1]*constant_x*dig[0]
- v_y[:,:,1]=depth_refine*constant_y+(uv_table_sign[:,:,0]*constant_y)*dig[0]
- v_y[:,:,2]=dig[0]
-
- v_x[:,:,0]=depth_refine*constant_x+uv_table_sign[:,:,1]*constant_x*dig[1]
- v_x[:,:,1]=uv_table_sign[:,:,0]*constant_y*dig[1]
- v_x[:,:,2]=dig[1]
-
- cross = np.cross(v_x.reshape(-1,3),v_y.reshape(-1,3))
- norm = np.expand_dims(np.linalg.norm(cross,axis=1),axis=1)
- norm[norm==0]=1
- cross = cross/norm
- if(bbox.shape[0]==4):
- cross =cross.reshape((bbox[2]-bbox[0],bbox[3]-bbox[1],3))
+ v_x = np.zeros((res_y, res_x, 3))
+ v_y = np.zeros((res_y, res_x, 3))
+ np.zeros((res_y, res_x, 3))
+
+ uv_table_sign = np.copy(uv_table)
+ uv_table = np.abs(np.copy(uv_table))
+
+ dig = np.gradient(depth_refine, 2, edge_order=2)
+ v_y[:, :, 0] = uv_table_sign[:, :, 1] * constant_x * dig[0]
+ v_y[:, :, 1] = (
+ depth_refine * constant_y + (uv_table_sign[:, :, 0] * constant_y) * dig[0]
+ )
+ v_y[:, :, 2] = dig[0]
+
+ v_x[:, :, 0] = (
+ depth_refine * constant_x + uv_table_sign[:, :, 1] * constant_x * dig[1]
+ )
+ v_x[:, :, 1] = uv_table_sign[:, :, 0] * constant_y * dig[1]
+ v_x[:, :, 2] = dig[1]
+
+ cross = np.cross(v_x.reshape(-1, 3), v_y.reshape(-1, 3))
+ norm = np.expand_dims(np.linalg.norm(cross, axis=1), axis=1)
+ norm[norm == 0] = 1
+ cross = cross / norm
+ if bbox.shape[0] == 4:
+ cross = cross.reshape((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
else:
- cross =cross.reshape(res_y,res_x,3)
- cross= np.nan_to_num(cross)
+ cross = cross.reshape(res_y, res_x, 3)
+ cross = np.nan_to_num(cross)
return cross
-def getXYZ(depth,fx,fy,cx,cy,bbox=np.array([0])):
+def getXYZ(depth, fx, fy, cx, cy, bbox=np.array([0])):
# Copied from https://github.com/kirumang/Pix2Pose/blob/master/pix2pose_util/common_util.py
- uv_table = np.zeros((depth.shape[0],depth.shape[1],2),dtype=np.int16)
- column = np.arange(0,depth.shape[0])
- uv_table[:,:,1] = np.arange(0,depth.shape[1]) - cx #x-c_x (u)
- uv_table[:,:,0] = column[:,np.newaxis] - cy #y-c_y (v)
-
- if(bbox.shape[0]==1):
- xyz=np.zeros((depth.shape[0],depth.shape[1],3)) #x,y,z
- xyz[:,:,0] = uv_table[:,:,1]*depth*1/fx
- xyz[:,:,1] = uv_table[:,:,0]*depth*1/fy
- xyz[:,:,2] = depth
- else: #when boundry region is given
- xyz=np.zeros((bbox[2]-bbox[0],bbox[3]-bbox[1],3)) #x,y,z
- xyz[:,:,0] = uv_table[bbox[0]:bbox[2],bbox[1]:bbox[3],1]*depth[bbox[0]:bbox[2],bbox[1]:bbox[3]]*1/fx
- xyz[:,:,1] = uv_table[bbox[0]:bbox[2],bbox[1]:bbox[3],0]*depth[bbox[0]:bbox[2],bbox[1]:bbox[3]]*1/fy
- xyz[:,:,2] = depth[bbox[0]:bbox[2],bbox[1]:bbox[3]]
+ uv_table = np.zeros((depth.shape[0], depth.shape[1], 2), dtype=np.int16)
+ column = np.arange(0, depth.shape[0])
+ uv_table[:, :, 1] = np.arange(0, depth.shape[1]) - cx # x-c_x (u)
+ uv_table[:, :, 0] = column[:, np.newaxis] - cy # y-c_y (v)
+
+ if bbox.shape[0] == 1:
+ xyz = np.zeros((depth.shape[0], depth.shape[1], 3)) # x,y,z
+ xyz[:, :, 0] = uv_table[:, :, 1] * depth * 1 / fx
+ xyz[:, :, 1] = uv_table[:, :, 0] * depth * 1 / fy
+ xyz[:, :, 2] = depth
+ else:  # when boundary region is given
+ xyz = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3)) # x,y,z
+ xyz[:, :, 0] = (
+ uv_table[bbox[0] : bbox[2], bbox[1] : bbox[3], 1]
+ * depth[bbox[0] : bbox[2], bbox[1] : bbox[3]]
+ * 1
+ / fx
+ )
+ xyz[:, :, 1] = (
+ uv_table[bbox[0] : bbox[2], bbox[1] : bbox[3], 0]
+ * depth[bbox[0] : bbox[2], bbox[1] : bbox[3]]
+ * 1
+ / fy
+ )
+ xyz[:, :, 2] = depth[bbox[0] : bbox[2], bbox[1] : bbox[3]]
return xyz
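
`getXYZ` back-projects a depth map through the pinhole model, X = (u - cx) * Z / fx and Y = (v - cy) * Z / fy. An equivalent vectorized sketch without the bbox branch:

    import numpy as np

    def backproject_depth(depth, fx, fy, cx, cy):
        # Depth map [H, W] -> per-pixel camera-frame points [H, W, 3].
        v, u = np.indices(depth.shape)  # v: row (y), u: column (x)
        xyz = np.empty((*depth.shape, 3), dtype=np.float64)
        xyz[..., 0] = (u - cx) * depth / fx
        xyz[..., 1] = (v - cy) * depth / fy
        xyz[..., 2] = depth
        return xyz

    pts = backproject_depth(np.ones((480, 640)), 600.0, 600.0, 320.0, 240.0)
    print(pts.shape)  # (480, 640, 3)
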
-def icp_refinement(depth_measured, depth_rendered,
- object_mask_measured, cam_K, TCO_pred, n_min_points=1000):
+def icp_refinement(
+ depth_measured,
+ depth_rendered,
+ object_mask_measured,
+ cam_K,
+ TCO_pred,
+ n_min_points=1000,
+):
# Inspired by https://github.com/kirumang/Pix2Pose/blob/843effe0097e9982f4b07dd90b04ede2b9ee9294/tools/5_evaluation_bop_icp3d.py#L57
- points_tgt = np.zeros((depth_measured.shape[0],depth_measured.shape[1],6),np.float32)
- points_tgt[:,:,:3] = getXYZ(depth_measured,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2])
- points_tgt[:,:,3:] = get_normal(depth_measured,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2],refine=True)
+ points_tgt = np.zeros(
+ (depth_measured.shape[0], depth_measured.shape[1], 6),
+ np.float32,
+ )
+ points_tgt[:, :, :3] = getXYZ(
+ depth_measured,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
+ )
+ points_tgt[:, :, 3:] = get_normal(
+ depth_measured,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
+ refine=True,
+ )
depth_valid = np.logical_and(depth_measured > 0.2, depth_measured < 5)
depth_valid = np.logical_and(depth_valid, object_mask_measured)
points_tgt = points_tgt[depth_valid]
- points_src = np.zeros((depth_measured.shape[0],depth_measured.shape[1],6),np.float32)
- points_src[:,:,:3] = getXYZ(depth_rendered,cam_K[0,0],cam_K[1,1],cam_K[0,2],cam_K[1,2])
- points_src[:,:,3:] = get_normal(depth_rendered,fx=cam_K[0,0],fy=cam_K[1,1],cx=cam_K[0,2],cy=cam_K[1,2],refine=True)
+ points_src = np.zeros(
+ (depth_measured.shape[0], depth_measured.shape[1], 6),
+ np.float32,
+ )
+ points_src[:, :, :3] = getXYZ(
+ depth_rendered,
+ cam_K[0, 0],
+ cam_K[1, 1],
+ cam_K[0, 2],
+ cam_K[1, 2],
+ )
+ points_src[:, :, 3:] = get_normal(
+ depth_rendered,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
+ refine=True,
+ )
points_src = points_src[np.logical_and(depth_valid, depth_rendered > 0)]
if len(points_tgt) < n_min_points or len(points_src) < n_min_points:
- return np.eye(4) * float('nan'), -1
+ return np.eye(4) * float("nan"), -1
TCO_pred_refined = TCO_pred.copy()
@@ -117,13 +173,20 @@ def icp_refinement(depth_measured, depth_rendered,
# import trimesh
# print(points_src.shape, points_tgt.shape)
- # trimesh.Trimesh(vertices=points_src[:, :3], normals=points_src[:, 3:]).export(DEBUG_DATA_DIR / 'src.ply')
- # trimesh.Trimesh(vertices=points_tgt[:, :3], normals=points_tgt[:, 3:]).export(DEBUG_DATA_DIR / 'tgt.ply')
+ # trimesh.Trimesh(vertices=points_src[:, :3], normals=points_src[:, 3:]).export(
+ # DEBUG_DATA_DIR / "src.ply"
+ # )
+ # trimesh.Trimesh(vertices=points_tgt[:, :3], normals=points_tgt[:, 3:]).export(
+ # DEBUG_DATA_DIR / "tgt.ply"
+ # )
# raise ValueError
tolerence = 0.05
icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=tolerence, numLevels=4)
- retval, residual, pose = icp_fnc.registerModelToScene(points_src.reshape(-1,6), points_tgt.reshape(-1,6))
+ retval, residual, pose = icp_fnc.registerModelToScene(
+ points_src.reshape(-1, 6),
+ points_tgt.reshape(-1, 6),
+ )
TCO_pred_refined = pose @ TCO_pred_refined
TCO_pred_refined = torch.tensor(TCO_pred_refined, dtype=torch.float32).cuda()
@@ -142,27 +205,35 @@ def __init__(self, mesh_db, renderer, resolution):
self.renderer = renderer
def refine_poses(self, predictions, masks, depth, cameras):
- view_id_to_K = {cameras.infos.loc[n, 'batch_im_id']: cameras.K[n].cpu().numpy() for n in range(len(cameras))}
- view_id_to_depth = {cameras.infos.loc[n, 'batch_im_id']: depth[n].cpu().numpy() for n in range(len(cameras))}
+ view_id_to_K = {
+ cameras.infos.loc[n, "batch_im_id"]: cameras.K[n].cpu().numpy()
+ for n in range(len(cameras))
+ }
+ view_id_to_depth = {
+ cameras.infos.loc[n, "batch_im_id"]: depth[n].cpu().numpy()
+ for n in range(len(cameras))
+ }
predictions_refined = predictions.clone()
obj_infos = []
Ks = []
for n in range(len(predictions)):
- obj = dict(
- name=predictions.infos.loc[n, 'label'],
- TCO=predictions.poses[n].cpu().numpy(),
- )
+ obj = {
+ "name": predictions.infos.loc[n, "label"],
+ "TCO": predictions.poses[n].cpu().numpy(),
+ }
obj_infos.append(obj)
- Ks.append(view_id_to_K[predictions.infos.loc[n, 'batch_im_id']])
- _, all_depth_rendered = self.renderer.render(obj_infos,
- TCO=predictions.poses,
- K=torch.tensor(np.stack(Ks)),
- resolution=self.resolution,
- render_depth=True)
+ Ks.append(view_id_to_K[predictions.infos.loc[n, "batch_im_id"]])
+ _, all_depth_rendered = self.renderer.render(
+ obj_infos,
+ TCO=predictions.poses,
+ K=torch.tensor(np.stack(Ks)),
+ resolution=self.resolution,
+ render_depth=True,
+ )
for n in range(len(predictions)):
- view_id = predictions.infos.loc[n, 'batch_im_id']
+ view_id = predictions.infos.loc[n, "batch_im_id"]
TCO_pred = predictions.poses[n].cpu().numpy()
depth_measured = view_id_to_depth[view_id]
cam_K = Ks[n]
@@ -170,7 +241,12 @@ def refine_poses(self, predictions, masks, depth, cameras):
depth_rendered = all_depth_rendered[n].cpu().numpy()
TCO_refined, retval = icp_refinement(
- depth_measured, depth_rendered, mask, cam_K, TCO_pred, n_min_points=1000
+ depth_measured,
+ depth_rendered,
+ mask,
+ cam_K,
+ TCO_pred,
+ n_min_points=1000,
)
if retval != -1:
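
The ICP call depends on OpenCV's surface-matching module (shipped in `opencv-contrib-python`), which takes point clouds as Nx6 float32 arrays of XYZ plus normals; note that the binding's keyword really is spelled `tolerence`. A minimal usage sketch on synthetic clouds:

    import cv2
    import numpy as np

    rng = np.random.default_rng(0)
    xyz = rng.normal(size=(2000, 3)).astype(np.float32)
    normals = np.tile(np.float32([0.0, 0.0, 1.0]), (2000, 1))
    src = np.hstack([xyz, normals])        # [N, 6]: points + normals
    tgt = src.copy()
    tgt[:, 0] += 0.01                      # known 1 cm offset along x

    icp = cv2.ppf_match_3d_ICP(100, tolerence=0.05, numLevels=4)
    retval, residual, pose = icp.registerModelToScene(src, tgt)
    print(retval, residual)
    print(pose)                            # 4x4 rigid transform, src -> tgt
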
diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/multiview_predictor.py b/happypose/pose_estimators/cosypose/cosypose/integrated/multiview_predictor.py
index af5753d3..0ec6f306 100644
--- a/happypose/pose_estimators/cosypose/cosypose/integrated/multiview_predictor.py
+++ b/happypose/pose_estimators/cosypose/cosypose/integrated/multiview_predictor.py
@@ -1,21 +1,28 @@
import numpy as np
import pandas as pd
-import cosypose.utils.tensor_collection as tc
-
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
-from happypose.pose_estimators.cosypose.cosypose.multiview.ransac import multiview_candidate_matching
-from happypose.pose_estimators.cosypose.cosypose.multiview.bundle_adjustment import make_view_groups, MultiviewRefinement
-
+from happypose.pose_estimators.cosypose.cosypose.multiview.bundle_adjustment import (
+ MultiviewRefinement,
+ make_view_groups,
+)
+from happypose.pose_estimators.cosypose.cosypose.multiview.ransac import (
+ multiview_candidate_matching,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+
logger = get_logger(__name__)
class MultiviewScenePredictor:
def __init__(self, mesh_db, n_sym=64, ba_aabb=True, ba_n_points=None):
self.mesh_db_ransac = mesh_db.batched(n_sym=n_sym, aabb=True).cuda().float()
- self.mesh_db_ba = mesh_db.batched(
- aabb=ba_aabb, resample_n_points=ba_n_points, n_sym=n_sym).cuda().float()
+ self.mesh_db_ba = (
+ mesh_db.batched(aabb=ba_aabb, resample_n_points=ba_n_points, n_sym=n_sym)
+ .cuda()
+ .float()
+ )
def reproject_scene(self, objects, cameras):
TCO_data = []
@@ -23,16 +30,16 @@ def reproject_scene(self, objects, cameras):
for v in range(len(cameras)):
obj = objects[[o]]
cam = cameras[[v]]
- infos = dict(
- scene_id=cam.infos['scene_id'].values,
- view_id=cam.infos['view_id'].values,
- score=obj.infos['score'].values + 1.0,
- view_group=obj.infos['view_group'].values,
- label=obj.infos['label'].values,
- batch_im_id=cam.infos['batch_im_id'].values,
- obj_id=obj.infos['obj_id'].values,
- from_ba=[True],
- )
+ infos = {
+ "scene_id": cam.infos["scene_id"].values,
+ "view_id": cam.infos["view_id"].values,
+ "score": obj.infos["score"].values + 1.0,
+ "view_group": obj.infos["view_group"].values,
+ "label": obj.infos["label"].values,
+ "batch_im_id": cam.infos["batch_im_id"].values,
+ "obj_id": obj.infos["obj_id"].values,
+ "from_ba": [True],
+ }
data_ = tc.PandasTensorCollection(
infos=pd.DataFrame(infos),
poses=invert_T(cam.TWC) @ obj.TWO,
@@ -41,87 +48,102 @@ def reproject_scene(self, objects, cameras):
return tc.concatenate(TCO_data)
def predict_scene_state(
- self, candidates, cameras,
- score_th=0.3, use_known_camera_poses=False,
- ransac_n_iter=2000, ransac_dist_threshold=0.02,
- ba_n_iter=100):
-
- predictions = dict()
+ self,
+ candidates,
+ cameras,
+ score_th=0.3,
+ use_known_camera_poses=False,
+ ransac_n_iter=2000,
+ ransac_dist_threshold=0.02,
+ ba_n_iter=100,
+ ):
+ predictions = {}
cand_inputs = candidates
- assert len(np.unique(candidates.infos['scene_id'])) == 1
- scene_id = np.unique(candidates.infos['scene_id']).item()
- group_id = np.unique(candidates.infos['group_id']).item()
- keep = np.where(candidates.infos['score'] >= score_th)[0]
+ assert len(np.unique(candidates.infos["scene_id"])) == 1
+ scene_id = np.unique(candidates.infos["scene_id"]).item()
+ group_id = np.unique(candidates.infos["group_id"]).item()
+ keep = np.where(candidates.infos["score"] >= score_th)[0]
candidates = candidates[keep]
- predictions['cand_inputs'] = candidates
+ predictions["cand_inputs"] = candidates
- logger.debug(f'Num candidates: {len(candidates)}')
- logger.debug(f'Num views: {len(cameras)}')
+ logger.debug(f"Num candidates: {len(candidates)}")
+ logger.debug(f"Num views: {len(cameras)}")
matching_outputs = multiview_candidate_matching(
- candidates=candidates, mesh_db=self.mesh_db_ransac,
- n_ransac_iter=ransac_n_iter, dist_threshold=ransac_dist_threshold,
- cameras=cameras if use_known_camera_poses else None
+ candidates=candidates,
+ mesh_db=self.mesh_db_ransac,
+ n_ransac_iter=ransac_n_iter,
+ dist_threshold=ransac_dist_threshold,
+ cameras=cameras if use_known_camera_poses else None,
)
- pairs_TC1C2 = matching_outputs['pairs_TC1C2']
- candidates = matching_outputs['filtered_candidates']
+ pairs_TC1C2 = matching_outputs["pairs_TC1C2"]
+ candidates = matching_outputs["filtered_candidates"]
- logger.debug(f'Matched candidates: {len(candidates)}')
+ logger.debug(f"Matched candidates: {len(candidates)}")
for k, v in matching_outputs.items():
- if 'time' in k:
- logger.debug(f'RANSAC {k}: {v}')
+ if "time" in k:
+ logger.debug(f"RANSAC {k}: {v}")
- predictions['cand_matched'] = candidates
+ predictions["cand_matched"] = candidates
group_infos = make_view_groups(pairs_TC1C2)
- candidates = candidates.merge_df(group_infos, on='view_id').cuda()
+ candidates = candidates.merge_df(group_infos, on="view_id").cuda()
pred_objects, pred_cameras, pred_reproj = [], [], []
pred_reproj_init = []
- for (view_group, candidate_ids) in candidates.infos.groupby('view_group').groups.items():
+ for view_group, candidate_ids in candidates.infos.groupby(
+ "view_group",
+ ).groups.items():
candidates_n = candidates[candidate_ids]
- problem = MultiviewRefinement(candidates=candidates_n,
- cameras=cameras,
- pairs_TC1C2=pairs_TC1C2,
- mesh_db=self.mesh_db_ba)
+ problem = MultiviewRefinement(
+ candidates=candidates_n,
+ cameras=cameras,
+ pairs_TC1C2=pairs_TC1C2,
+ mesh_db=self.mesh_db_ba,
+ )
ba_outputs = problem.solve(
n_iterations=ba_n_iter,
optimize_cameras=not use_known_camera_poses,
)
- pred_objects_, pred_cameras_ = ba_outputs['objects'], ba_outputs['cameras']
+ pred_objects_, pred_cameras_ = ba_outputs["objects"], ba_outputs["cameras"]
for x in (pred_objects_, pred_cameras_):
- x.infos['view_group'] = view_group
- x.infos['group_id'] = group_id
- x.infos['scene_id'] = scene_id
+ x.infos["view_group"] = view_group
+ x.infos["group_id"] = group_id
+ x.infos["scene_id"] = scene_id
pred_reproj.append(self.reproject_scene(pred_objects_, pred_cameras_))
pred_objects.append(pred_objects_)
pred_cameras.append(pred_cameras_)
- pred_objects_init, pred_cameras_init = ba_outputs['objects_init'], ba_outputs['cameras_init']
+ pred_objects_init, pred_cameras_init = (
+ ba_outputs["objects_init"],
+ ba_outputs["cameras_init"],
+ )
for x in (pred_objects_init, pred_cameras_init):
- x.infos['view_group'] = view_group
- x.infos['group_id'] = group_id
- x.infos['scene_id'] = scene_id
- pred_reproj_init.append(self.reproject_scene(pred_objects_init, pred_cameras_init))
+ x.infos["view_group"] = view_group
+ x.infos["group_id"] = group_id
+ x.infos["scene_id"] = scene_id
+ pred_reproj_init.append(
+ self.reproject_scene(pred_objects_init, pred_cameras_init),
+ )
for k, v in ba_outputs.items():
- if 'time' in k:
- logger.debug(f'BA {k}: {v}')
+ if "time" in k:
+ logger.debug(f"BA {k}: {v}")
- predictions['scene/objects'] = tc.concatenate(pred_objects)
- predictions['scene/cameras'] = tc.concatenate(pred_cameras)
+ predictions["scene/objects"] = tc.concatenate(pred_objects)
+ predictions["scene/cameras"] = tc.concatenate(pred_cameras)
- predictions['ba_output'] = tc.concatenate(pred_reproj)
- predictions['ba_input'] = tc.concatenate(pred_reproj_init)
+ predictions["ba_output"] = tc.concatenate(pred_reproj)
+ predictions["ba_input"] = tc.concatenate(pred_reproj_init)
cand_inputs = tc.PandasTensorCollection(
infos=cand_inputs.infos,
poses=cand_inputs.poses,
)
- predictions['ba_output+all_cand'] = tc.concatenate(
- [predictions['ba_output'], cand_inputs],
+ predictions["ba_output+all_cand"] = tc.concatenate(
+ [predictions["ba_output"], cand_inputs],
)
return predictions
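
The `predictions` dict assembled above is keyed by pipeline stage. A hypothetical consumer, sketched under the assumption that `predictor`, `candidates`, and `cameras` are built elsewhere (e.g. from a `MeshDataBase` and per-view detections):

```python
def summarize_scene_predictions(predictor, candidates, cameras):
    """Walk the stage-keyed dict built by predict_scene_state above."""
    predictions = predictor.predict_scene_state(candidates, cameras, score_th=0.3)
    for key in (
        "cand_inputs",         # all scored single-view candidates
        "cand_matched",        # candidates kept by RANSAC matching
        "ba_input",            # scene reprojected before bundle adjustment
        "ba_output",           # scene reprojected after bundle adjustment
        "ba_output+all_cand",  # BA output concatenated with raw candidates
    ):
        print(key, len(predictions[key]))
    return predictions["scene/objects"], predictions["scene/cameras"]
```
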
diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py
index 452107b8..f2cfca6d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py
+++ b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py
@@ -1,22 +1,19 @@
import time
from collections import defaultdict
-from typing import Any, Optional, Tuple
+from typing import Any, Optional
-import cosypose.utils.tensor_collection as tc
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import (
TCO_init_from_boxes,
TCO_init_from_boxes_zup_autodepth,
)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
from happypose.pose_estimators.cosypose.cosypose.utils.timer import Timer
-from happypose.pose_estimators.megapose.training.utils import (
- CudaTimer,
- SimpleTimer,
-)
+from happypose.pose_estimators.megapose.training.utils import CudaTimer, SimpleTimer
from happypose.toolbox.inference.pose_estimator import PoseEstimationModule
from happypose.toolbox.inference.types import (
DetectionsType,
@@ -27,7 +24,8 @@
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
class PoseEstimator(PoseEstimationModule):
"""Performs inference for pose estimation."""
@@ -37,22 +35,21 @@ def __init__(
refiner_model: Optional[torch.nn.Module] = None,
coarse_model: Optional[torch.nn.Module] = None,
detector_model: Optional[torch.nn.Module] = None,
- #depth_refiner: Optional[DepthRefiner] = None,
+ # depth_refiner: Optional[DepthRefiner] = None,
bsz_objects: int = 8,
bsz_images: int = 256,
- #SO3_grid_size: int = 576,
+ # SO3_grid_size: int = 576,
) -> None:
-
super().__init__()
self.coarse_model = coarse_model
self.refiner_model = refiner_model
self.detector_model = detector_model
- #self.depth_refiner = depth_refiner
+ # self.depth_refiner = depth_refiner
self.bsz_objects = bsz_objects
self.bsz_images = bsz_images
# Load the SO3 grid if was passed in
- #if SO3_grid_size is not None:
+ # if SO3_grid_size is not None:
# self.load_SO3_grid(SO3_grid_size)
# load cfg and mesh_db from refiner model
@@ -63,7 +60,8 @@ def __init__(
self.cfg = self.coarse_model.cfg
self.mesh_db = self.coarse_model.mesh_db
else:
- raise ValueError("At least one of refiner_model or " " coarse_model must be specified.")
+ msg = "At least one of refiner_model or coarse_model must be specified."
+ raise ValueError(msg)
self.eval()
@@ -71,7 +69,7 @@ def __init__(
self.keep_all_coarse_outputs = False
self.refiner_outputs = None
self.coarse_outputs = None
- self.debug_dict: dict = dict()
+ self.debug_dict: dict = {}
@torch.no_grad()
def batched_model_predictions(self, model, images, K, obj_data, n_iterations=1):
@@ -84,40 +82,50 @@ def batched_model_predictions(self, model, images, K, obj_data, n_iterations=1):
dl = DataLoader(ds, batch_size=self.bsz_objects)
preds = defaultdict(list)
- for (batch_ids, ) in dl:
+ for (batch_ids,) in dl:
timer.resume()
obj_inputs = obj_data[batch_ids.numpy()]
- labels = obj_inputs.infos['label'].values
- im_ids = obj_inputs.infos['batch_im_id'].values
+ labels = obj_inputs.infos["label"].values
+ im_ids = obj_inputs.infos["batch_im_id"].values
images_ = images[im_ids]
K_ = K[im_ids]
TCO_input = obj_inputs.poses
- outputs = model(images=images_, K=K_, TCO=TCO_input,
- n_iterations=n_iterations, labels=labels)
+ outputs = model(
+ images=images_,
+ K=K_,
+ TCO=TCO_input,
+ n_iterations=n_iterations,
+ labels=labels,
+ )
timer.pause()
- for n in range(1, n_iterations+1):
- iter_outputs = outputs[f'iteration={n}']
+ for n in range(1, n_iterations + 1):
+ iter_outputs = outputs[f"iteration={n}"]
infos = obj_inputs.infos
- batch_preds = tc.PandasTensorCollection(infos,
- poses=iter_outputs['TCO_output'],
- poses_input=iter_outputs['TCO_input'],
- K_crop=iter_outputs['K_crop'],
- boxes_rend=iter_outputs['boxes_rend'],
- boxes_crop=iter_outputs['boxes_crop'])
- preds[f'iteration={n}'].append(batch_preds)
-
- logger.debug(f'Pose prediction on {len(obj_data)} detections (n_iterations={n_iterations}): {timer.stop()}')
+ batch_preds = tc.PandasTensorCollection(
+ infos,
+ poses=iter_outputs["TCO_output"],
+ poses_input=iter_outputs["TCO_input"],
+ K_crop=iter_outputs["K_crop"],
+ boxes_rend=iter_outputs["boxes_rend"],
+ boxes_crop=iter_outputs["boxes_crop"],
+ )
+ preds[f"iteration={n}"].append(batch_preds)
+
+ logger.debug(
+ f"Pose prediction on {len(obj_data)} detections "
+ f"(n_iterations={n_iterations}): {timer.stop()}",
+ )
preds = dict(preds)
for k, v in preds.items():
preds[k] = tc.concatenate(v)
return preds
def make_TCO_init(self, detections, K):
- K = K[detections.infos['batch_im_id'].values]
+ K = K[detections.infos["batch_im_id"].values]
boxes = detections.bboxes
- if self.coarse_model.cfg.init_method == 'z-up+auto-depth':
- meshes = self.coarse_model.mesh_db.select(detections.infos['label'])
+ if self.coarse_model.cfg.init_method == "z-up+auto-depth":
+ meshes = self.coarse_model.mesh_db.select(detections.infos["label"])
points_3d = meshes.sample_points(2000, deterministic=True)
TCO_init = TCO_init_from_boxes_zup_autodepth(boxes, points_3d, K)
else:
@@ -138,8 +146,7 @@ def run_inference_pipeline(
coarse_estimates: Optional[PoseEstimatesType] = None,
detection_th: float = 0.7,
mask_th: float = 0.8,
- ) -> Tuple[PoseEstimatesType, dict]:
-
+ ) -> tuple[PoseEstimatesType, dict]:
timing_str = ""
timer = SimpleTimer()
timer.start()
@@ -156,52 +163,65 @@ def run_inference_pipeline(
)
if detections is None and run_detector:
start_time = time.time()
- detections = self.forward_detection_model(observation, detection_th, mask_th)
+ detections = self.forward_detection_model(
+ observation,
+ detection_th,
+ mask_th,
+ )
if torch.cuda.is_available():
detections = detections.cuda()
else:
detections = detections
elapsed = time.time() - start_time
timing_str += f"detection={elapsed:.2f}, "
-
- preds = dict()
+
+ preds = {}
if data_TCO_init is None:
assert detections is not None
assert self.coarse_model is not None
assert n_coarse_iterations > 0
K = observation.K
data_TCO_init = self.make_TCO_init(detections, K)
- coarse_preds, coarse_extra_data = self.forward_coarse_model(observation, data_TCO_init,
- n_iterations=n_coarse_iterations)
+ coarse_preds, coarse_extra_data = self.forward_coarse_model(
+ observation,
+ data_TCO_init,
+ n_iterations=n_coarse_iterations,
+ )
for n in range(1, n_coarse_iterations + 1):
- preds[f'coarse/iteration={n}'] = coarse_preds[f'iteration={n}']
- data_TCO_coarse = coarse_preds[f'iteration={n_coarse_iterations}']
+ preds[f"coarse/iteration={n}"] = coarse_preds[f"iteration={n}"]
+ data_TCO_coarse = coarse_preds[f"iteration={n_coarse_iterations}"]
else:
assert n_coarse_iterations == 0
data_TCO = data_TCO_init
- preds[f'external_coarse'] = data_TCO
+ preds["external_coarse"] = data_TCO
data_TCO_coarse = data_TCO
if n_refiner_iterations >= 1:
assert self.refiner_model is not None
- refiner_preds, refiner_extra_data = self.forward_refiner(observation, data_TCO_coarse,
- n_iterations=n_refiner_iterations)
+ refiner_preds, refiner_extra_data = self.forward_refiner(
+ observation,
+ data_TCO_coarse,
+ n_iterations=n_refiner_iterations,
+ )
for n in range(1, n_refiner_iterations + 1):
- preds[f'refiner/iteration={n}'] = refiner_preds[f'iteration={n}']
- data_TCO = refiner_preds[f'iteration={n_refiner_iterations}']
-
+ preds[f"refiner/iteration={n}"] = refiner_preds[f"iteration={n}"]
+ data_TCO = refiner_preds[f"iteration={n_refiner_iterations}"]
+
timer.stop()
timing_str = f"total={timer.elapsed():.2f}, {timing_str}"
- extra_data: dict = dict()
+ extra_data: dict = {}
extra_data["coarse"] = {"preds": data_TCO_coarse, "data": coarse_extra_data}
- extra_data["refiner_all_hypotheses"] = {"preds": preds, "data": refiner_extra_data}
+ extra_data["refiner_all_hypotheses"] = {
+ "preds": preds,
+ "data": refiner_extra_data,
+ }
extra_data["refiner"] = {"preds": data_TCO, "data": refiner_extra_data}
extra_data["timing_str"] = timing_str
extra_data["time"] = timer.elapsed()
return data_TCO, extra_data
-
+
def forward_detection_model(
self,
observation: ObservationTensor,
@@ -210,19 +230,16 @@ def forward_detection_model(
*args: Any,
**kwargs: Any,
) -> DetectionsType:
-
"""Runs the detector."""
-
detections = self.detector_model.get_detections(
observation=observation,
one_instance_per_class=False,
- detection_th=detection_th,
+ detection_th=detection_th,
output_masks=False,
- mask_th=mask_th
+ mask_th=mask_th,
)
return detections
-
@torch.no_grad()
def forward_coarse_model(
self,
@@ -231,14 +248,14 @@ def forward_coarse_model(
n_iterations: int = 5,
keep_all_outputs: bool = False,
cuda_timer: bool = False,
- ) -> Tuple[dict, dict]:
+ ) -> tuple[dict, dict]:
"""Runs the refiner model for the specified number of iterations.
-
Will actually use the batched_model_predictions to stay within
batch size limit.
- Returns:
+ Returns
+ -------
(preds, extra_data)
preds:
@@ -251,7 +268,6 @@ def forward_coarse_model(
A dict containing additional information such as timing
"""
-
timer = Timer()
timer.start()
@@ -268,7 +284,7 @@ def forward_coarse_model(
model_time = 0.0
- for (batch_idx, (batch_ids,)) in enumerate(dl):
+ for batch_idx, (batch_ids,) in enumerate(dl):
data_TCO_input_ = data_TCO_input[batch_ids]
df_ = data_TCO_input_.infos
TCO_input_ = data_TCO_input_.poses
@@ -284,7 +300,7 @@ def forward_coarse_model(
K_ = observation.K[batch_im_ids_]
if torch.cuda.is_available():
timer_ = CudaTimer(enabled=cuda_timer)
- else:
+ else:
timer_ = SimpleTimer()
timer_.start()
outputs_ = self.coarse_model(
@@ -341,14 +357,14 @@ def forward_refiner(
n_iterations: int = 5,
keep_all_outputs: bool = False,
cuda_timer: bool = False,
- ) -> Tuple[dict, dict]:
+ ) -> tuple[dict, dict]:
"""Runs the refiner model for the specified number of iterations.
-
Will actually use the batched_model_predictions to stay within
batch size limit.
- Returns:
+ Returns
+ -------
(preds, extra_data)
preds:
@@ -361,7 +377,6 @@ def forward_refiner(
A dict containing additional information such as timing
"""
-
timer = Timer()
timer.start()
@@ -378,7 +393,7 @@ def forward_refiner(
model_time = 0.0
- for (batch_idx, (batch_ids,)) in enumerate(dl):
+ for batch_idx, (batch_ids,) in enumerate(dl):
data_TCO_input_ = data_TCO_input[batch_ids]
df_ = data_TCO_input_.infos
TCO_input_ = data_TCO_input_.poses
@@ -394,7 +409,7 @@ def forward_refiner(
K_ = observation.K[batch_im_ids_]
if torch.cuda.is_available():
timer_ = CudaTimer(enabled=cuda_timer)
- else:
+ else:
timer_ = SimpleTimer()
timer_.start()
outputs_ = self.refiner_model(
@@ -442,7 +457,8 @@ def forward_refiner(
}
logger.debug(
- f"Pose prediction on {B} poses (n_iterations={n_iterations}):" f" {timer.stop()}"
+ f"Pose prediction on {B} poses (n_iterations={n_iterations}):"
+ f" {timer.stop()}",
)
- return preds, extra_data
\ No newline at end of file
+ return preds, extra_data
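
A sketch of driving the reworked `run_inference_pipeline` above; `pose_estimator` and `observation` are assumed to be a built `PoseEstimator` and an `ObservationTensor`, and the keyword names follow the code in this diff:

```python
def estimate_poses(pose_estimator, observation, detections=None):
    """One coarse pass plus four refiner iterations, as wired above."""
    data_TCO, extra_data = pose_estimator.run_inference_pipeline(
        observation,
        detections=detections,
        run_detector=detections is None,  # fall back to the bundled detector
        n_coarse_iterations=1,
        n_refiner_iterations=4,
    )
    print(extra_data["timing_str"])  # e.g. "total=0.85, detection=0.12, "
    return data_TCO                  # final refiner output (poses + infos)
```
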
diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py
index b9fc4978..3dc06210 100644
--- a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py
+++ b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py
@@ -1,25 +1,23 @@
-from typing import Tuple
-import torch
-
from collections import defaultdict
-from torch.utils.data import TensorDataset, DataLoader
-from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import TCO_init_from_boxes, TCO_init_from_boxes_zup_autodepth
-import cosypose.utils.tensor_collection as tc
+import torch
+from torch.utils.data import DataLoader, TensorDataset
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import (
+ TCO_init_from_boxes,
+ TCO_init_from_boxes_zup_autodepth,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
from happypose.pose_estimators.cosypose.cosypose.utils.timer import Timer
-
from happypose.toolbox.inference.pose_estimator import PoseEstimationModule
from happypose.toolbox.inference.types import PoseEstimatesType
+
logger = get_logger(__name__)
class CoarseRefinePosePredictor(PoseEstimationModule):
- def __init__(self,
- coarse_model=None,
- refiner_model=None,
- bsz_objects=64):
+ def __init__(self, coarse_model=None, refiner_model=None, bsz_objects=64):
super().__init__()
self.coarse_model = coarse_model
self.refiner_model = refiner_model
@@ -37,83 +35,124 @@ def batched_model_predictions(self, model, images, K, obj_data, n_iterations=1):
dl = DataLoader(ds, batch_size=self.bsz_objects)
preds = defaultdict(list)
- for (batch_ids, ) in dl:
+ for (batch_ids,) in dl:
timer.resume()
obj_inputs = obj_data[batch_ids.numpy()]
- labels = obj_inputs.infos['label'].values
- im_ids = obj_inputs.infos['batch_im_id'].values
+ labels = obj_inputs.infos["label"].values
+ im_ids = obj_inputs.infos["batch_im_id"].values
images_ = images[im_ids]
K_ = K[im_ids]
TCO_input = obj_inputs.poses
- outputs = model(images=images_, K=K_, TCO=TCO_input,
- n_iterations=n_iterations, labels=labels)
+ outputs = model(
+ images=images_,
+ K=K_,
+ TCO=TCO_input,
+ n_iterations=n_iterations,
+ labels=labels,
+ )
timer.pause()
- for n in range(1, n_iterations+1):
- iter_outputs = outputs[f'iteration={n}']
+ for n in range(1, n_iterations + 1):
+ iter_outputs = outputs[f"iteration={n}"]
infos = obj_inputs.infos
- batch_preds = tc.PandasTensorCollection(infos,
- poses=iter_outputs['TCO_output'],
- poses_input=iter_outputs['TCO_input'],
- K_crop=iter_outputs['K_crop'],
- boxes_rend=iter_outputs['boxes_rend'],
- boxes_crop=iter_outputs['boxes_crop'])
- preds[f'iteration={n}'].append(batch_preds)
-
- logger.debug(f'Pose prediction on {len(obj_data)} detections (n_iterations={n_iterations}): {timer.stop()}')
+ batch_preds = tc.PandasTensorCollection(
+ infos,
+ poses=iter_outputs["TCO_output"],
+ poses_input=iter_outputs["TCO_input"],
+ K_crop=iter_outputs["K_crop"],
+ boxes_rend=iter_outputs["boxes_rend"],
+ boxes_crop=iter_outputs["boxes_crop"],
+ )
+ preds[f"iteration={n}"].append(batch_preds)
+
+ logger.debug(
+ f"Pose prediction on {len(obj_data)} detections "
+ f"(n_iterations={n_iterations}): {timer.stop()}",
+ )
preds = dict(preds)
for k, v in preds.items():
preds[k] = tc.concatenate(v)
return preds
def make_TCO_init(self, detections, K):
- K = K[detections.infos['batch_im_id'].values]
+ K = K[detections.infos["batch_im_id"].values]
boxes = detections.bboxes
- if self.coarse_model.cfg.init_method == 'z-up+auto-depth':
- meshes = self.coarse_model.mesh_db.select(detections.infos['label'])
+ if self.coarse_model.cfg.init_method == "z-up+auto-depth":
+ meshes = self.coarse_model.mesh_db.select(detections.infos["label"])
points_3d = meshes.sample_points(2000, deterministic=True)
TCO_init = TCO_init_from_boxes_zup_autodepth(boxes, points_3d, K)
else:
TCO_init = TCO_init_from_boxes(z_range=(1.0, 1.0), boxes=boxes, K=K)
return tc.PandasTensorCollection(infos=detections.infos, poses=TCO_init)
- def run_inference_pipeline(self, images, K,
- detections=None,
- data_TCO_init=None,
- n_coarse_iterations=1,
- n_refiner_iterations=1):
-
- preds = dict()
+ def run_inference_pipeline(
+ self,
+ images,
+ K,
+ detections=None,
+ data_TCO_init=None,
+ n_coarse_iterations=1,
+ n_refiner_iterations=1,
+ ):
+ preds = {}
if data_TCO_init is None:
assert detections is not None
assert self.coarse_model is not None
assert n_coarse_iterations > 0
data_TCO_init = self.make_TCO_init(detections, K)
- coarse_preds = self.forward_coarse_model(images, K, data_TCO_init,
- n_coarse_iterations=n_coarse_iterations)
+ coarse_preds = self.forward_coarse_model(
+ images,
+ K,
+ data_TCO_init,
+ n_coarse_iterations=n_coarse_iterations,
+ )
for n in range(1, n_coarse_iterations + 1):
- preds[f'coarse/iteration={n}'] = coarse_preds[f'iteration={n}']
- data_TCO = coarse_preds[f'iteration={n_coarse_iterations}']
+ preds[f"coarse/iteration={n}"] = coarse_preds[f"iteration={n}"]
+ data_TCO = coarse_preds[f"iteration={n_coarse_iterations}"]
else:
assert n_coarse_iterations == 0
data_TCO = data_TCO_init
- preds[f'external_coarse'] = data_TCO
+ preds["external_coarse"] = data_TCO
if n_refiner_iterations >= 1:
assert self.refiner_model is not None
- refiner_preds = self.forward_refiner(images, K, data_TCO,
- n_refiner_iterations=n_refiner_iterations)
+ refiner_preds = self.forward_refiner(
+ images,
+ K,
+ data_TCO,
+ n_refiner_iterations=n_refiner_iterations,
+ )
for n in range(1, n_refiner_iterations + 1):
- preds[f'refiner/iteration={n}'] = refiner_preds[f'iteration={n}']
- data_TCO = refiner_preds[f'iteration={n_refiner_iterations}']
+ preds[f"refiner/iteration={n}"] = refiner_preds[f"iteration={n}"]
+ data_TCO = refiner_preds[f"iteration={n_refiner_iterations}"]
return data_TCO, preds
- def forward_coarse_model(self, images, K, data_TCO_init, n_coarse_iterations) -> Tuple[PoseEstimatesType, dict]:
- return self.batched_model_predictions(self.coarse_model,
- images, K, data_TCO_init,
- n_iterations=n_coarse_iterations)
-
- def forward_refiner(self, images, K, data_TCO, n_refiner_iterations) -> Tuple[dict, dict]:
- return self.batched_model_predictions(self.refiner_model,
- images, K, data_TCO,
- n_iterations=n_refiner_iterations)
\ No newline at end of file
+ def forward_coarse_model(
+ self,
+ images,
+ K,
+ data_TCO_init,
+ n_coarse_iterations,
+ ) -> tuple[PoseEstimatesType, dict]:
+ return self.batched_model_predictions(
+ self.coarse_model,
+ images,
+ K,
+ data_TCO_init,
+ n_iterations=n_coarse_iterations,
+ )
+
+ def forward_refiner(
+ self,
+ images,
+ K,
+ data_TCO,
+ n_refiner_iterations,
+ ) -> tuple[dict, dict]:
+ return self.batched_model_predictions(
+ self.refiner_model,
+ images,
+ K,
+ data_TCO,
+ n_iterations=n_refiner_iterations,
+ )
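
The legacy predictor follows the same shape; a sketch of pulling per-iteration outputs out of `preds` (a built `CoarseRefinePosePredictor` is assumed):

```python
def predict_coarse_refine(predictor, images, K, detections):
    """Coarse init from detections, then two refiner iterations, as above."""
    data_TCO, preds = predictor.run_inference_pipeline(
        images,
        K,
        detections=detections,
        n_coarse_iterations=1,
        n_refiner_iterations=2,
    )
    coarse = preds["coarse/iteration=1"]  # stage outputs keyed by iteration
    refined = preds["refiner/iteration=2"]
    return data_TCO, coarse, refined
```
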
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/__init__.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/__init__.py
index 2dc898a6..e69de29b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/__init__.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/__init__.py
@@ -1,3 +0,0 @@
-from .transform import Transform, parse_pose_args
-from .rotations import compute_rotation_matrix_from_ortho6d
-from .transform_ops import transform_pts, invert_T
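
With this `__init__.py` emptied, code that relied on the package re-exports has to import from the defining modules instead; the removed names map to (module paths as elsewhere in this diff):

```python
from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import (
    compute_rotation_matrix_from_ortho6d,
)
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform import (
    Transform,
    parse_pose_args,
)
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import (
    invert_T,
    transform_pts,
)
```
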
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/camera_geometry.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/camera_geometry.py
index 27360aa8..3decbb23 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/camera_geometry.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/camera_geometry.py
@@ -8,7 +8,10 @@ def project_points(points_3d, K, TCO):
n_points = points_3d.shape[1]
device = points_3d.device
if points_3d.shape[-1] == 3:
- points_3d = torch.cat((points_3d, torch.ones(batch_size, n_points, 1).to(device)), dim=-1)
+ points_3d = torch.cat(
+ (points_3d, torch.ones(batch_size, n_points, 1).to(device)),
+ dim=-1,
+ )
P = K @ TCO[:, :3]
suv = (P.unsqueeze(1) @ points_3d.unsqueeze(-1)).squeeze(-1)
suv = suv / suv[..., [-1]]
@@ -22,7 +25,10 @@ def project_points_robust(points_3d, K, TCO, z_min=0.1):
n_points = points_3d.shape[1]
device = points_3d.device
if points_3d.shape[-1] == 3:
- points_3d = torch.cat((points_3d, torch.ones(batch_size, n_points, 1).to(device)), dim=-1)
+ points_3d = torch.cat(
+ (points_3d, torch.ones(batch_size, n_points, 1).to(device)),
+ dim=-1,
+ )
P = K @ TCO[:, :3]
suv = (P.unsqueeze(1) @ points_3d.unsqueeze(-1)).squeeze(-1)
z = suv[..., -1]
@@ -43,12 +49,11 @@ def boxes_from_uv(uv):
def get_K_crop_resize(K, boxes, orig_size, crop_resize):
- """
- Adapted from https://github.com/BerkeleyAutomation/perception/blob/master/perception/camera_intrinsics.py
- Skew is not handled !
+ """Adapted from https://github.com/BerkeleyAutomation/perception/blob/master/perception/camera_intrinsics.py
+ Skew is not handled!
"""
assert K.shape[1:] == (3, 3)
- assert boxes.shape[1:] == (4, )
+ assert boxes.shape[1:] == (4,)
K = K.float()
boxes = boxes.float()
new_K = K.clone()
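
The projection being re-wrapped in this file is the standard pinhole chain. A NumPy restatement for a single image (a sketch with a hypothetical name, not the library API):

```python
import numpy as np

def project_points_np(points_3d, K, TCO):
    """Homogenize, apply P = K @ TCO[:3], then perspective-divide (cf. above)."""
    pts_h = np.concatenate([points_3d, np.ones((len(points_3d), 1))], axis=-1)
    suv = (K @ TCO[:3] @ pts_h.T).T
    return suv[:, :2] / suv[:, [2]]

K = np.array([[600.0, 0, 320], [0, 600, 240], [0, 0, 1]])
TCO = np.eye(4)
TCO[2, 3] = 1.0  # object frame 1 m in front of the camera
print(project_points_np(np.zeros((1, 3)), K, TCO))  # -> [[320. 240.]]
```
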
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/cosypose_ops.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/cosypose_ops.py
index dddf5497..eaa08db4 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/cosypose_ops.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/cosypose_ops.py
@@ -1,10 +1,18 @@
import torch
-from .rotations import compute_rotation_matrix_from_ortho6d, compute_rotation_matrix_from_quaternions
+from .rotations import (
+ compute_rotation_matrix_from_ortho6d,
+ compute_rotation_matrix_from_quaternions,
+)
from .transform_ops import transform_pts
-l1 = lambda diff: diff.abs()
-l2 = lambda diff: diff ** 2
+
+def l1(diff):
+ return diff.abs()
+
+
+def l2(diff):
+ return diff**2
def apply_imagespace_predictions(TCO, K, vxvyvz, dRCO):
@@ -23,7 +31,10 @@ def apply_imagespace_predictions(TCO, K, vxvyvz, dRCO):
fxfy = K[:, [0, 1], [0, 1]]
xsrcysrc = TCO[:, :2, 3]
TCO_out[:, 2, 3] = ztgt.flatten()
- TCO_out[:, :2, 3] = ((vxvy / fxfy) + (xsrcysrc / zsrc.repeat(1, 2))) * ztgt.repeat(1, 2)
+ TCO_out[:, :2, 3] = ((vxvy / fxfy) + (xsrcysrc / zsrc.repeat(1, 2))) * ztgt.repeat(
+ 1,
+ 2,
+ )
# Rotation in camera frame
# TC1' = TC2' @ T2'1' where TC2' = T22' = dCRO is predicted and T2'1'=T21=TC1
@@ -40,15 +51,21 @@ def loss_CO_symmetric(TCO_possible_gt, TCO_pred, points, l1_or_l2=l1):
TCO_points_possible_gt = transform_pts(TCO_possible_gt, points)
TCO_pred_points = transform_pts(TCO_pred, points)
- losses_possible = l1_or_l2((TCO_pred_points.unsqueeze(1) - TCO_points_possible_gt).flatten(-2, -1)).mean(-1)
+ losses_possible = l1_or_l2(
+ (TCO_pred_points.unsqueeze(1) - TCO_points_possible_gt).flatten(-2, -1),
+ ).mean(-1)
loss, min_id = losses_possible.min(dim=1)
TCO_assign = TCO_possible_gt[torch.arange(bsz), min_id]
return loss, TCO_assign
-def loss_refiner_CO_disentangled(TCO_possible_gt,
- TCO_input, refiner_outputs,
- K_crop, points):
+def loss_refiner_CO_disentangled(
+ TCO_possible_gt,
+ TCO_input,
+ refiner_outputs,
+ K_crop,
+ points,
+):
bsz = TCO_possible_gt.shape[0]
assert TCO_possible_gt.shape[0] == bsz
assert TCO_input.shape[0] == bsz
@@ -70,7 +87,9 @@ def loss_refiner_CO_disentangled(TCO_possible_gt,
vxvy = vxvyvz[:, :2]
fxfy = K_crop[:, [0, 1], [0, 1]]
xsrcysrc = TCO_input[:, :2, 3]
- TCO_pred_xy[:, :2, 3] = ((vxvy / fxfy) + (xsrcysrc / z_input.repeat(1, 2))) * z_gt.repeat(1, 2)
+ TCO_pred_xy[:, :2, 3] = (
+ (vxvy / fxfy) + (xsrcysrc / z_input.repeat(1, 2))
+ ) * z_gt.repeat(1, 2)
TCO_pred_z = TCO_gt.clone()
vz = vxvyvz[:, [2]]
@@ -82,9 +101,13 @@ def loss_refiner_CO_disentangled(TCO_possible_gt,
return loss_orn + loss_xy + loss_z
-def loss_refiner_CO_disentangled_quaternions(TCO_possible_gt,
- TCO_input, refiner_outputs,
- K_crop, points):
+def loss_refiner_CO_disentangled_quaternions(
+ TCO_possible_gt,
+ TCO_input,
+ refiner_outputs,
+ K_crop,
+ points,
+):
bsz = TCO_possible_gt.shape[0]
assert TCO_possible_gt.shape[0] == bsz
assert TCO_input.shape[0] == bsz
@@ -106,7 +129,9 @@ def loss_refiner_CO_disentangled_quaternions(TCO_possible_gt,
vxvy = vxvyvz[:, :2]
fxfy = K_crop[:, [0, 1], [0, 1]]
xsrcysrc = TCO_input[:, :2, 3]
- TCO_pred_xy[:, :2, 3] = ((vxvy / fxfy) + (xsrcysrc / z_input.repeat(1, 2))) * z_gt.repeat(1, 2)
+ TCO_pred_xy[:, :2, 3] = (
+ (vxvy / fxfy) + (xsrcysrc / z_input.repeat(1, 2))
+ ) * z_gt.repeat(1, 2)
TCO_pred_z = TCO_gt.clone()
vz = vxvyvz[:, [2]]
@@ -125,7 +150,15 @@ def TCO_init_from_boxes(z_range, boxes, K):
assert boxes.dim() == 2
bsz = boxes.shape[0]
uv_centers = (boxes[:, [0, 1]] + boxes[:, [2, 3]]) / 2
- z = torch.as_tensor(z_range).mean().unsqueeze(0).unsqueeze(0).repeat(bsz, 1).to(boxes.device).to(boxes.dtype)
+ z = (
+ torch.as_tensor(z_range)
+ .mean()
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(bsz, 1)
+ .to(boxes.device)
+ .to(boxes.dtype)
+ )
fxfy = K[:, [0, 1], [0, 1]]
cxcy = K[:, [0, 1], [2, 2]]
xy_init = ((uv_centers - cxcy) * z) / fxfy
@@ -143,19 +176,30 @@ def TCO_init_from_boxes_zup_autodepth(boxes_2d, model_points_3d, K):
z_guess = 1.0
fxfy = K[:, [0, 1], [0, 1]]
cxcy = K[:, [0, 1], [2, 2]]
- TCO = torch.tensor([
- [0, 1, 0, 0],
- [0, 0, -1, 0],
- [-1, 0, 0, z_guess],
- [0, 0, 0, 1]
- ]).to(torch.float).to(boxes_2d.device).repeat(bsz, 1, 1)
+ TCO = (
+ torch.tensor(
+ [
+ [0, 1, 0, 0],
+ [0, 0, -1, 0],
+ [-1, 0, 0, z_guess],
+ [0, 0, 0, 1],
+ ],
+ )
+ .to(torch.float)
+ .to(boxes_2d.device)
+ .repeat(bsz, 1, 1)
+ )
bb_xy_centers = (boxes_2d[:, [0, 1]] + boxes_2d[:, [2, 3]]) / 2
xy_init = ((bb_xy_centers - cxcy) * z_guess) / fxfy
TCO[:, :2, 3] = xy_init
C_pts_3d = transform_pts(TCO, model_points_3d)
- deltax_3d = C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
- deltay_3d = C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ deltax_3d = (
+ C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
+ )
+ deltay_3d = (
+ C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ )
bb_deltax = (boxes_2d[:, 2] - boxes_2d[:, 0]) + 1
bb_deltay = (boxes_2d[:, 3] - boxes_2d[:, 1]) + 1
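
For the simpler `TCO_init_from_boxes` branch above, the translation init is just back-projection of the detection-box center at the mid-range depth; a worked instance (the toy intrinsics and box are assumptions for illustration):

```python
import numpy as np

K = np.array([[600.0, 0, 320], [0, 600, 240], [0, 0, 1]])
box = np.array([300.0, 220.0, 340.0, 260.0])  # x1, y1, x2, y2
uv_center = (box[:2] + box[2:]) / 2           # -> (320, 240)
z = np.mean([1.0, 1.0])                       # midpoint of z_range
xy = (uv_center - K[[0, 1], [2, 2]]) * z / K[[0, 1], [0, 1]]
print(xy, z)  # box centered on the principal point -> x = y = 0 at z = 1 m
```
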
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/cropping.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/cropping.py
index 1267e3be..2cf5ee9e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/cropping.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/cropping.py
@@ -1,13 +1,19 @@
import torch
import torchvision
-from .camera_geometry import project_points, boxes_from_uv, project_points_robust
+from .camera_geometry import boxes_from_uv, project_points, project_points_robust
-def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240, 320), clamp=False):
- """
- gt_boxes: N x 4
- crop_boxes: N x 4
+def deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ lamb=1.4,
+ im_size=(240, 320),
+ clamp=False,
+):
+ """gt_boxes: N x 4
+ crop_boxes: N x 4.
"""
lobs, robs, uobs, dobs = obs_boxes[:, [0, 2, 1, 3]].t()
lrend, rrend, urend, drend = rend_boxes[:, [0, 2, 1, 3]].t()
@@ -24,22 +30,24 @@ def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240,
r = w / h
xdists = torch.cat(
- ((lobs - xc).abs(), (lrend - xc).abs(),
- (robs - xc).abs(), (rrend - xc).abs()),
- dim=1)
+ ((lobs - xc).abs(), (lrend - xc).abs(), (robs - xc).abs(), (rrend - xc).abs()),
+ dim=1,
+ )
ydists = torch.cat(
- ((uobs - yc).abs(), (urend - yc).abs(),
- (dobs - yc).abs(), (drend - yc).abs()),
- dim=1)
+ ((uobs - yc).abs(), (urend - yc).abs(), (dobs - yc).abs(), (drend - yc).abs()),
+ dim=1,
+ )
xdist = xdists.max(dim=1)[0]
ydist = ydists.max(dim=1)[0]
width = torch.max(xdist, ydist * r) * 2 * lamb
height = torch.max(xdist / r, ydist) * 2 * lamb
xc, yc = xc.squeeze(-1), yc.squeeze(-1)
- x1, y1, x2, y2 = xc - width/2, yc - height / 2, xc + width / 2, yc + height / 2
+ x1, y1, x2, y2 = xc - width / 2, yc - height / 2, xc + width / 2, yc + height / 2
boxes = torch.cat(
- (x1.unsqueeze(1), y1.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)), dim=1)
+ (x1.unsqueeze(1), y1.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)),
+ dim=1,
+ )
assert not clamp
if clamp:
boxes[:, [0, 2]] = torch.clamp(boxes[:, [0, 2]], 0, w - 1)
@@ -47,29 +55,81 @@ def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240,
return boxes
-def deepim_crops(images, obs_boxes, K, TCO_pred, O_vertices, output_size=None, lamb=1.4):
+def deepim_crops(
+ images,
+ obs_boxes,
+ K,
+ TCO_pred,
+ O_vertices,
+ output_size=None,
+ lamb=1.4,
+):
batch_size, _, h, w = images.shape
device = images.device
if output_size is None:
output_size = (h, w)
uv = project_points(O_vertices, K, TCO_pred)
rend_boxes = boxes_from_uv(uv)
- rend_center_uv = project_points(torch.zeros(batch_size, 1, 3).to(device), K, TCO_pred)
- boxes = deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, im_size=(h, w), lamb=lamb)
- boxes = torch.cat((torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes), dim=1)
- crops = torchvision.ops.roi_align(images, boxes, output_size=output_size, sampling_ratio=4)
+ rend_center_uv = project_points(
+ torch.zeros(batch_size, 1, 3).to(device),
+ K,
+ TCO_pred,
+ )
+ boxes = deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ im_size=(h, w),
+ lamb=lamb,
+ )
+ boxes = torch.cat(
+ (torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes),
+ dim=1,
+ )
+ crops = torchvision.ops.roi_align(
+ images,
+ boxes,
+ output_size=output_size,
+ sampling_ratio=4,
+ )
return boxes[:, 1:], crops
-def deepim_crops_robust(images, obs_boxes, K, TCO_pred, O_vertices, output_size=None, lamb=1.4):
+def deepim_crops_robust(
+ images,
+ obs_boxes,
+ K,
+ TCO_pred,
+ O_vertices,
+ output_size=None,
+ lamb=1.4,
+):
batch_size, _, h, w = images.shape
device = images.device
if output_size is None:
output_size = (h, w)
uv = project_points_robust(O_vertices, K, TCO_pred)
rend_boxes = boxes_from_uv(uv)
- rend_center_uv = project_points_robust(torch.zeros(batch_size, 1, 3).to(device), K, TCO_pred)
- boxes = deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, im_size=(h, w), lamb=lamb)
- boxes = torch.cat((torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes), dim=1)
- crops = torchvision.ops.roi_align(images, boxes, output_size=output_size, sampling_ratio=4)
+ rend_center_uv = project_points_robust(
+ torch.zeros(batch_size, 1, 3).to(device),
+ K,
+ TCO_pred,
+ )
+ boxes = deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ im_size=(h, w),
+ lamb=lamb,
+ )
+ boxes = torch.cat(
+ (torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes),
+ dim=1,
+ )
+ crops = torchvision.ops.roi_align(
+ images,
+ boxes,
+ output_size=output_size,
+ sampling_ratio=4,
+ )
return boxes[:, 1:], crops
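
Both crop helpers end in the same `torchvision.ops.roi_align` call; the leading batch-index column concatenated above is exactly what the `Tensor[K, 5]` box format expects. A minimal self-contained instance:

```python
import torch
import torchvision

images = torch.rand(2, 3, 240, 320)
boxes = torch.tensor(
    [
        [0.0, 10, 20, 110, 120],  # [batch_idx, x1, y1, x2, y2]
        [1.0, 50, 60, 150, 160],
    ]
)
crops = torchvision.ops.roi_align(images, boxes, output_size=(64, 64), sampling_ratio=4)
print(crops.shape)  # torch.Size([2, 3, 64, 64])
```
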
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/distances.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/distances.py
index 9eb320c8..de755e18 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/distances.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/distances.py
@@ -1,5 +1,8 @@
import torch
-from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import transform_pts
+
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import (
+ transform_pts,
+)
def dists_add(TXO_pred, TXO_gt, points):
@@ -13,7 +16,7 @@ def dists_add_symmetric(TXO_pred, TXO_gt, points):
TXO_pred_points = transform_pts(TXO_pred, points)
TXO_gt_points = transform_pts(TXO_gt, points)
dists = TXO_gt_points.unsqueeze(1) - TXO_pred_points.unsqueeze(2)
- dists_norm_squared = (dists ** 2).sum(dim=-1)
+ dists_norm_squared = (dists**2).sum(dim=-1)
assign = dists_norm_squared.argmin(dim=1)
ids_row = torch.arange(dists.shape[0]).unsqueeze(1).repeat(1, dists.shape[1])
ids_col = torch.arange(dists.shape[1]).unsqueeze(0).repeat(dists.shape[0], 1)
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_losses.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_losses.py
index 564ba082..3840a06c 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_losses.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_losses.py
@@ -1,4 +1,5 @@
import torch
+
from .transform_ops import transform_pts
@@ -22,5 +23,9 @@ def compute_ADD_L1_loss(TCO_gt, TCO_pred, points):
bsz = len(TCO_gt)
assert TCO_pred.shape == (bsz, 4, 4) and TCO_gt.shape == (bsz, 4, 4)
assert points.dim() == 3 and points.shape[-1] == 3
- dists = (transform_pts(TCO_gt, points) - transform_pts(TCO_pred, points)).abs().mean(dim=(-1, -2))
+ dists = (
+ (transform_pts(TCO_gt, points) - transform_pts(TCO_pred, points))
+ .abs()
+ .mean(dim=(-1, -2))
+ )
return dists
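
A worked check of the ADD-L1 quantity reshaped above, with a local stand-in for `transform_pts` so the snippet is self-contained: a pure 1 cm z-translation error averages to 0.01 / 3 over the three coordinates.

```python
import torch

def transform_pts_demo(T, pts):
    """Stand-in for transform_pts: (B, 4, 4) poses applied to (B, N, 3) points."""
    return pts @ T[:, :3, :3].transpose(-2, -1) + T[:, :3, 3].unsqueeze(1)

points = torch.rand(1, 100, 3)
TCO_gt = torch.eye(4).unsqueeze(0)
TCO_pred = TCO_gt.clone()
TCO_pred[:, 2, 3] = 0.01  # 1 cm translation error along z
dists = (
    (transform_pts_demo(TCO_gt, points) - transform_pts_demo(TCO_pred, points))
    .abs()
    .mean(dim=(-1, -2))
)
print(dists)  # tensor([0.0033])
```
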
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_ops.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_ops.py
index b22503c4..050a4a61 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_ops.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/mesh_ops.py
@@ -1,5 +1,5 @@
-import torch
import numpy as np
+import torch
def get_meshes_center(pts):
@@ -13,9 +13,18 @@ def get_meshes_center(pts):
def get_meshes_bounding_boxes(pts):
- xmin, xmax = pts[..., 0].min(dim=-1, keepdim=True).values, pts[..., 0].max(dim=-1, keepdim=True).values
- ymin, ymax = pts[..., 1].min(dim=-1, keepdim=True).values, pts[..., 1].max(dim=-1, keepdim=True).values
- zmin, zmax = pts[..., 2].min(dim=-1, keepdim=True).values, pts[..., 2].max(dim=-1, keepdim=True).values
+ xmin, xmax = (
+ pts[..., 0].min(dim=-1, keepdim=True).values,
+ pts[..., 0].max(dim=-1, keepdim=True).values,
+ )
+ ymin, ymax = (
+ pts[..., 1].min(dim=-1, keepdim=True).values,
+ pts[..., 1].max(dim=-1, keepdim=True).values,
+ )
+ zmin, zmax = (
+ pts[..., 2].min(dim=-1, keepdim=True).values,
+ pts[..., 2].max(dim=-1, keepdim=True).values,
+ )
v0 = torch.cat((xmin, ymax, zmax), dim=-1).unsqueeze(1)
v1 = torch.cat((xmax, ymax, zmax), dim=-1).unsqueeze(1)
v2 = torch.cat((xmax, ymin, zmax), dim=-1).unsqueeze(1)
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py
index fed286dc..db8ac1b3 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py
@@ -1,17 +1,23 @@
+from copy import deepcopy
+
import numpy as np
-import trimesh
import torch
-from copy import deepcopy
+import trimesh
+
+from happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection import (
+ TensorCollection,
+)
from .mesh_ops import get_meshes_bounding_boxes, sample_points
from .symmetries import make_bop_symmetries
-from happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection import TensorCollection
class MeshDataBase:
def __init__(self, obj_list):
- self.infos = {obj['label']: obj for obj in obj_list}
- self.meshes = {l: trimesh.load(obj['mesh_path']) for l, obj in self.infos.items()}
+ self.infos = {obj["label"]: obj for obj in obj_list}
+ self.meshes = {
+ m: trimesh.load(obj["mesh_path"]) for m, obj in self.infos.items()
+ }
@staticmethod
def from_object_ds(object_ds):
@@ -26,33 +32,49 @@ def batched(self, aabb=False, resample_n_points=None, n_sym=64):
new_infos = deepcopy(self.infos)
for label, mesh in self.meshes.items():
if aabb:
- points_n = get_meshes_bounding_boxes(torch.as_tensor(mesh.vertices).unsqueeze(0))[0]
+ points_n = get_meshes_bounding_boxes(
+ torch.as_tensor(mesh.vertices).unsqueeze(0),
+ )[0]
elif resample_n_points:
- points_n = torch.tensor(trimesh.sample.sample_surface(mesh, resample_n_points)[0])
+ points_n = torch.tensor(
+ trimesh.sample.sample_surface(mesh, resample_n_points)[0],
+ )
else:
points_n = torch.tensor(mesh.vertices)
points_n = points_n.clone()
infos = self.infos[label]
- if infos['mesh_units'] == 'mm':
+ if infos["mesh_units"] == "mm":
scale = 0.001
- elif infos['mesh_units'] == 'm':
+ elif infos["mesh_units"] == "m":
scale = 1.0
else:
- raise ValueError('Unit not supported', infos['mesh_units'])
+ msg = "Unit not supported"
+ raise ValueError(msg, infos["mesh_units"])
points_n *= scale
- dict_symmetries = {k: infos.get(k, []) for k in ('symmetries_discrete', 'symmetries_continuous')}
- symmetries_n = make_bop_symmetries(dict_symmetries, n_symmetries_continuous=n_sym, scale=scale)
-
- new_infos[label]['n_points'] = points_n.shape[0]
- new_infos[label]['n_sym'] = symmetries_n.shape[0]
+ dict_symmetries = {
+ k: infos.get(k, [])
+ for k in ("symmetries_discrete", "symmetries_continuous")
+ }
+ symmetries_n = make_bop_symmetries(
+ dict_symmetries,
+ n_symmetries_continuous=n_sym,
+ scale=scale,
+ )
+
+ new_infos[label]["n_points"] = points_n.shape[0]
+ new_infos[label]["n_sym"] = symmetries_n.shape[0]
symmetries.append(torch.as_tensor(symmetries_n))
points.append(torch.as_tensor(points_n))
labels.append(label)
labels = np.array(labels)
- points = pad_stack_tensors(points, fill='select_random', deterministic=True)
- symmetries = pad_stack_tensors(symmetries, fill=torch.eye(4), deterministic=True)
+ points = pad_stack_tensors(points, fill="select_random", deterministic=True)
+ symmetries = pad_stack_tensors(
+ symmetries,
+ fill=torch.eye(4),
+ deterministic=True,
+ )
return BatchedMeshes(new_infos, labels, points, symmetries).float()
@@ -62,17 +84,17 @@ def __init__(self, infos, labels, points, symmetries):
self.infos = infos
self.label_to_id = {label: n for n, label in enumerate(labels)}
self.labels = np.asarray(labels)
- self.register_tensor('points', points)
- self.register_tensor('symmetries', symmetries)
+ self.register_tensor("points", points)
+ self.register_tensor("symmetries", symmetries)
@property
def n_sym_mapping(self):
- return {label: obj['n_sym'] for label, obj in self.infos.items()}
+ return {label: obj["n_sym"] for label, obj in self.infos.items()}
def select(self, labels):
- ids = [self.label_to_id[l] for l in labels]
+ ids = [self.label_to_id[label] for label in labels]
return Meshes(
- infos=[self.infos[l] for l in labels],
+ infos=[self.infos[label] for label in labels],
labels=self.labels[ids],
points=self.points[ids],
symmetries=self.symmetries[ids],
@@ -84,8 +106,8 @@ def __init__(self, infos, labels, points, symmetries):
super().__init__()
self.infos = infos
self.labels = np.asarray(labels)
- self.register_tensor('points', points)
- self.register_tensor('symmetries', symmetries)
+ self.register_tensor("points", points)
+ self.register_tensor("symmetries", symmetries)
def select_labels(self, labels):
raise NotImplementedError
@@ -94,7 +116,7 @@ def sample_points(self, n_points, deterministic=False):
return sample_points(self.points, n_points, deterministic=deterministic)
-def pad_stack_tensors(tensor_list, fill='select_random', deterministic=True):
+def pad_stack_tensors(tensor_list, fill="select_random", deterministic=True):
n_max = max([t.shape[0] for t in tensor_list])
if deterministic:
np_random = np.random.RandomState(0)
@@ -108,9 +130,14 @@ def pad_stack_tensors(tensor_list, fill='select_random', deterministic=True):
if isinstance(fill, torch.Tensor):
assert isinstance(fill, torch.Tensor)
assert fill.shape == tensor_n.shape[1:]
- pad = fill.unsqueeze(0).repeat(n_pad, *[1 for _ in fill.shape]).to(tensor_n.device).to(tensor_n.dtype)
+ pad = (
+ fill.unsqueeze(0)
+ .repeat(n_pad, *[1 for _ in fill.shape])
+ .to(tensor_n.device)
+ .to(tensor_n.dtype)
+ )
else:
- assert fill == 'select_random'
+ assert fill == "select_random"
ids_pad = np_random.choice(np.arange(len(tensor_n)), size=n_pad)
pad = tensor_n[ids_pad]
tensor_n_padded = torch.cat((tensor_n, pad), dim=0)
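
What `pad_stack_tensors` buys, spelled out by hand on two symmetry stacks of unequal length, using the `fill=torch.eye(4)` behaviour that `batched()` above relies on:

```python
import torch

a = torch.eye(4).unsqueeze(0)                  # object with 1 symmetry
b = torch.eye(4).unsqueeze(0).repeat(3, 1, 1)  # object with 3 symmetries
n_max = max(a.shape[0], b.shape[0])
pad = torch.eye(4).unsqueeze(0).repeat(n_max - a.shape[0], 1, 1)
stacked = torch.stack([torch.cat([a, pad], dim=0), b])
print(stacked.shape)  # torch.Size([2, 3, 4, 4]) -- one padded stack per object
```
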
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/rotations.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/rotations.py
index e8705f91..ffc743fc 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/rotations.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/rotations.py
@@ -1,14 +1,13 @@
-import torch
import numpy as np
+import torch
import transforms3d
def compute_rotation_matrix_from_ortho6d(poses):
- """
- Code from https://github.com/papagina/RotationContinuity
+ """Code from https://github.com/papagina/RotationContinuity
On the Continuity of Rotation Representations in Neural Networks
Zhou et al. CVPR19
- https://zhouyisjtu.github.io/project_rotation/rotation.html
+ Project page: https://zhouyisjtu.github.io/project_rotation/rotation.html
"""
assert poses.shape[-1] == 6
x_raw = poses[..., 0:3]
@@ -21,10 +20,9 @@ def compute_rotation_matrix_from_ortho6d(poses):
return matrix
-def euler2quat(xyz, axes='sxyz'):
- """
- euler: sxyz
- quaternion: xyzw
+def euler2quat(xyz, axes="sxyz"):
+ """euler: sxyz
+ quaternion: xyzw.
"""
wxyz = transforms3d.euler.euler2quat(*xyz, axes=axes)
xyzw = [*wxyz[1:], wxyz[0]]
@@ -32,12 +30,14 @@ def euler2quat(xyz, axes='sxyz'):
def angle_axis_to_rotation_matrix(angle_axis):
- """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix
+ """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix.
Args:
+ ----
angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
+ -------
Tensor: tensor of 4x4 rotation matrices.
Shape:
@@ -45,9 +45,11 @@ def angle_axis_to_rotation_matrix(angle_axis):
- Output: :math:`(N, 4, 4)`
Example:
+ -------
>>> input = torch.rand(1, 3) # Nx3
>>> output = tgm.angle_axis_to_rotation_matrix(input) # Nx4x4
"""
+
def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
# We want to be careful to only evaluate the square root if the
# norm of the angle_axis vector is greater than zero. Otherwise
@@ -69,14 +71,18 @@ def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
rotation_matrix = torch.cat(
- [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
+ [r00, r01, r02, r10, r11, r12, r20, r21, r22],
+ dim=1,
+ )
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
rotation_matrix = torch.cat(
- [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
+ [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one],
+ dim=1,
+ )
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
@@ -100,8 +106,9 @@ def _compute_rotation_matrix_taylor(angle_axis):
rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)
# fill output matrix with masked values
- rotation_matrix[..., :3, :3] = \
+ rotation_matrix[..., :3, :3] = (
mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
+ )
return rotation_matrix # Nx4x4
@@ -111,9 +118,11 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
+ ----
quaternion (torch.Tensor): tensor with quaternions.
Return:
+ ------
torch.Tensor: tensor with angle axis of rotation.
Shape:
@@ -121,16 +130,17 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
- Output: :math:`(*, 3)`
Example:
+ -------
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3
"""
if not torch.is_tensor(quaternion):
- raise TypeError("Input type is not a torch.Tensor. Got {}".format(
- type(quaternion)))
+ msg = f"Input type is not a torch.Tensor. Got {type(quaternion)}"
+ raise TypeError(msg)
if not quaternion.shape[-1] == 4:
- raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
- .format(quaternion.shape))
+ msg = f"Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}"
+ raise ValueError(msg)
# unpack input and compute conversion
q1: torch.Tensor = quaternion[..., 1]
q2: torch.Tensor = quaternion[..., 2]
@@ -142,7 +152,8 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
two_theta: torch.Tensor = 2.0 * torch.where(
cos_theta < 0.0,
torch.atan2(-sin_theta, -cos_theta),
- torch.atan2(sin_theta, cos_theta))
+ torch.atan2(sin_theta, cos_theta),
+ )
k_pos: torch.Tensor = two_theta / sin_theta
k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
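
For context on `compute_rotation_matrix_from_ortho6d` above: the 6D representation becomes a rotation via Gram-Schmidt. An unbatched restatement under a hypothetical name:

```python
import torch

def rot_from_6d_demo(pose6d):
    """x = first triple, z = normalize(x x y_raw), y = z x x, stacked as columns."""
    x = torch.nn.functional.normalize(pose6d[0:3], dim=0)
    z = torch.nn.functional.normalize(torch.linalg.cross(x, pose6d[3:6]), dim=0)
    y = torch.linalg.cross(z, x)
    return torch.stack([x, y, z], dim=-1)

R = rot_from_6d_demo(torch.tensor([1.0, 0, 0, 0, 1, 0]))
print(R)        # identity for this input
print(R @ R.T)  # orthonormal by construction
```
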
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetric_distances.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetric_distances.py
index dec1d5f7..0240d4d3 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetric_distances.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetric_distances.py
@@ -1,8 +1,8 @@
-import torch
import cosypose_cext
+import torch
-from .transform_ops import transform_pts
from .camera_geometry import project_points
+from .transform_ops import transform_pts
def expand_ids_for_symmetry(labels, n_symmetries):
@@ -26,9 +26,7 @@ def symmetric_distance_batched(T1, T2, labels, mesh_db):
ids_expand, sym_ids = expand_ids_for_symmetry(labels, mesh_db.n_sym_mapping)
points_expand = meshes.points[ids_expand]
sym_expand = meshes.symmetries[ids_expand, sym_ids]
- dists = mesh_points_dist(T1[ids_expand] @ sym_expand,
- T2[ids_expand],
- points_expand)
+ dists = mesh_points_dist(T1[ids_expand] @ sym_expand, T2[ids_expand], points_expand)
min_ids = scatter_argmin(dists, ids_expand)
min_dists = dists[min_ids]
S12 = meshes.symmetries[torch.arange(len(min_ids)), sym_ids[min_ids]]
@@ -112,9 +110,12 @@ def symmetric_distance_reprojected(T1, T2, K, labels, mesh_db):
ids_expand, sym_ids = expand_ids_for_symmetry(labels, mesh_db.n_sym_mapping)
points_expand = meshes.points[ids_expand]
sym_expand = meshes.symmetries[ids_expand, sym_ids]
- dists = reprojected_dist(T1[ids_expand] @ sym_expand,
- T2[ids_expand], K[ids_expand],
- points_expand)
+ dists = reprojected_dist(
+ T1[ids_expand] @ sym_expand,
+ T2[ids_expand],
+ K[ids_expand],
+ points_expand,
+ )
min_ids = scatter_argmin(dists, ids_expand)
min_dists = dists[min_ids]
S12 = meshes.symmetries[torch.arange(len(min_ids)), sym_ids[min_ids]]
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetries.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetries.py
index ff3c84cf..a29460b9 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetries.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/symmetries.py
@@ -1,13 +1,13 @@
import numpy as np
-from .transform import Transform
from .rotations import euler2quat
+from .transform import Transform
def make_bop_symmetries(dict_symmetries, n_symmetries_continuous=8, scale=0.001):
# Note: See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py
- sym_discrete = dict_symmetries.get('symmetries_discrete', [])
- sym_continous = dict_symmetries.get('symmetries_continuous', [])
+ sym_discrete = dict_symmetries.get("symmetries_discrete", [])
+ sym_continous = dict_symmetries.get("symmetries_continuous", [])
all_M_discrete = [Transform((0, 0, 0, 1), (0, 0, 0))]
all_M_continuous = []
all_M = []
@@ -17,8 +17,8 @@ def make_bop_symmetries(dict_symmetries, n_symmetries_continuous=8, scale=0.001)
M = Transform(M)
all_M_discrete.append(M)
for sym_n in sym_continous:
- assert np.allclose(sym_n['offset'], 0)
- axis = np.array(sym_n['axis'])
+ assert np.allclose(sym_n["offset"], 0)
+ axis = np.array(sym_n["axis"])
assert axis.sum() == 1
for n in range(n_symmetries_continuous):
euler = axis * 2 * np.pi * n / n_symmetries_continuous
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/transform.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/transform.py
index ab0d11d9..a49b84dd 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/transform.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/transform.py
@@ -1,6 +1,6 @@
import numpy as np
import pinocchio as pin
-import eigenpy
+
def parse_pose_args(pose_args):
if len(pose_args) == 2:
diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/transform_ops.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/transform_ops.py
index 83ee3c3f..75a5e0bb 100644
--- a/happypose/pose_estimators/cosypose/cosypose/lib3d/transform_ops.py
+++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/transform_ops.py
@@ -1,6 +1,7 @@
-import transforms3d
import numpy as np
import torch
+import transforms3d
+
from .rotations import compute_rotation_matrix_from_ortho6d
@@ -14,7 +15,8 @@ def transform_pts(T, pts):
elif T.dim() == 3:
assert T.shape == (bsz, 4, 4)
else:
- raise ValueError('Unsupported shape for T', T.shape)
+ msg = "Unsupported shape for T"
+ raise ValueError(msg, T.shape)
pts = pts.unsqueeze(-1)
T = T.unsqueeze(-3)
pts_transformed = T[..., :3, :3] @ pts + T[..., :3, [-1]]
@@ -25,7 +27,7 @@ def invert_T(T):
R = T[..., :3, :3]
t = T[..., :3, [-1]]
R_inv = R.transpose(-2, -1)
- t_inv = - R_inv @ t
+ t_inv = -R_inv @ t
T_inv = T.clone()
T_inv[..., :3, :3] = R_inv
T_inv[..., :3, [-1]] = t_inv
@@ -37,14 +39,26 @@ def add_noise(TCO, euler_deg_std=[15, 15, 15], trans_std=[0.01, 0.01, 0.05]):
device = TCO_out.device
bsz = TCO.shape[0]
euler_noise_deg = np.concatenate(
- [np.random.normal(loc=0, scale=euler_deg_std_i, size=bsz)[:, None]
- for euler_deg_std_i in euler_deg_std], axis=1)
+ [
+ np.random.normal(loc=0, scale=euler_deg_std_i, size=bsz)[:, None]
+ for euler_deg_std_i in euler_deg_std
+ ],
+ axis=1,
+ )
euler_noise_rad = euler_noise_deg * np.pi / 180
- R_noise = torch.tensor([transforms3d.euler.euler2mat(*xyz) for xyz in euler_noise_rad]).float().to(device)
+ R_noise = (
+ torch.tensor([transforms3d.euler.euler2mat(*xyz) for xyz in euler_noise_rad])
+ .float()
+ .to(device)
+ )
trans_noise = np.concatenate(
- [np.random.normal(loc=0, scale=trans_std_i, size=bsz)[:, None]
- for trans_std_i in trans_std], axis=1)
+ [
+ np.random.normal(loc=0, scale=trans_std_i, size=bsz)[:, None]
+ for trans_std_i in trans_std
+ ],
+ axis=1,
+ )
trans_noise = torch.tensor(trans_noise).float().to(device)
TCO_out[:, :3, :3] = TCO_out[:, :3, :3] @ R_noise
TCO_out[:, :3, 3] += trans_noise
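
The sign-spacing fix in `invert_T` above sits on the closed-form rigid inverse, which avoids a general 4x4 solve; the identity it relies on, checked numerically:

```python
import torch

T = torch.eye(4)
T[:3, :3] = torch.tensor([[0.0, -1, 0], [1, 0, 0], [0, 0, 1]])  # 90 deg about z
T[:3, 3] = torch.tensor([1.0, 2.0, 3.0])

# inv([R t; 0 1]) = [R^T  -R^T t; 0 1]
T_inv = torch.eye(4)
T_inv[:3, :3] = T[:3, :3].T
T_inv[:3, 3] = -T[:3, :3].T @ T[:3, 3]
print(T_inv @ T)  # identity, up to float round-off
```
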
diff --git a/happypose/pose_estimators/cosypose/cosypose/libmesh/__init__.py b/happypose/pose_estimators/cosypose/cosypose/libmesh/__init__.py
index 84eccf57..e69de29b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/libmesh/__init__.py
+++ b/happypose/pose_estimators/cosypose/cosypose/libmesh/__init__.py
@@ -1,2 +0,0 @@
-from .meshlab_converter import ply_to_obj, downsample_obj
-from .urdf_utils import obj_to_urdf
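With the package-level re-exports removed, callers now import these helpers from the submodules directly. A hypothetical usage sketch (the mesh file names and directory are placeholders):

from pathlib import Path

from happypose.pose_estimators.cosypose.cosypose.libmesh.meshlab_converter import (
    downsample_obj,
    ply_to_obj,
)
from happypose.pose_estimators.cosypose.cosypose.libmesh.urdf_utils import obj_to_urdf

mesh_dir = Path("meshes")  # placeholder directory
ply_to_obj(mesh_dir / "obj_000001.ply", mesh_dir / "obj_000001.obj")
downsample_obj(mesh_dir / "obj_000001.obj", mesh_dir / "obj_000001_down.obj", n_faces=1000)
obj_to_urdf(mesh_dir / "obj_000001_down.obj", mesh_dir / "obj_000001_down.urdf")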
diff --git a/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_converter.py b/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_converter.py
index 5befc1c2..a4442845 100644
--- a/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_converter.py
+++ b/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_converter.py
@@ -1,14 +1,17 @@
-from pathlib import Path
-import shutil
-from PIL import Image
-import PIL
import os
+import shutil
+from pathlib import Path
+
import numpy as np
+import PIL
+from PIL import Image
from plyfile import PlyData
def _get_template(template_name):
- template_path = Path(__file__).resolve().parent / 'meshlab_templates' / template_name
+ template_path = (
+ Path(__file__).resolve().parent / "meshlab_templates" / template_name
+ )
return template_path.read_text()
@@ -16,18 +19,28 @@ def run_meshlab_script(in_path, out_path, script, cd_dir=None, has_textures=True
in_path = Path(in_path)
out_path = Path(out_path)
n = np.random.randint(1e6)
- script_path = Path(f'/dev/shm/{n}.mlx')
+ script_path = Path(f"/dev/shm/{n}.mlx")
script_path.write_text(script)
if cd_dir is None:
- cd_dir = '.'
- command = [f'cd {cd_dir} &&', 'LC_ALL=C',
- 'meshlabserver', '-i', in_path.as_posix(), '-o', out_path.as_posix(),
- '-s', script_path.as_posix(), '-om', 'vn']
+ cd_dir = "."
+ command = [
+ f"cd {cd_dir} &&",
+ "LC_ALL=C",
+ "meshlabserver",
+ "-i",
+ in_path.as_posix(),
+ "-o",
+ out_path.as_posix(),
+ "-s",
+ script_path.as_posix(),
+ "-om",
+ "vn",
+ ]
if has_textures:
- command += ['wt', 'vt']
+ command += ["wt", "vt"]
print(command)
- os.system(' '.join(command))
+ os.system(" ".join(command))
script_path.unlink()
return
@@ -35,10 +48,10 @@ def run_meshlab_script(in_path, out_path, script, cd_dir=None, has_textures=True
def add_texture_to_mtl(obj_path):
    # Sometimes meshlab forgets to put the texture in the output mtl.
obj_path = Path(obj_path)
- texture_name = obj_path.with_suffix('').name + '_texture.png'
- mtl_path = obj_path.with_suffix('.obj.mtl')
+ texture_name = obj_path.with_suffix("").name + "_texture.png"
+ mtl_path = obj_path.with_suffix(".obj.mtl")
mtl = mtl_path.read_text()
- mtl += f'\nmap_Kd {texture_name}'
+ mtl += f"\nmap_Kd {texture_name}"
mtl_path.write_text(mtl)
return
@@ -54,21 +67,24 @@ def ply_to_obj(ply_path, obj_path, texture_size=(1024, 1024)):
ply = PlyData.read(ply_path)
ply_texture = None
for c in ply.comments:
- if 'TextureFile' in c:
- ply_texture = c.split(' ')[-1]
+ if "TextureFile" in c:
+ ply_texture = c.split(" ")[-1]
if ply_texture is None:
- template = _get_template('template_vertexcolor_to_texture.mlx')
- out_texture_path = obj_path.with_suffix('').name + '_texture.png'
+ template = _get_template("template_vertexcolor_to_texture.mlx")
+ out_texture_path = obj_path.with_suffix("").name + "_texture.png"
script = template.format(out_texture_path=out_texture_path)
run_meshlab_script(ply_copied_path, obj_path, script, cd_dir=obj_path.parent)
else:
- template = _get_template('template_ply_texture_to_obj.mlx')
+ template = _get_template("template_ply_texture_to_obj.mlx")
script = template
- ply_texture_name = ply_texture.split('.')[0]
- out_texture_path = obj_path.parent / (ply_texture_name+'_texture.png')
+ ply_texture_name = ply_texture.split(".")[0]
+ out_texture_path = obj_path.parent / (ply_texture_name + "_texture.png")
shutil.copy(ply_path.parent / ply_texture, out_texture_path)
- Image.open(out_texture_path).resize(texture_size, resample=PIL.Image.BILINEAR).save(out_texture_path)
+ Image.open(out_texture_path).resize(
+ texture_size,
+ resample=PIL.Image.BILINEAR,
+ ).save(out_texture_path)
run_meshlab_script(ply_path, obj_path, template)
add_texture_to_mtl(obj_path)
if not is_same:
@@ -78,14 +94,20 @@ def ply_to_obj(ply_path, obj_path, texture_size=(1024, 1024)):
def downsample_obj(in_path, out_path, n_faces=1000):
# Remesh and downsample
- template = _get_template('template_downsample_textures.mlx')
+ template = _get_template("template_downsample_textures.mlx")
script = template.format(n_faces=n_faces)
- run_meshlab_script(in_path, out_path, script, has_textures=True, cd_dir=in_path.parent)
+ run_meshlab_script(
+ in_path,
+ out_path,
+ script,
+ has_textures=True,
+ cd_dir=in_path.parent,
+ )
def sample_points(in_path, out_path, n_points=2000):
    # Sample points on the surface of the mesh
- template_path = Path(__file__).resolve().parent / 'template_sample_points.mlx'
+ template_path = Path(__file__).resolve().parent / "template_sample_points.mlx"
template = template_path.read_text()
script = template.format(n_points=n_points)
run_meshlab_script(in_path, out_path, script)
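run_meshlab_script above joins the argument list into a single string and hands it to os.system, using a "cd {cd_dir} &&" prefix to set the working directory. A sketch of an equivalent invocation with subprocess.run, where cwd= replaces the cd prefix and LC_ALL is passed through the environment; this assumes a meshlabserver binary on PATH and is an alternative, not what the module does:

import os
import subprocess
from pathlib import Path

def run_meshlab(in_path, out_path, script_path, cd_dir=".", has_textures=True):
    cmd = [
        "meshlabserver",
        "-i", Path(in_path).as_posix(),
        "-o", Path(out_path).as_posix(),
        "-s", Path(script_path).as_posix(),
        "-om", "vn",
    ]
    if has_textures:
        cmd += ["wt", "vt"]
    env = {**os.environ, "LC_ALL": "C"}
    # cwd= replaces the "cd {cd_dir} &&" shell prefix; check=True surfaces failures
    subprocess.run(cmd, cwd=cd_dir, env=env, check=True)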
diff --git a/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_templates/template_sample_points.mlx b/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_templates/template_sample_points.mlx
index 71c0498b..ef2ffd6d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_templates/template_sample_points.mlx
+++ b/happypose/pose_estimators/cosypose/cosypose/libmesh/meshlab_templates/template_sample_points.mlx
@@ -5,4 +5,4 @@
-
\ No newline at end of file
+
diff --git a/happypose/pose_estimators/cosypose/cosypose/libmesh/urdf_utils.py b/happypose/pose_estimators/cosypose/cosypose/libmesh/urdf_utils.py
index f7cccb00..01e8d35f 100644
--- a/happypose/pose_estimators/cosypose/cosypose/libmesh/urdf_utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/libmesh/urdf_utils.py
@@ -1,5 +1,5 @@
-from pathlib import Path
import xml.etree.ElementTree as ET
+from pathlib import Path
from xml.dom import minidom
@@ -8,43 +8,43 @@ def obj_to_urdf(obj_path, urdf_path):
urdf_path = Path(urdf_path)
assert urdf_path.parent == obj_path.parent
- geometry = ET.Element('geometry')
- mesh = ET.SubElement(geometry, 'mesh')
- mesh.set('filename', obj_path.name)
- mesh.set('scale', '1.0 1.0 1.0')
+ geometry = ET.Element("geometry")
+ mesh = ET.SubElement(geometry, "mesh")
+ mesh.set("filename", obj_path.name)
+ mesh.set("scale", "1.0 1.0 1.0")
- material = ET.Element('material')
- material.set('name', 'mat_part0')
- color = ET.SubElement(material, 'color')
- color.set('rgba', '1.0 1.0 1.0 1.0')
+ material = ET.Element("material")
+ material.set("name", "mat_part0")
+ color = ET.SubElement(material, "color")
+ color.set("rgba", "1.0 1.0 1.0 1.0")
- inertial = ET.Element('inertial')
- origin = ET.SubElement(inertial, 'origin')
- origin.set('rpy', '0 0 0')
- origin.set('xyz', '0.0 0.0 0.0')
+ inertial = ET.Element("inertial")
+ origin = ET.SubElement(inertial, "origin")
+ origin.set("rpy", "0 0 0")
+ origin.set("xyz", "0.0 0.0 0.0")
- mass = ET.SubElement(inertial, 'mass')
- mass.set('value', '0.1')
+ mass = ET.SubElement(inertial, "mass")
+ mass.set("value", "0.1")
- inertia = ET.SubElement(inertial, 'inertia')
- inertia.set('ixx', '1')
- inertia.set('ixy', '0')
- inertia.set('ixz', '0')
- inertia.set('iyy', '1')
- inertia.set('iyz', '0')
- inertia.set('izz', '1')
+ inertia = ET.SubElement(inertial, "inertia")
+ inertia.set("ixx", "1")
+ inertia.set("ixy", "0")
+ inertia.set("ixz", "0")
+ inertia.set("iyy", "1")
+ inertia.set("iyz", "0")
+ inertia.set("izz", "1")
- robot = ET.Element('robot')
- robot.set('name', obj_path.with_suffix('').name)
+ robot = ET.Element("robot")
+ robot.set("name", obj_path.with_suffix("").name)
- link = ET.SubElement(robot, 'link')
- link.set('name', 'base_link')
+ link = ET.SubElement(robot, "link")
+ link.set("name", "base_link")
- visual = ET.SubElement(link, 'visual')
+ visual = ET.SubElement(link, "visual")
visual.append(geometry)
visual.append(material)
- collision = ET.SubElement(link, 'collision')
+ collision = ET.SubElement(link, "collision")
collision.append(geometry)
link.append(inertial)
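obj_to_urdf assembles the URDF with xml.etree.ElementTree; the minidom import above is the usual companion for pretty-printing the result before writing it out. A minimal sketch of that pattern (element names mirror the ones above, the robot name is hypothetical):

import xml.etree.ElementTree as ET
from xml.dom import minidom

robot = ET.Element("robot")
robot.set("name", "example_object")  # hypothetical object name
link = ET.SubElement(robot, "link")
link.set("name", "base_link")

# serialize compactly with ET, then re-parse with minidom to pretty-print
xml_str = minidom.parseString(ET.tostring(robot)).toprettyxml(indent="  ")
print(xml_str)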
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py
index 7a2b622f..6c2a3688 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py
@@ -1,32 +1,31 @@
-"""
-Copied from https://github.com/lukemelas/EfficientNet-PyTorch
-"""
+"""Copied from https://github.com/lukemelas/EfficientNet-PyTorch."""
import torch
from torch import nn
from torch.nn import functional as F
from .efficientnet_utils import (
- round_filters,
- round_repeats,
+ MemoryEfficientSwish,
+ Swish,
drop_connect,
- get_same_padding_conv2d,
- get_model_params,
efficientnet_params,
+ get_model_params,
+ get_same_padding_conv2d,
load_pretrained_weights,
- Swish,
- MemoryEfficientSwish,
+ round_filters,
+ round_repeats,
)
class MBConvBlock(nn.Module):
- """
- Mobile Inverted Residual Bottleneck Block
+ """Mobile Inverted Residual Bottleneck Block.
Args:
+ ----
block_args (namedtuple): BlockArgs, see above
global_params (namedtuple): GlobalParam, see above
Attributes:
+ ----------
has_se (bool): Whether the block contains a Squeeze and Excitation layer.
"""
@@ -35,7 +34,9 @@ def __init__(self, block_args, global_params):
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum
self._bn_eps = global_params.batch_norm_epsilon
- self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
+ self.has_se = (self._block_args.se_ratio is not None) and (
+ 0 < self._block_args.se_ratio <= 1
+ )
self.id_skip = block_args.id_skip # skip connection and drop connect
# Get static or dynamic convolution depending on image size
@@ -43,38 +44,76 @@ def __init__(self, block_args, global_params):
# Expansion phase
inp = self._block_args.input_filters # number of input channels
- oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
+ oup = (
+ self._block_args.input_filters * self._block_args.expand_ratio
+ ) # number of output channels
if self._block_args.expand_ratio != 1:
- self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
- self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
+ self._expand_conv = Conv2d(
+ in_channels=inp,
+ out_channels=oup,
+ kernel_size=1,
+ bias=False,
+ )
+ self._bn0 = nn.BatchNorm2d(
+ num_features=oup,
+ momentum=self._bn_mom,
+ eps=self._bn_eps,
+ )
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
self._depthwise_conv = Conv2d(
- in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
- kernel_size=k, stride=s, bias=False)
- self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
+ in_channels=oup,
+ out_channels=oup,
+ groups=oup, # groups makes it depthwise
+ kernel_size=k,
+ stride=s,
+ bias=False,
+ )
+ self._bn1 = nn.BatchNorm2d(
+ num_features=oup,
+ momentum=self._bn_mom,
+ eps=self._bn_eps,
+ )
# Squeeze and Excitation layer, if desired
if self.has_se:
- num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
- self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
- self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
+ num_squeezed_channels = max(
+ 1,
+ int(self._block_args.input_filters * self._block_args.se_ratio),
+ )
+ self._se_reduce = Conv2d(
+ in_channels=oup,
+ out_channels=num_squeezed_channels,
+ kernel_size=1,
+ )
+ self._se_expand = Conv2d(
+ in_channels=num_squeezed_channels,
+ out_channels=oup,
+ kernel_size=1,
+ )
# Output phase
final_oup = self._block_args.output_filters
- self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
- self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
+ self._project_conv = Conv2d(
+ in_channels=oup,
+ out_channels=final_oup,
+ kernel_size=1,
+ bias=False,
+ )
+ self._bn2 = nn.BatchNorm2d(
+ num_features=final_oup,
+ momentum=self._bn_mom,
+ eps=self._bn_eps,
+ )
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
- """
- :param inputs: input tensor
+ """:param inputs: input tensor
:param drop_connect_rate: drop connect rate (float, between 0 and 1)
:return: output of block
"""
-
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
@@ -90,35 +129,47 @@ def forward(self, inputs, drop_connect_rate=None):
x = self._bn2(self._project_conv(x))
# Skip connection and drop connect
- input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
- if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
+ input_filters, output_filters = (
+ self._block_args.input_filters,
+ self._block_args.output_filters,
+ )
+ if (
+ self.id_skip
+ and self._block_args.stride == 1
+ and input_filters == output_filters
+ ):
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
def set_swish(self, memory_efficient=True):
- """Sets swish function as memory efficient (for training) or standard (for export)"""
+ """
+ Sets swish function as memory efficient (for training) or standard (for export).
+ """
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
"""
- An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
+ An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained
+ methods.
Args:
+ ----
blocks_args (list): A list of BlockArgs to construct blocks
global_params (namedtuple): A set of GlobalParams shared between blocks
Example:
+ -------
model = EfficientNet.from_pretrained('efficientnet-b0')
"""
def __init__(self, blocks_args=None, global_params=None, in_channels=3):
super().__init__()
- assert isinstance(blocks_args, list), 'blocks_args should be a list'
- assert len(blocks_args) > 0, 'block args must be greater than 0'
+ assert isinstance(blocks_args, list), "blocks_args should be a list"
+ assert len(blocks_args) > 0, "block args must be greater than 0"
self._global_params = global_params
self._blocks_args = blocks_args
@@ -131,25 +182,46 @@ def __init__(self, blocks_args=None, global_params=None, in_channels=3):
# Stem
# in_channels = 3 # rgb
- out_channels = round_filters(32, self._global_params) # number of output channels
- self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
- self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
+ out_channels = round_filters(
+ 32,
+ self._global_params,
+ ) # number of output channels
+ self._conv_stem = Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=2,
+ bias=False,
+ )
+ self._bn0 = nn.BatchNorm2d(
+ num_features=out_channels,
+ momentum=bn_mom,
+ eps=bn_eps,
+ )
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
-
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
- input_filters=round_filters(block_args.input_filters, self._global_params),
- output_filters=round_filters(block_args.output_filters, self._global_params),
- num_repeat=round_repeats(block_args.num_repeat, self._global_params)
+ input_filters=round_filters(
+ block_args.input_filters,
+ self._global_params,
+ ),
+ output_filters=round_filters(
+ block_args.output_filters,
+ self._global_params,
+ ),
+ num_repeat=round_repeats(block_args.num_repeat, self._global_params),
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(MBConvBlock(block_args, self._global_params))
if block_args.num_repeat > 1:
- block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
+ block_args = block_args._replace(
+ input_filters=block_args.output_filters,
+ stride=1,
+ )
for _ in range(block_args.num_repeat - 1):
self._blocks.append(MBConvBlock(block_args, self._global_params))
@@ -157,7 +229,11 @@ def __init__(self, blocks_args=None, global_params=None, in_channels=3):
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
- self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
+ self._bn1 = nn.BatchNorm2d(
+ num_features=out_channels,
+ momentum=bn_mom,
+ eps=bn_eps,
+ )
# Final linear layer
# self._avg_pooling = nn.AdaptiveAvgPool2d(1)
@@ -166,14 +242,15 @@ def __init__(self, blocks_args=None, global_params=None, in_channels=3):
self._swish = MemoryEfficientSwish()
def set_swish(self, memory_efficient=True):
- """Sets swish function as memory efficient (for training) or standard (for export)"""
+ """
+ Sets swish function as memory efficient (for training) or standard (for export).
+ """
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_features(self, inputs):
- """ Returns output of the final convolution layer """
-
+ """Returns output of the final convolution layer."""
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
@@ -190,8 +267,11 @@ def extract_features(self, inputs):
return x
def forward(self, inputs):
- """ Calls extract_features to extract features, applies final linear layer, and returns logits. """
- bs = inputs.size(0)
+ """
+ Calls extract_features to extract features, applies final linear layer, and
+ returns logits.
+ """
+ inputs.size(0)
# Convolution layers
x = self.extract_features(inputs)
@@ -210,15 +290,32 @@ def from_name(cls, model_name, override_params=None, in_channels=3):
return cls(blocks_args, global_params, in_channels=in_channels)
@classmethod
- def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
- model = cls.from_name(model_name, override_params={'num_classes': num_classes})
- load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
+ def from_pretrained(
+ cls,
+ model_name,
+ advprop=False,
+ num_classes=1000,
+ in_channels=3,
+ ):
+ model = cls.from_name(model_name, override_params={"num_classes": num_classes})
+ load_pretrained_weights(
+ model,
+ model_name,
+ load_fc=(num_classes == 1000),
+ advprop=advprop,
+ )
if in_channels != 3:
- Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)
+ Conv2d = get_same_padding_conv2d(image_size=model._global_params.image_size)
out_channels = round_filters(32, model._global_params)
- model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
+ model._conv_stem = Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ stride=2,
+ bias=False,
+ )
return model
-
+
@classmethod
def get_image_size(cls, model_name):
cls._check_model_name_is_valid(model_name)
@@ -227,7 +324,7 @@ def get_image_size(cls, model_name):
@classmethod
def _check_model_name_is_valid(cls, model_name):
- """ Validates model name. """
- valid_models = ['efficientnet-b'+str(i) for i in range(9)]
+ """Validates model name."""
+ valid_models = ["efficientnet-b" + str(i) for i in range(9)]
if model_name not in valid_models:
- raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
+ raise ValueError("model_name should be one of: " + ", ".join(valid_models))
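A short usage sketch for the class above: from_name builds the architecture without downloading weights, and in_channels != 3 swaps in a stem sized for the requested input (a 6-channel input mirrors the image+render concatenation used by the pose models):

import torch

from happypose.pose_estimators.cosypose.cosypose.models.efficientnet import EfficientNet

model = EfficientNet.from_name("efficientnet-b0", in_channels=6)
x = torch.randn(2, 6, 224, 224)
features = model.extract_features(x)  # output of the final convolution layer
print(features.shape)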
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py
index 70608a70..bf7db135 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py
@@ -1,13 +1,15 @@
-"""
-Copied from https://github.com/lukemelas/EfficientNet-PyTorch
-This file contains helper functions for building the model and for loading model parameters.
-These helper functions are built to mirror those in the official TensorFlow implementation.
+"""Copied from https://github.com/lukemelas/EfficientNet-PyTorch
+This file contains helper functions for building the model and for loading model
+parameters.
+These helper functions are built to mirror those in the official TensorFlow
+implementation.
"""
-import re
-import math
import collections
+import math
+import re
from functools import partial
+
import torch
from torch import nn
from torch.nn import functional as F
@@ -19,15 +21,36 @@
# Parameters for the entire model (stem, all blocks, and head)
-GlobalParams = collections.namedtuple('GlobalParams', [
- 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
- 'num_classes', 'width_coefficient', 'depth_coefficient',
- 'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
+GlobalParams = collections.namedtuple(
+ "GlobalParams",
+ [
+ "batch_norm_momentum",
+ "batch_norm_epsilon",
+ "dropout_rate",
+ "num_classes",
+ "width_coefficient",
+ "depth_coefficient",
+ "depth_divisor",
+ "min_depth",
+ "drop_connect_rate",
+ "image_size",
+ ],
+)
# Parameters for an individual model block
-BlockArgs = collections.namedtuple('BlockArgs', [
- 'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
- 'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
+BlockArgs = collections.namedtuple(
+ "BlockArgs",
+ [
+ "kernel_size",
+ "num_repeat",
+ "input_filters",
+ "output_filters",
+ "expand_ratio",
+ "id_skip",
+ "stride",
+ "se_ratio",
+ ],
+)
# Change namedtuple defaults
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
@@ -52,13 +75,14 @@ class MemoryEfficientSwish(nn.Module):
def forward(self, x):
return SwishImplementation.apply(x)
+
class Swish(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
def round_filters(filters, global_params):
- """ Calculate and round number of filters based on depth multiplier. """
+ """Calculate and round number of filters based on depth multiplier."""
multiplier = global_params.width_coefficient
if not multiplier:
return filters
@@ -73,7 +97,7 @@ def round_filters(filters, global_params):
def round_repeats(repeats, global_params):
- """ Round number of filters based on depth multiplier. """
+ """Round number of filters based on depth multiplier."""
multiplier = global_params.depth_coefficient
if not multiplier:
return repeats
@@ -81,20 +105,27 @@ def round_repeats(repeats, global_params):
def drop_connect(inputs, p, training):
- """ Drop connect. """
- if not training: return inputs
+ """Drop connect."""
+ if not training:
+ return inputs
batch_size = inputs.shape[0]
keep_prob = 1 - p
random_tensor = keep_prob
- random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
+ random_tensor += torch.rand(
+ [batch_size, 1, 1, 1],
+ dtype=inputs.dtype,
+ device=inputs.device,
+ )
binary_tensor = torch.floor(random_tensor)
output = inputs / keep_prob * binary_tensor
return output
def get_same_padding_conv2d(image_size=None):
- """ Chooses static padding if you have specified an image size, and dynamic padding otherwise.
- Static padding is necessary for ONNX exporting of models. """
+ """Chooses static padding if you have specified an image size, and dynamic padding
+ otherwise.
+ Static padding is necessary for ONNX exporting of models.
+ """
if image_size is None:
return Conv2dDynamicSamePadding
else:
@@ -102,10 +133,28 @@ def get_same_padding_conv2d(image_size=None):
class Conv2dDynamicSamePadding(nn.Conv2d):
- """ 2D Convolutions like TensorFlow, for a dynamic image size """
-
- def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
- super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
+ """2D Convolutions like TensorFlow, for a dynamic image size."""
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride=1,
+ dilation=1,
+ groups=1,
+ bias=True,
+ ):
+ super().__init__(
+ in_channels,
+ out_channels,
+ kernel_size,
+ stride,
+ 0,
+ dilation,
+ groups,
+ bias,
+ )
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
def forward(self, x):
@@ -116,39 +165,69 @@ def forward(self, x):
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
- x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])
- return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+ x = F.pad(
+ x,
+ [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2],
+ )
+ return F.conv2d(
+ x,
+ self.weight,
+ self.bias,
+ self.stride,
+ self.padding,
+ self.dilation,
+ self.groups,
+ )
class Conv2dStaticSamePadding(nn.Conv2d):
- """ 2D Convolutions like TensorFlow, for a fixed image size"""
-
- def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
+ """2D Convolutions like TensorFlow, for a fixed image size."""
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ image_size=None,
+ **kwargs,
+ ):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2
# Calculate padding based on image size and save it
assert image_size is not None
- ih, iw = image_size if type(image_size) == list else [image_size, image_size]
+ ih, iw = (
+ image_size if isinstance(image_size, list) else [image_size, image_size]
+ )
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)
if pad_h > 0 or pad_w > 0:
- self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))
+ self.static_padding = nn.ZeroPad2d(
+ (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2),
+ )
else:
self.static_padding = Identity()
def forward(self, x):
x = self.static_padding(x)
- x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
+ x = F.conv2d(
+ x,
+ self.weight,
+ self.bias,
+ self.stride,
+ self.padding,
+ self.dilation,
+ self.groups,
+ )
return x
class Identity(nn.Module):
- def __init__(self, ):
- super(Identity, self).__init__()
+ def __init__(self):
+ super().__init__()
def forward(self, input):
return input
@@ -160,74 +239,77 @@ def forward(self, input):
def efficientnet_params(model_name):
- """ Map EfficientNet model name to parameter coefficients. """
+ """Map EfficientNet model name to parameter coefficients."""
params_dict = {
# Coefficients: width,depth,res,dropout
- 'efficientnet-b0': (1.0, 1.0, 224, 0.2),
- 'efficientnet-b1': (1.0, 1.1, 240, 0.2),
- 'efficientnet-b2': (1.1, 1.2, 260, 0.3),
- 'efficientnet-b3': (1.2, 1.4, 300, 0.3),
- 'efficientnet-b4': (1.4, 1.8, 380, 0.4),
- 'efficientnet-b5': (1.6, 2.2, 456, 0.4),
- 'efficientnet-b6': (1.8, 2.6, 528, 0.5),
- 'efficientnet-b7': (2.0, 3.1, 600, 0.5),
- 'efficientnet-b8': (2.2, 3.6, 672, 0.5),
- 'efficientnet-l2': (4.3, 5.3, 800, 0.5),
+ "efficientnet-b0": (1.0, 1.0, 224, 0.2),
+ "efficientnet-b1": (1.0, 1.1, 240, 0.2),
+ "efficientnet-b2": (1.1, 1.2, 260, 0.3),
+ "efficientnet-b3": (1.2, 1.4, 300, 0.3),
+ "efficientnet-b4": (1.4, 1.8, 380, 0.4),
+ "efficientnet-b5": (1.6, 2.2, 456, 0.4),
+ "efficientnet-b6": (1.8, 2.6, 528, 0.5),
+ "efficientnet-b7": (2.0, 3.1, 600, 0.5),
+ "efficientnet-b8": (2.2, 3.6, 672, 0.5),
+ "efficientnet-l2": (4.3, 5.3, 800, 0.5),
}
return params_dict[model_name]
-class BlockDecoder(object):
- """ Block Decoder for readability, straight from the official TensorFlow repository """
+class BlockDecoder:
+ """
+ Block Decoder for readability, straight from the official TensorFlow repository.
+ """
@staticmethod
def _decode_block_string(block_string):
- """ Gets a block through a string notation of arguments. """
+ """Gets a block through a string notation of arguments."""
assert isinstance(block_string, str)
- ops = block_string.split('_')
+ ops = block_string.split("_")
options = {}
for op in ops:
- splits = re.split(r'(\d.*)', op)
+ splits = re.split(r"(\d.*)", op)
if len(splits) >= 2:
key, value = splits[:2]
options[key] = value
# Check stride
- assert (('s' in options and len(options['s']) == 1) or
- (len(options['s']) == 2 and options['s'][0] == options['s'][1]))
+ assert ("s" in options and len(options["s"]) == 1) or (
+ len(options["s"]) == 2 and options["s"][0] == options["s"][1]
+ )
return BlockArgs(
- kernel_size=int(options['k']),
- num_repeat=int(options['r']),
- input_filters=int(options['i']),
- output_filters=int(options['o']),
- expand_ratio=int(options['e']),
- id_skip=('noskip' not in block_string),
- se_ratio=float(options['se']) if 'se' in options else None,
- stride=[int(options['s'][0])])
+ kernel_size=int(options["k"]),
+ num_repeat=int(options["r"]),
+ input_filters=int(options["i"]),
+ output_filters=int(options["o"]),
+ expand_ratio=int(options["e"]),
+ id_skip=("noskip" not in block_string),
+ se_ratio=float(options["se"]) if "se" in options else None,
+ stride=[int(options["s"][0])],
+ )
@staticmethod
def _encode_block_string(block):
"""Encodes a block to a string."""
args = [
- 'r%d' % block.num_repeat,
- 'k%d' % block.kernel_size,
- 's%d%d' % (block.strides[0], block.strides[1]),
- 'e%s' % block.expand_ratio,
- 'i%d' % block.input_filters,
- 'o%d' % block.output_filters
+ "r%d" % block.num_repeat,
+ "k%d" % block.kernel_size,
+ "s%d%d" % (block.strides[0], block.strides[1]),
+ "e%s" % block.expand_ratio,
+ "i%d" % block.input_filters,
+ "o%d" % block.output_filters,
]
if 0 < block.se_ratio <= 1:
- args.append('se%s' % block.se_ratio)
+ args.append("se%s" % block.se_ratio)
if block.id_skip is False:
- args.append('noskip')
- return '_'.join(args)
+ args.append("noskip")
+ return "_".join(args)
@staticmethod
def decode(string_list):
- """
- Decodes a list of string notations to specify blocks inside the network.
+ """Decodes a list of string notations to specify blocks inside the network.
:param string_list: a list of strings, each string is a notation of block
:return: a list of BlockArgs namedtuples of block args
@@ -240,8 +322,7 @@ def decode(string_list):
@staticmethod
def encode(blocks_args):
- """
- Encodes a list of BlockArgs to a list of strings.
+ """Encodes a list of BlockArgs to a list of strings.
:param blocks_args: a list of BlockArgs namedtuples of block args
:return: a list of strings, each string is a notation of block
@@ -252,15 +333,23 @@ def encode(blocks_args):
return block_strings
-def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
- drop_connect_rate=0.2, image_size=None, num_classes=1000):
- """ Creates a efficientnet model. """
-
+def efficientnet(
+ width_coefficient=None,
+ depth_coefficient=None,
+ dropout_rate=0.2,
+ drop_connect_rate=0.2,
+ image_size=None,
+ num_classes=1000,
+):
+ """Creates a efficientnet model."""
blocks_args = [
- 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',
- 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',
- 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',
- 'r1_k3_s11_e6_i192_o320_se0.25',
+ "r1_k3_s11_e1_i32_o16_se0.25",
+ "r2_k3_s22_e6_i16_o24_se0.25",
+ "r2_k5_s22_e6_i24_o40_se0.25",
+ "r3_k3_s22_e6_i40_o80_se0.25",
+ "r3_k5_s11_e6_i80_o112_se0.25",
+ "r4_k5_s22_e6_i112_o192_se0.25",
+ "r1_k3_s11_e6_i192_o320_se0.25",
]
blocks_args = BlockDecoder.decode(blocks_args)
@@ -282,55 +371,63 @@ def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.
def get_model_params(model_name, override_params):
- """ Get the block args and global params for a given model """
- if model_name.startswith('efficientnet'):
+ """Get the block args and global params for a given model."""
+ if model_name.startswith("efficientnet"):
w, d, s, p = efficientnet_params(model_name)
# note: all models have drop connect rate = 0.2
blocks_args, global_params = efficientnet(
- width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)
+ width_coefficient=w,
+ depth_coefficient=d,
+ dropout_rate=p,
+ image_size=s,
+ )
else:
- raise NotImplementedError('model name is not pre-defined: %s' % model_name)
+ raise NotImplementedError("model name is not pre-defined: %s" % model_name)
if override_params:
- # ValueError will be raised here if override_params has fields not included in global_params.
+ # ValueError will be raised here if override_params has fields not included in
+ # global_params.
global_params = global_params._replace(**override_params)
return blocks_args, global_params
url_map = {
- 'efficientnet-b0': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b0-355c32eb.pth',
- 'efficientnet-b1': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b1-f1951068.pth',
- 'efficientnet-b2': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b2-8bb594d6.pth',
- 'efficientnet-b3': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b3-5fb5a3c3.pth',
- 'efficientnet-b4': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b4-6ed6700e.pth',
- 'efficientnet-b5': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b5-b6417697.pth',
- 'efficientnet-b6': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b6-c76e70fd.pth',
- 'efficientnet-b7': 'https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b7-dcc49843.pth',
+ "efficientnet-b0": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b0-355c32eb.pth",
+ "efficientnet-b1": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b1-f1951068.pth",
+ "efficientnet-b2": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b2-8bb594d6.pth",
+ "efficientnet-b3": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b3-5fb5a3c3.pth",
+ "efficientnet-b4": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b4-6ed6700e.pth",
+ "efficientnet-b5": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b5-b6417697.pth",
+ "efficientnet-b6": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b6-c76e70fd.pth",
+ "efficientnet-b7": "https://publicmodels.blob.core.windows.net/container/aa/efficientnet-b7-dcc49843.pth",
}
url_map_advprop = {
- 'efficientnet-b0': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b0-b64d5a18.pth',
- 'efficientnet-b1': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b1-0f3ce85a.pth',
- 'efficientnet-b2': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b2-6e9d97e5.pth',
- 'efficientnet-b3': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b3-cdd7c0f4.pth',
- 'efficientnet-b4': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b4-44fb3a87.pth',
- 'efficientnet-b5': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b5-86493f6b.pth',
- 'efficientnet-b6': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b6-ac80338e.pth',
- 'efficientnet-b7': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b7-4652b6dd.pth',
- 'efficientnet-b8': 'https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b8-22a8fe65.pth',
+ "efficientnet-b0": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b0-b64d5a18.pth",
+ "efficientnet-b1": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b1-0f3ce85a.pth",
+ "efficientnet-b2": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b2-6e9d97e5.pth",
+ "efficientnet-b3": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b3-cdd7c0f4.pth",
+ "efficientnet-b4": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b4-44fb3a87.pth",
+ "efficientnet-b5": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b5-86493f6b.pth",
+ "efficientnet-b6": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b6-ac80338e.pth",
+ "efficientnet-b7": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b7-4652b6dd.pth",
+ "efficientnet-b8": "https://publicmodels.blob.core.windows.net/container/advprop/efficientnet-b8-22a8fe65.pth",
}
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
- """ Loads pretrained weights, and downloads if loading for the first time. """
+ """Loads pretrained weights, and downloads if loading for the first time."""
# AutoAugment or Advprop (different preprocessing)
url_map_ = url_map_advprop if advprop else url_map
state_dict = model_zoo.load_url(url_map_[model_name])
if load_fc:
model.load_state_dict(state_dict)
else:
- state_dict.pop('_fc.weight')
- state_dict.pop('_fc.bias')
+ state_dict.pop("_fc.weight")
+ state_dict.pop("_fc.bias")
res = model.load_state_dict(state_dict, strict=False)
- assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
- print('Loaded pretrained weights for {}'.format(model_name))
+ assert set(res.missing_keys) == {
+ "_fc.weight",
+ "_fc.bias",
+ }, "issue loading pretrained weights"
+ print(f"Loaded pretrained weights for {model_name}")
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/flownet.py b/happypose/pose_estimators/cosypose/cosypose/models/flownet.py
index 04c11e32..8c27220c 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/flownet.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/flownet.py
@@ -1,7 +1,8 @@
# See implementation here: https://github.com/ClementPinard/FlowNetPytorch/blob/master/models/FlowNetS.py
import torch
import torch.nn as nn
-from torch.nn.init import kaiming_normal_, constant_
+from torch.nn.init import constant_, kaiming_normal_
+
from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
@@ -9,19 +10,19 @@ class FlowNetS(nn.Module):
expansion = 1
def __init__(self, n_inputs=6, batchNorm=False):
- super(FlowNetS,self).__init__()
+ super().__init__()
self.batchNorm = batchNorm
- self.conv1 = conv(self.batchNorm, n_inputs, 64, kernel_size=7, stride=2)
- self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
- self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
- self.conv3_1 = conv(self.batchNorm, 256, 256)
- self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
- self.conv4_1 = conv(self.batchNorm, 512, 512)
- self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
- self.conv5_1 = conv(self.batchNorm, 512, 512)
- self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
- self.conv6_1 = conv(self.batchNorm,1024, 1024)
+ self.conv1 = conv(self.batchNorm, n_inputs, 64, kernel_size=7, stride=2)
+ self.conv2 = conv(self.batchNorm, 64, 128, kernel_size=5, stride=2)
+ self.conv3 = conv(self.batchNorm, 128, 256, kernel_size=5, stride=2)
+ self.conv3_1 = conv(self.batchNorm, 256, 256)
+ self.conv4 = conv(self.batchNorm, 256, 512, stride=2)
+ self.conv4_1 = conv(self.batchNorm, 512, 512)
+ self.conv5 = conv(self.batchNorm, 512, 512, stride=2)
+ self.conv5_1 = conv(self.batchNorm, 512, 512)
+ self.conv6 = conv(self.batchNorm, 512, 1024, stride=2)
+ self.conv6_1 = conv(self.batchNorm, 1024, 1024)
# self.deconv5 = deconv(1024,512)
# self.deconv4 = deconv(1026,256)
@@ -80,34 +81,55 @@ def forward(self, x):
return out_conv6
def weight_parameters(self):
- return [param for name, param in self.named_parameters() if 'weight' in name]
+ return [param for name, param in self.named_parameters() if "weight" in name]
def bias_parameters(self):
- return [param for name, param in self.named_parameters() if 'bias' in name]
+ return [param for name, param in self.named_parameters() if "bias" in name]
def conv(batchNorm, in_planes, out_planes, kernel_size=3, stride=1):
if batchNorm:
return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=False),
+ nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ bias=False,
+ ),
nn.BatchNorm2d(out_planes),
- nn.LeakyReLU(0.1,inplace=True)
+ nn.LeakyReLU(0.1, inplace=True),
)
else:
return nn.Sequential(
- nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
- nn.LeakyReLU(0.1,inplace=True)
+ nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=kernel_size,
+ stride=stride,
+ padding=(kernel_size - 1) // 2,
+ bias=True,
+ ),
+ nn.LeakyReLU(0.1, inplace=True),
)
def predict_flow(in_planes):
- return nn.Conv2d(in_planes,2,kernel_size=3,stride=1,padding=1,bias=False)
+ return nn.Conv2d(in_planes, 2, kernel_size=3, stride=1, padding=1, bias=False)
def deconv(in_planes, out_planes):
return nn.Sequential(
- nn.ConvTranspose2d(in_planes, out_planes, kernel_size=4, stride=2, padding=1, bias=False),
- nn.LeakyReLU(0.1,inplace=True)
+ nn.ConvTranspose2d(
+ in_planes,
+ out_planes,
+ kernel_size=4,
+ stride=2,
+ padding=1,
+ bias=False,
+ ),
+ nn.LeakyReLU(0.1, inplace=True),
)
@@ -115,14 +137,16 @@ def crop_like(input, target):
if input.size()[2:] == target.size()[2:]:
return input
else:
- return input[:, :, :target.size(2), :target.size(3)]
+ return input[:, :, : target.size(2), : target.size(3)]
def flownet_pretrained(n_inputs):
assert n_inputs == 6
model = FlowNetS(n_inputs=n_inputs).cuda()
# Download pretrained checkpoint here https://github.com/ClementPinard/FlowNetPytorch/blob/master/models/FlowNetS.py
- state_dict = torch.load(LOCAL_DATA_DIR / 'flownets_EPE1.951.pth.tar')
- state_dict = {k: v for k, v in state_dict.items() if ('conv' in k and 'deconv' not in k)}
+ state_dict = torch.load(LOCAL_DATA_DIR / "flownets_EPE1.951.pth.tar")
+ state_dict = {
+ k: v for k, v in state_dict.items() if ("conv" in k and "deconv" not in k)
+ }
model.load_state_dict(state_dict)
return model
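flownet_pretrained keeps only encoder weights ("conv" in the key but not "deconv") before loading, since the decoder layers of this FlowNetS are commented out. A CPU-only sketch of the same filtering; the checkpoint file name is the one referenced above and is assumed to exist locally:

import torch

from happypose.pose_estimators.cosypose.cosypose.models.flownet import FlowNetS

state_dict = torch.load("flownets_EPE1.951.pth.tar", map_location="cpu")
encoder_only = {
    k: v for k, v in state_dict.items() if "conv" in k and "deconv" not in k
}
model = FlowNetS(n_inputs=6)
model.load_state_dict(encoder_only)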
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/mask_rcnn.py b/happypose/pose_estimators/cosypose/cosypose/models/mask_rcnn.py
index 2748802a..e1b1a390 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/mask_rcnn.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/mask_rcnn.py
@@ -1,19 +1,26 @@
-from torchvision.models.detection.mask_rcnn import MaskRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
+from torchvision.models.detection.mask_rcnn import MaskRCNN
from torchvision.models.detection.rpn import AnchorGenerator
class DetectorMaskRCNN(MaskRCNN):
- def __init__(self, input_resize=(240, 320), n_classes=2,
- backbone_str='resnet50-fpn',
- anchor_sizes=((32, ), (64, ), (128, ), (256, ), (512, ))):
-
- assert backbone_str == 'resnet50-fpn'
- backbone = resnet_fpn_backbone('resnet50', pretrained=False)
+ def __init__(
+ self,
+ input_resize=(240, 320),
+ n_classes=2,
+ backbone_str="resnet50-fpn",
+ anchor_sizes=((32,), (64,), (128,), (256,), (512,)),
+ ):
+ assert backbone_str == "resnet50-fpn"
+ backbone = resnet_fpn_backbone("resnet50", pretrained=False)
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
- super().__init__(backbone=backbone, num_classes=n_classes,
- rpn_anchor_generator=rpn_anchor_generator,
- max_size=max(input_resize), min_size=min(input_resize))
+ super().__init__(
+ backbone=backbone,
+ num_classes=n_classes,
+ rpn_anchor_generator=rpn_anchor_generator,
+ max_size=max(input_resize),
+ min_size=min(input_resize),
+ )
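A usage sketch for the detector above: the AnchorGenerator pairs one aspect-ratio tuple with each anchor size (one per FPN level), and min_size/max_size drive torchvision's internal resize. In eval mode the model takes a list of image tensors and returns one dict of boxes/labels/scores/masks per image (this assumes a torchvision version where resnet_fpn_backbone still accepts pretrained=):

import torch

from happypose.pose_estimators.cosypose.cosypose.models.mask_rcnn import DetectorMaskRCNN

detector = DetectorMaskRCNN(input_resize=(240, 320), n_classes=2)  # n_classes includes background
detector.eval()
with torch.no_grad():
    detections = detector([torch.rand(3, 240, 320)])  # list with one dict per image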
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/pose.py b/happypose/pose_estimators/cosypose/cosypose/models/pose.py
index f2f9fc18..48e2a5c3 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/pose.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/pose.py
@@ -2,26 +2,34 @@
from torch import nn
from happypose.pose_estimators.cosypose.cosypose.config import DEBUG_DATA_DIR
-from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import get_K_crop_resize, boxes_from_uv
-
-from happypose.pose_estimators.cosypose.cosypose.lib3d.cropping import deepim_crops_robust as deepim_crops
-from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import project_points_robust as project_points
-
+from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import (
+ boxes_from_uv,
+ get_K_crop_resize,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import (
+ project_points_robust as project_points,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import (
+ apply_imagespace_predictions,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.cropping import (
+ deepim_crops_robust as deepim_crops,
+)
from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import (
- compute_rotation_matrix_from_ortho6d, compute_rotation_matrix_from_quaternions)
-from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import apply_imagespace_predictions
-
-from happypose.pose_estimators.megapose.models.pose_rigid import PosePredictorOutputCosypose
-
+ compute_rotation_matrix_from_ortho6d,
+ compute_rotation_matrix_from_quaternions,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+from happypose.pose_estimators.megapose.models.pose_rigid import (
+ PosePredictorOutputCosypose,
+)
from happypose.toolbox.renderer import Panda3dLightData
+
logger = get_logger(__name__)
class PosePredictor(nn.Module):
- def __init__(self, backbone, renderer,
- mesh_db, render_size=(240, 320),
- pose_dim=9):
+ def __init__(self, backbone, renderer, mesh_db, render_size=(240, 320), pose_dim=9):
super().__init__()
self.backbone = backbone
@@ -32,12 +40,12 @@ def __init__(self, backbone, renderer,
n_features = backbone.n_features
- self.heads = dict()
+ self.heads = {}
self.pose_fc = nn.Linear(n_features, pose_dim, bias=True)
- self.heads['pose'] = self.pose_fc
+ self.heads["pose"] = self.pose_fc
self.debug = False
- self.tmp_debug = dict()
+ self.tmp_debug = {}
def enable_debug(self):
self.debug = True
@@ -55,15 +63,28 @@ def crop_inputs(self, images, K, TCO, labels):
uv = project_points(points, K, TCO)
boxes_rend = boxes_from_uv(uv)
boxes_crop, images_cropped = deepim_crops(
- images=images, obs_boxes=boxes_rend, K=K,
- TCO_pred=TCO, O_vertices=points, output_size=self.render_size, lamb=1.4
+ images=images,
+ obs_boxes=boxes_rend,
+ K=K,
+ TCO_pred=TCO,
+ O_vertices=points,
+ output_size=self.render_size,
+ lamb=1.4,
+ )
+ K_crop = get_K_crop_resize(
+ K=K.clone(),
+ boxes=boxes_crop,
+ orig_size=images.shape[-2:],
+ crop_resize=self.render_size,
)
- K_crop = get_K_crop_resize(K=K.clone(), boxes=boxes_crop,
- orig_size=images.shape[-2:], crop_resize=self.render_size)
if self.debug:
self.tmp_debug.update(
boxes_rend=boxes_rend,
- rend_center_uv=project_points(torch.zeros(bsz, 1, 3).to(K.device), K, TCO),
+ rend_center_uv=project_points(
+ torch.zeros(bsz, 1, 3).to(K.device),
+ K,
+ TCO,
+ ),
uv=uv,
boxes_crop=boxes_crop,
)
@@ -77,14 +98,15 @@ def update_pose(self, TCO, K_crop, pose_outputs):
dR = compute_rotation_matrix_from_quaternions(pose_outputs[:, 0:4])
vxvyvz = pose_outputs[:, 4:7]
else:
- raise ValueError(f'pose_dim={self.pose_dim} not supported')
+ msg = f"pose_dim={self.pose_dim} not supported"
+ raise ValueError(msg)
TCO_updated = apply_imagespace_predictions(TCO, K_crop, vxvyvz, dR)
return TCO_updated
def net_forward(self, x):
x = self.backbone(x)
x = x.flatten(2).mean(dim=-1)
- outputs = dict()
+ outputs = {}
for k, head in self.heads.items():
outputs[k] = head(x)
return outputs
@@ -95,33 +117,44 @@ def forward(self, images, K, labels, TCO, n_iterations=1):
assert TCO.shape == (bsz, 4, 4)
assert len(labels) == bsz
- outputs = dict()
+ outputs = {}
TCO_input = TCO
for n in range(n_iterations):
TCO_input = TCO_input.detach()
- images_crop, K_crop, boxes_rend, boxes_crop = self.crop_inputs(images, K, TCO_input, labels)
-
- ambient_light = Panda3dLightData(light_type="ambient", color=(1.0, 1.0, 1.0, 1.0))
+ images_crop, K_crop, boxes_rend, boxes_crop = self.crop_inputs(
+ images,
+ K,
+ TCO_input,
+ labels,
+ )
+
+ ambient_light = Panda3dLightData(
+ light_type="ambient",
+ color=(1.0, 1.0, 1.0, 1.0),
+ )
light_datas = [[ambient_light] for _ in range(len(labels))]
-
- renders = self.renderer.render(labels=labels,
- TCO=TCO_input,
- K=K_crop, resolution=self.render_size,
- light_datas=light_datas,)
+
+ renders = self.renderer.render(
+ labels=labels,
+ TCO=TCO_input,
+ K=K_crop,
+ resolution=self.render_size,
+ light_datas=light_datas,
+ )
renders = renders.rgbs
x = torch.cat((images_crop, renders), dim=1)
model_outputs = self.net_forward(x)
- TCO_output = self.update_pose(TCO_input, K_crop, model_outputs['pose'])
+ TCO_output = self.update_pose(TCO_input, K_crop, model_outputs["pose"])
- outputs[f'iteration={n+1}'] = {
- 'TCO_input': TCO_input,
- 'TCO_output': TCO_output,
- 'K_crop': K_crop,
- 'model_outputs': model_outputs,
- 'boxes_rend': boxes_rend,
- 'boxes_crop': boxes_crop,
+ outputs[f"iteration={n+1}"] = {
+ "TCO_input": TCO_input,
+ "TCO_output": TCO_output,
+ "K_crop": K_crop,
+ "model_outputs": model_outputs,
+ "boxes_rend": boxes_rend,
+ "boxes_crop": boxes_crop,
}
outputs[f"iteration={n+1}"] = PosePredictorOutputCosypose(
@@ -139,14 +172,14 @@ def forward(self, images, K, labels, TCO, n_iterations=1):
TCO_input = TCO_output
if self.debug:
- self.tmp_debug.update(outputs[f'iteration={n+1}'])
+ self.tmp_debug.update(outputs[f"iteration={n+1}"])
self.tmp_debug.update(
images=images,
images_crop=images_crop,
renders=renders,
)
- path = DEBUG_DATA_DIR / f'debug_iter={n+1}.pth.tar'
- logger.info(f'Wrote debug data: {path}')
+ path = DEBUG_DATA_DIR / f"debug_iter={n+1}.pth.tar"
+ logger.info(f"Wrote debug data: {path}")
torch.save(self.tmp_debug, path)
return outputs
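The forward pass above runs render-and-compare refinement: every iteration detaches the current pose estimate, re-renders the object at that pose, regresses a correction from the concatenated crop+render input, and stores the result under an "iteration={n+1}" key before feeding TCO_output back in as the next TCO_input. A self-contained toy of that control flow, with a dummy identity correction standing in for the network and renderer:

import torch

def refine(TCO, n_iterations=2):
    outputs = {}
    TCO_input = TCO
    for n in range(n_iterations):
        TCO_input = TCO_input.detach()  # cut the graph between iterations
        dT = torch.eye(4)  # the real model regresses this correction per step
        TCO_output = TCO_input @ dT
        outputs[f"iteration={n+1}"] = {
            "TCO_input": TCO_input,
            "TCO_output": TCO_output,
        }
        TCO_input = TCO_output  # refined pose seeds the next iteration
    return outputs

outs = refine(torch.eye(4))
final_pose = outs["iteration=2"]["TCO_output"]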
diff --git a/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py b/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py
index 3710f998..4ef42a6e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py
+++ b/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py
@@ -1,28 +1,38 @@
import torch
-from torch import nn
import torch.nn.functional as F
+from torch import nn
def conv3x3(in_planes, out_planes, stride=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
- padding=1, bias=False)
+ """3x3 convolution with padding."""
+ return nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False,
+ )
class BasicBlockV2(nn.Module):
r"""BasicBlock V2 from
- `"Identity Mappings in Deep Residual Networks"`_ paper.
+ `"Identity Mappings in Deep Residual Networks"
+ `_ paper.
This is used for ResNet V2 for 18, 34 layers.
+
Args:
+ ----
inplanes (int): number of input channels.
planes (int): number of output channels.
stride (int): stride size.
downsample (Module) optional downsample module to downsample the input.
"""
+
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
- super(BasicBlockV2, self).__init__()
+ super().__init__()
self.bn1 = nn.BatchNorm2d(inplanes)
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn2 = nn.BatchNorm2d(planes)
@@ -41,12 +51,18 @@ def forward(self, x):
class WideResNet(nn.Module):
def __init__(self, block, layers, width, num_inputs=3, maxpool=True):
- super(WideResNet, self).__init__()
+ super().__init__()
config = [int(v * width) for v in (64, 128, 256, 512)]
self.inplanes = config[0]
- self.conv1 = nn.Conv2d(num_inputs, self.inplanes, kernel_size=5,
- stride=2, padding=2, bias=False)
+ self.conv1 = nn.Conv2d(
+ num_inputs,
+ self.inplanes,
+ kernel_size=5,
+ stride=2,
+ padding=2,
+ bias=False,
+ )
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
if maxpool:
@@ -60,8 +76,7 @@ def __init__(self, block, layers, width, num_inputs=3, maxpool=True):
for m in self.modules():
if isinstance(m, nn.Conv2d):
- nn.init.kaiming_normal_(
- m.weight, mode='fan_out', nonlinearity='relu')
+ nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@@ -69,12 +84,17 @@ def __init__(self, block, layers, width, num_inputs=3, maxpool=True):
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
- downsample = nn.Conv2d(self.inplanes, planes * block.expansion,
- kernel_size=1, stride=stride, bias=False)
-
- layers = [block(self.inplanes, planes, stride, downsample), ]
+ downsample = nn.Conv2d(
+ self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False,
+ )
+
+ layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
- for i in range(1, blocks):
+ for _i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
@@ -96,17 +116,27 @@ def forward(self, x):
class WideResNet18(WideResNet):
def __init__(self, n_inputs=3, width=1.0):
- super().__init__(block=BasicBlockV2, layers=CONFIG[18], width=1.0, num_inputs=n_inputs)
+ super().__init__(
+ block=BasicBlockV2,
+ layers=CONFIG[18],
+ width=1.0,
+ num_inputs=n_inputs,
+ )
self.n_features = int(512 * width)
class WideResNet34(WideResNet):
def __init__(self, n_inputs=3, width=1.0):
- super().__init__(block=BasicBlockV2, layers=CONFIG[34], width=1.0, num_inputs=n_inputs)
+ super().__init__(
+ block=BasicBlockV2,
+ layers=CONFIG[34],
+ width=1.0,
+ num_inputs=n_inputs,
+ )
self.n_features = int(512 * width)
-if __name__ == '__main__':
+if __name__ == "__main__":
    model = WideResNet(BasicBlockV2, [2, 2, 2, 2], 0.5, num_inputs=3)
x = torch.randn(1, 3, 224, 224)
outputs = model(x)
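A quick usage sketch for the wrappers above; the 6-channel input matches the image+render concatenation in pose.py:

import torch

from happypose.pose_estimators.cosypose.cosypose.models.wide_resnet import WideResNet18

backbone = WideResNet18(n_inputs=6)
x = torch.randn(1, 6, 224, 224)
features = backbone(x)
print(features.shape, backbone.n_features)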
diff --git a/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py b/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py
index 11be9e4a..6d2a0156 100644
--- a/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py
+++ b/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py
@@ -1,37 +1,43 @@
-import numpy as np
from collections import defaultdict
-import torch
+
+import numpy as np
import pandas as pd
+import torch
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
-import cosypose.utils.tensor_collection as tc
-
-from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T, compute_transform_from_pose9d
-from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import project_points
-from happypose.pose_estimators.cosypose.cosypose.lib3d.symmetric_distances import symmetric_distance_reprojected
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.lib3d.camera_geometry import (
+ project_points,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.symmetric_distances import (
+ symmetric_distance_reprojected,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import (
+ compute_transform_from_pose9d,
+ invert_T,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+from happypose.pose_estimators.cosypose.cosypose.utils.timer import Timer
from .ransac import make_obj_infos
-
-from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.utils.timer import Timer
logger = get_logger(__name__)
def make_view_groups(pairs_TC1C2):
- views = pairs_TC1C2.infos.loc[:, ['view1', 'view2']].values.T
+ views = pairs_TC1C2.infos.loc[:, ["view1", "view2"]].values.T
views = np.unique(views.reshape(-1))
- view_df = pd.DataFrame(dict(view_id=views, view_local_id=np.arange(len(views))))
- view_to_id = view_df.set_index('view_id')
- view1 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view1'], 'view_local_id'].values
- view2 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view2'], 'view_local_id'].values
+ view_df = pd.DataFrame({"view_id": views, "view_local_id": np.arange(len(views))})
+ view_to_id = view_df.set_index("view_id")
+ view1 = view_to_id.loc[pairs_TC1C2.infos.loc[:, "view1"], "view_local_id"].values
+ view2 = view_to_id.loc[pairs_TC1C2.infos.loc[:, "view2"], "view_local_id"].values
data = np.ones(len(view1))
n_views = len(views)
graph = csr_matrix((data, (view1, view2)), shape=(n_views, n_views))
- n_components, ids = connected_components(graph, directed=True, connection='strong')
- view_df['view_group'] = ids
- view_df = view_df.drop(columns=['view_local_id'])
+ n_components, ids = connected_components(graph, directed=True, connection="strong")
+ view_df["view_group"] = ids
+ view_df = view_df.drop(columns=["view_local_id"])
return view_df
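
`make_view_groups` clusters views that are linked by at least one relative-pose hypothesis, using strongly connected components on a sparse adjacency matrix. A self-contained sketch of that grouping with hypothetical view indices:

```python
# Views 0 and 1 are linked in both directions, view 2 has no link: two groups.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

view1 = np.array([0, 1])
view2 = np.array([1, 0])
graph = csr_matrix((np.ones(2), (view1, view2)), shape=(3, 3))
n_components, ids = connected_components(graph, directed=True, connection="strong")
print(n_components, ids)  # 2, [0 0 1]
```
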
@@ -41,52 +47,72 @@ class SamplerError(Exception):
class MultiviewRefinement:
def __init__(self, candidates, cameras, pairs_TC1C2, mesh_db):
-
self.device, self.dtype = candidates.device, candidates.poses.dtype
self.mesh_db = mesh_db
cameras = cameras.to(self.device).to(self.dtype)
pairs_TC1C2 = pairs_TC1C2.to(self.device).to(self.dtype)
- view_ids = np.unique(candidates.infos['view_id'])
+ view_ids = np.unique(candidates.infos["view_id"])
keep_ids = np.logical_and(
- np.isin(pairs_TC1C2.infos['view1'], view_ids),
- np.isin(pairs_TC1C2.infos['view2'], view_ids),
+ np.isin(pairs_TC1C2.infos["view1"], view_ids),
+ np.isin(pairs_TC1C2.infos["view2"], view_ids),
)
pairs_TC1C2 = pairs_TC1C2[np.where(keep_ids)[0]]
- keep_ids = np.where(np.isin(cameras.infos['view_id'], view_ids))[0]
+ keep_ids = np.where(np.isin(cameras.infos["view_id"], view_ids))[0]
cameras = cameras[keep_ids]
self.cam_infos = cameras.infos
- self.view_to_id = {view_id: n for n, view_id in enumerate(self.cam_infos['view_id'])}
+ self.view_to_id = {
+ view_id: n for n, view_id in enumerate(self.cam_infos["view_id"])
+ }
self.K = cameras.K
self.n_views = len(self.cam_infos)
self.obj_infos = make_obj_infos(candidates)
- self.obj_to_id = {obj_id: n for n, obj_id in enumerate(self.obj_infos['obj_id'])}
- self.obj_points = self.mesh_db.select(self.obj_infos['label'].values).points
+ self.obj_to_id = {
+ obj_id: n for n, obj_id in enumerate(self.obj_infos["obj_id"])
+ }
+ self.obj_points = self.mesh_db.select(self.obj_infos["label"].values).points
self.n_points = self.obj_points.shape[1]
self.n_objects = len(self.obj_infos)
self.cand = candidates
self.cand_TCO = candidates.poses
- self.cand_labels = candidates.infos['label']
- self.cand_view_ids = [self.view_to_id[view_id] for view_id in candidates.infos['view_id']]
- self.cand_obj_ids = [self.obj_to_id[obj_id] for obj_id in candidates.infos['obj_id']]
+ self.cand_labels = candidates.infos["label"]
+ self.cand_view_ids = [
+ self.view_to_id[view_id] for view_id in candidates.infos["view_id"]
+ ]
+ self.cand_obj_ids = [
+ self.obj_to_id[obj_id] for obj_id in candidates.infos["obj_id"]
+ ]
self.n_candidates = len(self.cand_TCO)
- self.visibility_matrix = self.make_visibility_matrix(self.cand_view_ids, self.cand_obj_ids)
-
- self.v2v1_TC2C1_map = {(self.view_to_id[v2], self.view_to_id[v1]): invert_T(TC1C2) for
- (v1, v2, TC1C2) in zip(pairs_TC1C2.infos['view1'],
- pairs_TC1C2.infos['view2'],
- pairs_TC1C2.TC1C2)}
- self.ov_TCO_cand_map = {(o, v): TCO for (o, v, TCO) in zip(self.cand_obj_ids,
- self.cand_view_ids,
- self.cand_TCO)}
+ self.visibility_matrix = self.make_visibility_matrix(
+ self.cand_view_ids,
+ self.cand_obj_ids,
+ )
+
+ self.v2v1_TC2C1_map = {
+ (self.view_to_id[v2], self.view_to_id[v1]): invert_T(TC1C2)
+ for (v1, v2, TC1C2) in zip(
+ pairs_TC1C2.infos["view1"],
+ pairs_TC1C2.infos["view2"],
+ pairs_TC1C2.TC1C2,
+ )
+ }
+ self.ov_TCO_cand_map = {
+ (o, v): TCO
+ for (o, v, TCO) in zip(self.cand_obj_ids, self.cand_view_ids, self.cand_TCO)
+ }
self.residuals_ids = self.make_residuals_ids()
def make_visibility_matrix(self, cand_view_ids, cand_obj_ids):
- matrix = torch.zeros(self.n_objects, self.n_views, dtype=torch.int, device=self.device)
+ matrix = torch.zeros(
+ self.n_objects,
+ self.n_views,
+ dtype=torch.int,
+ device=self.device,
+ )
matrix[cand_obj_ids, cand_view_ids] = 1
return matrix
@@ -100,18 +126,30 @@ def make_residuals_ids(self):
view_ids.append(self.cand_view_ids[cand_id])
point_ids.append(point_id)
xy_ids.append(xy_id)
- residuals_ids = dict(
- cand_id=cand_ids,
- obj_id=obj_ids,
- view_id=view_ids,
- point_id=point_ids,
- xy_id=xy_ids,
- )
+ residuals_ids = {
+ "cand_id": cand_ids,
+ "obj_id": obj_ids,
+ "view_id": view_ids,
+ "point_id": point_ids,
+ "xy_id": xy_ids,
+ }
return residuals_ids
def sample_initial_TWO_TWC(self, seed):
- TWO = torch.zeros(self.n_objects, 4, 4, dtype=self.dtype, device=self.device) * float('nan')
- TWC = torch.zeros(self.n_views, 4, 4, dtype=self.dtype, device=self.device) * float('nan')
+ TWO = torch.zeros(
+ self.n_objects,
+ 4,
+ 4,
+ dtype=self.dtype,
+ device=self.device,
+ ) * float("nan")
+ TWC = torch.zeros(
+ self.n_views,
+ 4,
+ 4,
+ dtype=self.dtype,
+ device=self.device,
+ ) * float("nan")
object_to_views = defaultdict(set)
for v in range(self.n_views):
@@ -125,7 +163,7 @@ def sample_initial_TWO_TWC(self, seed):
w = views_ordered[0]
TWC[w] = torch.eye(4, 4, device=self.device, dtype=self.dtype)
- views_initialized = {w, }
+ views_initialized = {w}
views_to_initialize = set(np.arange(self.n_views)) - views_initialized
n_pass = 20
@@ -146,7 +184,8 @@ def sample_initial_TWO_TWC(self, seed):
break
n += 1
if n >= n_pass:
- raise SamplerError('Cannot find an initialization')
+ msg = "Cannot find an initialization"
+ raise SamplerError(msg)
# Initialize objects
for o in objects_ordered:
@@ -158,7 +197,10 @@ def sample_initial_TWO_TWC(self, seed):
@staticmethod
def extract_pose9d(T):
- T_9d = torch.cat((T[..., :3, :2].transpose(-1, -2).flatten(-2, -1), T[..., :3, -1]), dim=-1)
+ T_9d = torch.cat(
+ (T[..., :3, :2].transpose(-1, -2).flatten(-2, -1), T[..., :3, -1]),
+ dim=-1,
+ )
return T_9d
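
`extract_pose9d` flattens the first two columns of the rotation matrix (the continuous "6D" rotation representation of Zhou et al.) and appends the translation, giving 9 numbers per pose. A sketch of the inverse mapping via Gram-Schmidt, which is what `compute_transform_from_pose9d` is presumed to do up to numerical details:

```python
import torch

def pose9d_to_T(p9d: torch.Tensor) -> torch.Tensor:
    # Re-orthonormalize the two stored rotation columns, complete the basis
    # with a cross product, then append the translation.
    c1 = torch.nn.functional.normalize(p9d[:3], dim=0)
    c2 = p9d[3:6] - (c1 * p9d[3:6]).sum() * c1
    c2 = torch.nn.functional.normalize(c2, dim=0)
    c3 = torch.cross(c1, c2, dim=0)
    T = torch.eye(4)
    T[:3, :3] = torch.stack([c1, c2, c3], dim=1)
    T[:3, 3] = p9d[6:]
    return T

T = torch.eye(4)
T[:3, 3] = torch.tensor([0.1, 0.2, 0.3])
p9d = torch.cat((T[:3, :2].transpose(-1, -2).flatten(), T[:3, 3]))
print(torch.allclose(pose9d_to_T(p9d), T))  # True
```
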
def align_TCO_cand(self, TWO_9d, TCW_9d):
@@ -166,19 +208,25 @@ def align_TCO_cand(self, TWO_9d, TCW_9d):
TCW = compute_transform_from_pose9d(TCW_9d)
TCO = TCW[self.cand_view_ids] @ TWO[self.cand_obj_ids]
- dists, sym = symmetric_distance_reprojected(self.cand_TCO, TCO,
- self.K[self.cand_view_ids],
- self.cand_labels, self.mesh_db)
+ dists, sym = symmetric_distance_reprojected(
+ self.cand_TCO,
+ TCO,
+ self.K[self.cand_view_ids],
+ self.cand_labels,
+ self.mesh_db,
+ )
TCO_cand_aligned = self.cand_TCO @ sym
return dists, TCO_cand_aligned
def forward_jacobian(self, TWO_9d, TCW_9d, residuals_threshold):
_, TCO_cand_aligned = self.align_TCO_cand(TWO_9d, TCW_9d)
- # NOTE: This could be *much* faster by computing gradients manually, reducing number of operations.
- cand_ids, view_ids, obj_ids, point_ids, xy_ids = [
- self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')
- ]
+ # NOTE: This could be *much* faster by computing gradients manually, reducing
+ # number of operations.
+ cand_ids, view_ids, obj_ids, point_ids, xy_ids = (
+ self.residuals_ids[k]
+ for k in ("cand_id", "view_id", "obj_id", "point_id", "xy_id")
+ )
n_residuals = len(cand_ids) # Number of residuals
arange_n = torch.arange(n_residuals)
@@ -199,13 +247,19 @@ def forward_jacobian(self, TWO_9d, TCW_9d, residuals_threshold):
points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)
TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]
- TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]
+ TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[
+ arange_n,
+ xy_ids,
+ ]
y = TCO_cand_points_n
yhat = TCO_points_n
errors = y - yhat
- residuals = (errors ** 2)
- residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)
+ residuals = errors**2
+ residuals = torch.min(
+ residuals,
+ torch.ones_like(residuals) * residuals_threshold,
+ )
loss = residuals.mean()
if torch.is_grad_enabled():
@@ -221,10 +275,18 @@ def compute_lm_step(self, errors, J, lambd):
h = torch.pinverse(A.cpu()).cuda() @ b
return h.flatten()
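
`compute_lm_step` solves the damped normal equations of Levenberg-Marquardt. Written out on a tiny problem, as a generic sketch of the update (JᵀJ + λ·diag(JᵀJ)) h = Jᵀe, using a dense solve rather than the pseudo-inverse above:

```python
import torch

def lm_step(errors: torch.Tensor, J: torch.Tensor, lambd: float) -> torch.Tensor:
    # Damped Gauss-Newton step: a larger lambd shrinks the step toward scaled
    # gradient descent, a smaller lambd approaches plain Gauss-Newton.
    JtJ = J.t() @ J
    A = JtJ + lambd * torch.diag(torch.diagonal(JtJ))
    b = J.t() @ errors
    return torch.linalg.solve(A, b)

J = torch.tensor([[1.0, 0.0], [0.0, 2.0], [1.0, 1.0]])
errors = torch.tensor([0.5, -0.2, 0.1])
print(lm_step(errors, J, lambd=1e-3))
```
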
- def optimize_lm(self, TWO_9d, TCW_9d,
- optimize_cameras=True,
- n_iterations=50, residuals_threshold=25,
- lambd0=1e-3, L_down=9, L_up=11, eps=1e-5):
+ def optimize_lm(
+ self,
+ TWO_9d,
+ TCW_9d,
+ optimize_cameras=True,
+ n_iterations=50,
+ residuals_threshold=25,
+ lambd0=1e-3,
+ L_down=9,
+ L_up=11,
+ eps=1e-5,
+ ):
# See http://people.duke.edu/~hpgavin/ce281/lm.pdf
n_params_TWO = TWO_9d.numel()
n_params_TCW = TCW_9d.numel()
@@ -236,15 +298,18 @@ def optimize_lm(self, TWO_9d, TCW_9d,
done = False
history = defaultdict(list)
for n in range(n_iterations):
-
if not prev_iter_is_update:
- errors, loss, J_TWO, J_TCW = self.forward_jacobian(TWO_9d, TCW_9d, residuals_threshold)
-
- history['TWO_9d'].append(TWO_9d)
- history['TCW_9d'].append(TCW_9d)
- history['loss'].append(loss)
- history['lambda'].append(lambd)
- history['iteration'].append(n)
+ errors, loss, J_TWO, J_TCW = self.forward_jacobian(
+ TWO_9d,
+ TCW_9d,
+ residuals_threshold,
+ )
+
+ history["TWO_9d"].append(TWO_9d)
+ history["TCW_9d"].append(TCW_9d)
+ history["loss"].append(loss)
+ history["lambda"].append(lambd)
+ history["iteration"].append(n)
if done:
break
@@ -261,7 +326,11 @@ def optimize_lm(self, TWO_9d, TCW_9d,
else:
TCW_9d_updated = TCW_9d
- errors, next_loss, J_TWO, J_TCW = self.forward_jacobian(TWO_9d_updated, TCW_9d_updated, residuals_threshold)
+ errors, next_loss, J_TWO, J_TCW = self.forward_jacobian(
+ TWO_9d_updated,
+ TCW_9d_updated,
+ residuals_threshold,
+ )
rho = loss - next_loss
if rho.abs() < eps:
@@ -303,19 +372,19 @@ def make_scene_infos(self, TWO_9d, TCW_9d):
cameras = tc.PandasTensorCollection(
infos=self.cam_infos,
TWC=TWC,
- K=self.K
+ K=self.K,
)
return objects, cameras
def convert_history(self, history):
- history['objects'] = []
- history['cameras'] = []
- for n in range(len(history['iteration'])):
- TWO_9d = history['TWO_9d'][n]
- TCW_9d = history['TCW_9d'][n]
+ history["objects"] = []
+ history["cameras"] = []
+ for n in range(len(history["iteration"])):
+ TWO_9d = history["TWO_9d"][n]
+ TCW_9d = history["TCW_9d"][n]
objects, cameras = self.make_scene_infos(TWO_9d, TCW_9d)
- history['objects'].append(objects)
- history['cameras'].append(cameras)
+ history["objects"].append(objects)
+ history["cameras"].append(cameras)
return history
def solve(self, sample_n_init=1, **lm_kwargs):
@@ -324,12 +393,17 @@ def solve(self, sample_n_init=1, **lm_kwargs):
timer_misc = Timer()
timer_init.start()
- TWO_9d_init, TCW_9d_init = self.robust_initialization_TWO_TCW(n_init=sample_n_init)
+ TWO_9d_init, TCW_9d_init = self.robust_initialization_TWO_TCW(
+ n_init=sample_n_init,
+ )
timer_init.pause()
timer_opt.start()
TWO_9d_opt, TCW_9d_opt, history = self.optimize_lm(
- TWO_9d_init, TCW_9d_init, **lm_kwargs)
+ TWO_9d_init,
+ TCW_9d_init,
+ **lm_kwargs,
+ )
timer_opt.pause()
timer_misc.start()
@@ -338,14 +412,14 @@ def solve(self, sample_n_init=1, **lm_kwargs):
history = self.convert_history(history)
timer_misc.pause()
- outputs = dict(
- objects_init=objects_init,
- cameras_init=cameras_init,
- objects=objects,
- cameras=cameras,
- history=history,
- time_init=timer_init.stop(),
- time_opt=timer_opt.stop(),
- time_misc=timer_misc.stop(),
- )
+ outputs = {
+ "objects_init": objects_init,
+ "cameras_init": cameras_init,
+ "objects": objects,
+ "cameras": cameras,
+ "history": history,
+ "time_init": timer_init.stop(),
+ "time_opt": timer_opt.stop(),
+ "time_misc": timer_misc.stop(),
+ }
return outputs
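
A hypothetical call site for the refinement, assuming `refinement` is a `MultiviewRefinement` constructed from the matching stage; the key names follow the dicts built above, but nothing here is a documented API:

```python
outputs = refinement.solve(sample_n_init=5, n_iterations=50)
history = outputs["history"]
for it, loss in zip(history["iteration"], history["loss"]):
    print(f"iter {it:3d}  loss {float(loss):.6f}")
print("times:", outputs["time_init"], outputs["time_opt"], outputs["time_misc"])
```
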
diff --git a/happypose/pose_estimators/cosypose/cosypose/multiview/ransac.py b/happypose/pose_estimators/cosypose/cosypose/multiview/ransac.py
index 9217e544..c26bbdfc 100644
--- a/happypose/pose_estimators/cosypose/cosypose/multiview/ransac.py
+++ b/happypose/pose_estimators/cosypose/cosypose/multiview/ransac.py
@@ -1,24 +1,26 @@
-import torch
from collections import defaultdict
-import pandas as pd
-import numpy as np
+
import cosypose_cext
+import numpy as np
+import pandas as pd
+import torch
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
from happypose.pose_estimators.cosypose.cosypose.lib3d.symmetric_distances import (
- symmetric_distance_batched_fast, expand_ids_for_symmetry, scatter_argmin)
-
+ expand_ids_for_symmetry,
+ scatter_argmin,
+ symmetric_distance_batched_fast,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
from happypose.pose_estimators.cosypose.cosypose.utils.timer import Timer
+
logger = get_logger(__name__)
-def estimate_camera_poses(TC1Oa, TC2Ob, labels_ab,
- TC1Og, TC2Od, labels_gd,
- mesh_db):
+def estimate_camera_poses(TC1Oa, TC2Ob, labels_ab, TC1Og, TC2Od, labels_gd, mesh_db):
# Assume (TC1Oa and TC2Ob), (TC1Og, TC2Od) are the same.
# Notation differ from the paper, paper(code)
# we have 1(a), 2(b), a(alpha), b(beta), g(gamma), d(delta)
@@ -39,7 +41,8 @@ def estimate_camera_poses(TC1Oa, TC2Ob, labels_ab,
dists, _ = dist_fn(
TC1Og[ids_expand],
(TC1Oa[ids_expand] @ sym_expand @ TObC2[ids_expand]) @ TC2Od[ids_expand],
- labels_gd[ids_expand], mesh_db
+ labels_gd[ids_expand],
+ mesh_db,
)
min_ids = scatter_argmin(dists, ids_expand)
S_Oa_star = meshes_ab.symmetries[torch.arange(len(min_ids)), sym_ids[min_ids]]
@@ -48,18 +51,26 @@ def estimate_camera_poses(TC1Oa, TC2Ob, labels_ab,
def estimate_camera_poses_batch(candidates, seeds, mesh_db, bsz=1024):
- n_tot = len(seeds['match1_cand1'])
+ n_tot = len(seeds["match1_cand1"])
n_batch = max(1, int(n_tot // bsz))
ids_split = np.array_split(np.arange(n_tot), n_batch)
all_TC1C2 = []
for ids in ids_split:
- labels_ab = candidates.infos['label'].iloc[seeds['match1_cand1'][ids]].values
- labels_gd = candidates.infos['label'].iloc[seeds['match2_cand1'][ids]].values
- TC1Oa = candidates.poses[seeds['match1_cand1'][ids]]
- TC2Ob = candidates.poses[seeds['match1_cand2'][ids]]
- TC1Og = candidates.poses[seeds['match2_cand1'][ids]]
- TC2Od = candidates.poses[seeds['match2_cand2'][ids]]
- TC1C2 = estimate_camera_poses(TC1Oa, TC2Ob, labels_ab, TC1Og, TC2Od, labels_gd, mesh_db)
+ labels_ab = candidates.infos["label"].iloc[seeds["match1_cand1"][ids]].values
+ labels_gd = candidates.infos["label"].iloc[seeds["match2_cand1"][ids]].values
+ TC1Oa = candidates.poses[seeds["match1_cand1"][ids]]
+ TC2Ob = candidates.poses[seeds["match1_cand2"][ids]]
+ TC1Og = candidates.poses[seeds["match2_cand1"][ids]]
+ TC2Od = candidates.poses[seeds["match2_cand2"][ids]]
+ TC1C2 = estimate_camera_poses(
+ TC1Oa,
+ TC2Ob,
+ labels_ab,
+ TC1Og,
+ TC2Od,
+ labels_gd,
+ mesh_db,
+ )
all_TC1C2.append(TC1C2)
return torch.cat(all_TC1C2, dim=0)
@@ -74,27 +85,27 @@ def score_tmatches(TC1Oa, TC2Ob, TC1C2, labels_ab, mesh_db):
def score_tmaches_batch(candidates, tmatches, TC1C2, mesh_db, bsz=4096):
- n_tot = len(tmatches['cand1'])
+ n_tot = len(tmatches["cand1"])
n_batch = max(1, int(n_tot // bsz))
ids_split = np.array_split(np.arange(n_tot), n_batch)
all_dists = []
for ids in ids_split:
- labels = candidates.infos['label'].iloc[tmatches['cand1'][ids]].values
- TC1Oa = candidates.poses[tmatches['cand1'][ids]]
- TC2Ob = candidates.poses[tmatches['cand2'][ids]]
- TC1C2_ = TC1C2[tmatches['hypothesis_id'][ids]]
+ labels = candidates.infos["label"].iloc[tmatches["cand1"][ids]].values
+ TC1Oa = candidates.poses[tmatches["cand1"][ids]]
+ TC2Ob = candidates.poses[tmatches["cand2"][ids]]
+ TC1C2_ = TC1C2[tmatches["hypothesis_id"][ids]]
dists = score_tmatches(TC1Oa, TC2Ob, TC1C2_, labels, mesh_db)
all_dists.append(dists)
return torch.cat(all_dists, dim=0)
def scene_level_matching(candidates, inliers):
- cand1 = inliers['inlier_matches_cand1']
- cand2 = inliers['inlier_matches_cand2']
+ cand1 = inliers["inlier_matches_cand1"]
+ cand2 = inliers["inlier_matches_cand2"]
edges = np.ones((len(cand1)), dtype=np.int)
n_cand = len(candidates)
graph = csr_matrix((edges, (cand1, cand2)), shape=(n_cand, n_cand))
- n_components, ids = connected_components(graph, directed=True, connection='strong')
+ n_components, ids = connected_components(graph, directed=True, connection="strong")
component_size = defaultdict(lambda: 0)
for idx in ids:
@@ -104,71 +115,78 @@ def scene_level_matching(candidates, inliers):
obj_n_cand[n] = component_size[idx]
cand_infos = candidates.infos.copy()
- cand_infos['component_id'] = ids
+ cand_infos["component_id"] = ids
keep_cand = obj_n_cand >= 2
cand_infos = cand_infos[keep_cand].reset_index(drop=True)
- for n, (comp_id, group) in enumerate(cand_infos.groupby('component_id')):
- cand_infos.loc[group.index, 'component_id'] = n
- cand_infos = cand_infos.rename(columns={'component_id': 'obj_id'})
+ for n, (_comp_id, group) in enumerate(cand_infos.groupby("component_id")):
+ cand_infos.loc[group.index, "component_id"] = n
+ cand_infos = cand_infos.rename(columns={"component_id": "obj_id"})
- matched_candidates = tc.PandasTensorCollection(infos=cand_infos,
- poses=candidates.poses[cand_infos['cand_id'].values])
+ matched_candidates = tc.PandasTensorCollection(
+ infos=cand_infos,
+ poses=candidates.poses[cand_infos["cand_id"].values],
+ )
return matched_candidates
def make_obj_infos(matched_candidates):
- scene_infos = matched_candidates.infos.loc[:, ['obj_id', 'score', 'label']].copy()
- gb = scene_infos.groupby('obj_id')
- scene_infos['n_cand'] = gb['score'].transform(len).astype(np.int)
- scene_infos['score'] = gb['score'].transform(np.sum)
+ scene_infos = matched_candidates.infos.loc[:, ["obj_id", "score", "label"]].copy()
+ gb = scene_infos.groupby("obj_id")
+ scene_infos["n_cand"] = gb["score"].transform(len).astype(np.int)
+ scene_infos["score"] = gb["score"].transform(np.sum)
scene_infos = gb.first().reset_index(drop=False)
return scene_infos
def get_best_viewpair_pose_est(TC1C2, seeds, inliers):
- best_hypotheses = inliers['best_hypotheses']
+ best_hypotheses = inliers["best_hypotheses"]
TC1C2_best = TC1C2[best_hypotheses]
- view1 = seeds['view1'][best_hypotheses]
- view2 = seeds['view2'][best_hypotheses]
- infos = pd.DataFrame(dict(view1=view1, view2=view2))
+ view1 = seeds["view1"][best_hypotheses]
+ view2 = seeds["view2"][best_hypotheses]
+ infos = pd.DataFrame({"view1": view1, "view2": view2})
return tc.PandasTensorCollection(infos=infos, TC1C2=TC1C2_best)
-def multiview_candidate_matching(candidates, mesh_db,
- model_bsz=1e3,
- score_bsz=1e5,
- dist_threshold=0.02,
- cameras=None,
- n_ransac_iter=20,
- n_min_inliers=3):
+def multiview_candidate_matching(
+ candidates,
+ mesh_db,
+ model_bsz=1e3,
+ score_bsz=1e5,
+ dist_threshold=0.02,
+ cameras=None,
+ n_ransac_iter=20,
+ n_min_inliers=3,
+):
timer_models = Timer()
timer_score = Timer()
timer_misc = Timer()
known_poses = cameras is not None
if known_poses:
- logger.debug('Using known camera poses.')
+ logger.debug("Using known camera poses.")
n_ransac_iter = 1
else:
- logger.debug('Estimating camera poses using RANSAC.')
+ logger.debug("Estimating camera poses using RANSAC.")
timer_misc.start()
- candidates.infos['cand_id'] = np.arange(len(candidates))
+ candidates.infos["cand_id"] = np.arange(len(candidates))
timer_misc.pause()
timer_models.start()
seeds, tmatches = cosypose_cext.make_ransac_infos(
- candidates.infos['view_id'].values.tolist(), candidates.infos['label'].values.tolist(),
- n_ransac_iter, 0,
+ candidates.infos["view_id"].values.tolist(),
+ candidates.infos["label"].values.tolist(),
+ n_ransac_iter,
+ 0,
)
if not known_poses:
TC1C2 = estimate_camera_poses_batch(candidates, seeds, mesh_db, bsz=model_bsz)
else:
- cameras.infos['idx'] = np.arange(len(cameras))
- view_map = cameras.infos.set_index('view_id')
- TWC1 = cameras.TWC[view_map.loc[seeds['view1'], 'idx'].values]
- TWC2 = cameras.TWC[view_map.loc[seeds['view2'], 'idx'].values]
+ cameras.infos["idx"] = np.arange(len(cameras))
+ view_map = cameras.infos.set_index("view_id")
+ TWC1 = cameras.TWC[view_map.loc[seeds["view1"], "idx"].values]
+ TWC2 = cameras.TWC[view_map.loc[seeds["view2"], "idx"].values]
TC1C2 = invert_T(TWC1) @ TWC2
timer_models.pause()
@@ -176,9 +194,14 @@ def multiview_candidate_matching(candidates, mesh_db,
dists = score_tmaches_batch(candidates, tmatches, TC1C2, mesh_db, bsz=score_bsz)
inliers = cosypose_cext.find_ransac_inliers(
- seeds['view1'], seeds['view2'],
- tmatches['hypothesis_id'], tmatches['cand1'], tmatches['cand2'],
- dists.cpu().numpy(), dist_threshold, n_min_inliers,
+ seeds["view1"],
+ seeds["view2"],
+ tmatches["hypothesis_id"],
+ tmatches["cand1"],
+ tmatches["cand2"],
+ dists.cpu().numpy(),
+ dist_threshold,
+ n_min_inliers,
)
timer_score.pause()
@@ -188,12 +211,12 @@ def multiview_candidate_matching(candidates, mesh_db,
scene_infos = make_obj_infos(filtered_candidates)
timer_misc.pause()
- outputs = dict(
- filtered_candidates=filtered_candidates,
- scene_infos=scene_infos,
- pairs_TC1C2=pairs_TC1C2,
- time_models=timer_models.stop(),
- time_score=timer_score.stop(),
- time_misc=timer_misc.stop(),
- )
+ outputs = {
+ "filtered_candidates": filtered_candidates,
+ "scene_infos": scene_infos,
+ "pairs_TC1C2": pairs_TC1C2,
+ "time_models": timer_models.stop(),
+ "time_score": timer_score.stop(),
+ "time_misc": timer_misc.stop(),
+ }
return outputs
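
How the two stages are expected to chain, with this file's matching feeding the bundle adjustment in bundle_adjustment.py; `candidates`, `cameras` and `mesh_db` are assumed inputs, and the wiring is an illustration rather than a documented API:

```python
match_outputs = multiview_candidate_matching(candidates, mesh_db, cameras=cameras)
refinement = MultiviewRefinement(
    candidates=match_outputs["filtered_candidates"],
    cameras=cameras,
    pairs_TC1C2=match_outputs["pairs_TC1C2"],
    mesh_db=mesh_db,
)
ba_outputs = refinement.solve()
print(list(ba_outputs.keys()))
```
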
diff --git a/happypose/pose_estimators/cosypose/cosypose/recording/bop_recording_scene.py b/happypose/pose_estimators/cosypose/cosypose/recording/bop_recording_scene.py
index dd2f8df6..19425f0d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/recording/bop_recording_scene.py
+++ b/happypose/pose_estimators/cosypose/cosypose/recording/bop_recording_scene.py
@@ -1,13 +1,21 @@
from pathlib import Path
-import pinocchio as pin
+
import numpy as np
+import pinocchio as pin
from happypose.pose_estimators.cosypose.cosypose.config import ASSET_DIR
-
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_urdf_dataset, make_texture_dataset
-
-from happypose.pose_estimators.cosypose.cosypose.simulator import BaseScene, Body, Camera
-from happypose.pose_estimators.cosypose.cosypose.simulator import BodyCache, TextureCache, apply_random_textures
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_texture_dataset,
+ make_urdf_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.simulator import (
+ BaseScene,
+ Body,
+ BodyCache,
+ Camera,
+ TextureCache,
+ apply_random_textures,
+)
class SamplerError(Exception):
@@ -16,22 +24,23 @@ def __init__(self, *args, **kwargs):
class BopRecordingScene(BaseScene):
- def __init__(self,
- urdf_ds='ycbv',
- texture_ds='shapenet',
- domain_randomization=True,
- textures_on_objects=False,
- n_objects_interval=(2, 5),
- objects_xyz_interval=((0.0, -0.5, -0.15), (1.0, 0.5, 0.15)),
- proba_falling=0.5,
- resolution=(640, 480),
- focal_interval=((515, 515), (515, 515)),
- camera_distance_interval=(0.5, 1.5),
- border_check=True,
- gpu_renderer=True,
- n_textures_cache=50,
- seed=0):
-
+ def __init__(
+ self,
+ urdf_ds="ycbv",
+ texture_ds="shapenet",
+ domain_randomization=True,
+ textures_on_objects=False,
+ n_objects_interval=(2, 5),
+ objects_xyz_interval=((0.0, -0.5, -0.15), (1.0, 0.5, 0.15)),
+ proba_falling=0.5,
+ resolution=(640, 480),
+ focal_interval=((515, 515), (515, 515)),
+ camera_distance_interval=(0.5, 1.5),
+ border_check=True,
+ gpu_renderer=True,
+ n_textures_cache=50,
+ seed=0,
+ ):
# Objects
self.urdf_ds = make_urdf_dataset(urdf_ds)
self.n_objects_interval = n_objects_interval
@@ -60,11 +69,11 @@ def __init__(self,
self.seed = seed
def load_background(self):
- cage_path = Path(ASSET_DIR / 'cage' / 'cage.urdf').as_posix()
+ cage_path = Path(ASSET_DIR / "cage" / "cage.urdf").as_posix()
self.background = Body.load(cage_path, client_id=self.client_id, scale=3.0)
def load_plane(self):
- plane_path = Path(ASSET_DIR / 'plane' / 'plane.urdf').as_posix()
+ plane_path = Path(ASSET_DIR / "plane" / "plane.urdf").as_posix()
self.plane = Body.load(plane_path, client_id=self.client_id, scale=2.0)
def background_pos_orn_rand(self):
@@ -84,7 +93,10 @@ def load_body_cache(self):
def load_texture_cache(self):
assert self._connected
- ds_texture_ids = self.np_random.choice(len(self.texture_ds), size=self.n_textures_cache)
+ ds_texture_ids = self.np_random.choice(
+ len(self.texture_ds),
+ size=self.n_textures_cache,
+ )
self.texture_cache = TextureCache(self.texture_ds, self.client_id)
[self.texture_cache.get_texture(idx) for idx in ds_texture_ids]
@@ -112,8 +124,11 @@ def visuals_rand(self):
if self.textures_on_objects and self.np_random.rand() > 0.9:
bodies = self.bodies + bodies
for body in bodies:
- apply_random_textures(body, self.texture_cache.cached_textures,
- np_random=self.np_random)
+ apply_random_textures(
+ body,
+ self.texture_cache.cached_textures,
+ np_random=self.np_random,
+ )
def objects_pos_orn_rand(self):
self.hide_plane()
@@ -145,14 +160,20 @@ def sample_camera(self):
K[1, 2] = H / 2
K[2, 2] = 1.0
rho = self.np_random.uniform(*self.camera_distance_interval)
- theta = self.np_random.uniform(0, np.pi/2)
+ theta = self.np_random.uniform(0, np.pi / 2)
phi = self.np_random.uniform(0, 2 * np.pi)
roll = self.np_random.uniform(-10, 10) * np.pi / 180
box_center = np.mean(self.objects_xyz_interval, axis=0)
cam = Camera(resolution=self.resolution, client_id=self._client_id)
cam.set_intrinsic_K(K)
- cam.set_extrinsic_spherical(target=box_center, rho=rho, phi=phi, theta=theta, roll=roll)
+ cam.set_extrinsic_spherical(
+ target=box_center,
+ rho=rho,
+ phi=phi,
+ theta=theta,
+ roll=roll,
+ )
return cam
def camera_rand(self):
@@ -162,31 +183,38 @@ def camera_rand(self):
while not valid:
cam = self.sample_camera()
cam_obs_ = cam.get_state()
- mask = cam_obs_['mask']
+ mask = cam_obs_["mask"]
mask[mask == self.background._body_id] = 0
mask[mask == 255] = 0
- uniqs = np.unique(cam_obs_['mask'])
+ uniqs = np.unique(cam_obs_["mask"])
valid = len(uniqs) == len(self.bodies) + 1
if valid and self.border_check:
for uniq in uniqs[uniqs > 0]:
- H, W = cam_obs_['mask'].shape
- ids = np.where(cam_obs_['mask'] == uniq)
- if ids[0].max() == H-1 or ids[0].min() == 0 or \
- ids[1].max() == W-1 or ids[1].min() == 0:
+ H, W = cam_obs_["mask"].shape
+ ids = np.where(cam_obs_["mask"] == uniq)
+ if (
+ ids[0].max() == H - 1
+ or ids[0].min() == 0
+ or ids[1].max() == W - 1
+ or ids[1].min() == 0
+ ):
valid = False
N += 1
if N >= 3:
- raise SamplerError('Cannot sample valid camera configuration.')
+ msg = "Cannot sample valid camera configuration."
+ raise SamplerError(msg)
self.cam_obs = cam_obs_
- def _full_rand(self,
- objects=True,
- objects_pos_orn=True,
- falling=False,
- background_pos_orn=True,
- camera=True,
- visuals=True):
+ def _full_rand(
+ self,
+ objects=True,
+ objects_pos_orn=True,
+ falling=False,
+ background_pos_orn=True,
+ camera=True,
+ visuals=True,
+ ):
if background_pos_orn:
self.background_pos_orn_rand()
if objects:
@@ -205,13 +233,13 @@ def get_state(self):
objects = []
for body in self.bodies:
state = body.get_state()
- state['id_in_segm'] = body._body_id
+ state["id_in_segm"] = body._body_id
objects.append(state)
- state = dict(
- camera=self.cam_obs,
- objects=objects,
- )
+ state = {
+ "camera": self.cam_obs,
+ "objects": objects,
+ }
return state
def try_rand(self):
@@ -221,20 +249,21 @@ def try_rand(self):
falling = self.np_random.rand() < self.proba_falling
visuals = self.domain_randomization
background_pos_orn = self.domain_randomization
- kwargs = dict(
- objects=True,
- objects_pos_orn=True,
- falling=falling,
- background_pos_orn=background_pos_orn,
- camera=True,
- visuals=visuals,
- )
+ kwargs = {
+ "objects": True,
+ "objects_pos_orn": True,
+ "falling": falling,
+ "background_pos_orn": background_pos_orn,
+ "camera": True,
+ "visuals": visuals,
+ }
self._full_rand(**kwargs)
return
except SamplerError as e:
print("Sampling failed: ", e)
n_iter += 1
- raise SamplerError('Sampling failed')
+ msg = "Sampling failed"
+ raise SamplerError(msg)
def make_new_scene(self):
self.try_rand()
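
`sample_camera` draws a viewpoint in spherical coordinates around the object box center. The position part of that sampling as plain math, a sketch assuming theta is measured from the vertical axis (`set_extrinsic_spherical` presumably also builds the look-at orientation and roll):

```python
import numpy as np

rng = np.random.default_rng(0)
rho = rng.uniform(0.5, 1.5)        # camera distance
theta = rng.uniform(0, np.pi / 2)  # elevation from the vertical axis
phi = rng.uniform(0, 2 * np.pi)    # azimuth
target = np.zeros(3)
cam_pos = target + rho * np.array(
    [np.cos(phi) * np.sin(theta), np.sin(phi) * np.sin(theta), np.cos(theta)]
)
print(cam_pos)
```
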
diff --git a/happypose/pose_estimators/cosypose/cosypose/recording/record_chunk.py b/happypose/pose_estimators/cosypose/cosypose/recording/record_chunk.py
index c87f9f01..d4d22853 100644
--- a/happypose/pose_estimators/cosypose/cosypose/recording/record_chunk.py
+++ b/happypose/pose_estimators/cosypose/cosypose/recording/record_chunk.py
@@ -1,14 +1,15 @@
-import numpy as np
-import pickle
import importlib
+import pickle
+from io import BytesIO
from pathlib import Path
+
+import numpy as np
from PIL import Image
-from io import BytesIO
def get_cls(cls_str):
- split = cls_str.split('.')
- mod_name = '.'.join(split[:-1])
+ split = cls_str.split(".")
+ mod_name = ".".join(split[:-1])
cls_name = split[-1]
mod = importlib.import_module(mod_name)
return getattr(mod, cls_name)
@@ -24,28 +25,32 @@ def _serialize_im(im, **pil_kwargs):
def _get_dic_buf(state, jpeg=True, jpeg_compression=100):
if jpeg:
- pil_kwargs = dict(format='JPEG', quality=jpeg_compression)
+ pil_kwargs = {"format": "JPEG", "quality": jpeg_compression}
else:
- pil_kwargs = dict(format='PNG', quality=100)
-
- del state['camera']['depth']
- state['camera']['rgb'] = _serialize_im(state['camera']['rgb'], **pil_kwargs)
- state['camera']['mask'] = _serialize_im(state['camera']['mask'], format='PNG', quality=100)
+ pil_kwargs = {"format": "PNG", "quality": 100}
+
+ del state["camera"]["depth"]
+ state["camera"]["rgb"] = _serialize_im(state["camera"]["rgb"], **pil_kwargs)
+ state["camera"]["mask"] = _serialize_im(
+ state["camera"]["mask"],
+ format="PNG",
+ quality=100,
+ )
return pickle.dumps(state)
def write_chunk(state_list, seed, ds_dir):
- key_to_buf = dict()
- dumps_dir = Path(ds_dir) / 'dumps'
+ key_to_buf = {}
+ dumps_dir = Path(ds_dir) / "dumps"
dumps_dir.mkdir(exist_ok=True)
for n, state in enumerate(state_list):
- key = f'{seed}-{n}'
+ key = f"{seed}-{n}"
key_to_buf[key] = _get_dic_buf(state)
# Write on disk
for key, buf in key_to_buf.items():
- (dumps_dir / key).with_suffix('.pkl').write_bytes(buf)
+ (dumps_dir / key).with_suffix(".pkl").write_bytes(buf)
keys = list(key_to_buf.keys())
return keys
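
A round-trip sketch of the chunk format written above: each state is a pickled dict whose images were serialized to in-memory JPEG/PNG buffers:

```python
import pickle
from io import BytesIO

import numpy as np
from PIL import Image

rgb = np.zeros((480, 640, 3), dtype=np.uint8)
buf = BytesIO()
Image.fromarray(rgb).save(buf, format="JPEG", quality=100)
blob = pickle.dumps({"camera": {"rgb": buf.getvalue()}})

# What a dataset loader on the other end would do:
state = pickle.loads(blob)
rgb_back = np.asarray(Image.open(BytesIO(state["camera"]["rgb"])))
print(rgb_back.shape)  # (480, 640, 3)
```
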
@@ -55,7 +60,7 @@ def record_chunk(ds_dir, scene_cls, scene_kwargs, seed, n_frames):
ds_dir.mkdir(exist_ok=True)
scene_cls = get_cls(scene_cls)
- scene_kwargs['seed'] = seed
+ scene_kwargs["seed"] = seed
scene = scene_cls(**scene_kwargs)
scene.connect(load=True)
diff --git a/happypose/pose_estimators/cosypose/cosypose/recording/record_dataset.py b/happypose/pose_estimators/cosypose/cosypose/recording/record_dataset.py
index b436e215..f4a91bf9 100644
--- a/happypose/pose_estimators/cosypose/cosypose/recording/record_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/recording/record_dataset.py
@@ -1,40 +1,56 @@
-import yaml
import pickle
import shutil
from pathlib import Path
-from tqdm import tqdm
+import dask
+import yaml
from dask_jobqueue import SLURMCluster
from distributed import Client, LocalCluster, as_completed
-from .record_chunk import record_chunk
+from tqdm import tqdm
-from happypose.pose_estimators.cosypose.cosypose.config import CONDA_BASE_DIR, CONDA_ENV, PROJECT_DIR, DASK_LOGS_DIR
-from happypose.pose_estimators.cosypose.cosypose.config import SLURM_GPU_QUEUE, SLURM_QOS, DASK_NETWORK_INTERFACE
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ CONDA_BASE_DIR,
+ CONDA_ENV,
+ DASK_LOGS_DIR,
+ DASK_NETWORK_INTERFACE,
+ PROJECT_DIR,
+ SLURM_GPU_QUEUE,
+ SLURM_QOS,
+)
-import dask
-dask.config.set({'distributed.scheduler.allowed-failures': 1000})
+from .record_chunk import record_chunk
+dask.config.set({"distributed.scheduler.allowed-failures": 1000})
-def record_dataset_dask(client, ds_dir,
- scene_cls, scene_kwargs,
- n_chunks, n_frames_per_chunk,
- start_seed=0, resume=False):
+def record_dataset_dask(
+ client,
+ ds_dir,
+ scene_cls,
+ scene_kwargs,
+ n_chunks,
+ n_frames_per_chunk,
+ start_seed=0,
+ resume=False,
+):
seeds = set(range(start_seed, start_seed + n_chunks))
if resume:
- done_seeds = (ds_dir / 'seeds_recorded.txt').read_text().strip().split('\n')
+ done_seeds = (ds_dir / "seeds_recorded.txt").read_text().strip().split("\n")
seeds = set(seeds) - set(map(int, done_seeds))
- all_keys = (ds_dir / 'keys_recorded.txt').read_text().strip().split('\n')
+ all_keys = (ds_dir / "keys_recorded.txt").read_text().strip().split("\n")
else:
all_keys = []
seeds = tuple(seeds)
future_kwargs = []
for seed in seeds:
- kwargs = dict(ds_dir=ds_dir, seed=seed,
- n_frames=n_frames_per_chunk,
- scene_cls=scene_cls,
- scene_kwargs=scene_kwargs)
+ kwargs = {
+ "ds_dir": ds_dir,
+ "seed": seed,
+ "n_frames": n_frames_per_chunk,
+ "scene_cls": scene_cls,
+ "scene_kwargs": scene_kwargs,
+ }
future_kwargs.append(kwargs)
futures = []
@@ -42,20 +58,26 @@ def record_dataset_dask(client, ds_dir,
futures.append(client.submit(record_chunk, **kwargs))
iterator = as_completed(futures)
- unit = 'frame'
+ unit = "frame"
unit_scale = n_frames_per_chunk
n_futures = len(future_kwargs)
- tqdm_iterator = tqdm(iterator, total=n_futures, unit_scale=unit_scale, unit=unit, ncols=80)
+ tqdm_iterator = tqdm(
+ iterator,
+ total=n_futures,
+ unit_scale=unit_scale,
+ unit=unit,
+ ncols=80,
+ )
- seeds_file = open(ds_dir / 'seeds_recorded.txt', 'a')
- keys_file = open(ds_dir / 'keys_recorded.txt', 'a')
+ seeds_file = open(ds_dir / "seeds_recorded.txt", "a")
+ keys_file = open(ds_dir / "keys_recorded.txt", "a")
for future in tqdm_iterator:
keys, seed = future.result()
all_keys += keys
- seeds_file.write(f'{seed}\n')
+ seeds_file.write(f"{seed}\n")
seeds_file.flush()
- keys_file.write('\n'.join(keys) + '\n')
+ keys_file.write("\n".join(keys) + "\n")
keys_file.flush()
client.cancel(future)
@@ -66,72 +88,80 @@ def record_dataset_dask(client, ds_dir,
def record_dataset(args):
if args.resume and not args.overwrite:
- resume_args = yaml.load((Path(args.resume) / 'config.yaml').read_text())
- vars(args).update({k: v for k, v in vars(resume_args).items() if 'resume' not in k})
+ resume_args = yaml.load(
+ (Path(args.resume) / "config.yaml").read_text(),
+ Loader=yaml.UnsafeLoader,  # PyYAML 6 requires a Loader; config.yaml stores a Namespace, which a safe load rejects
+ )
+ vars(args).update(
+ {k: v for k, v in vars(resume_args).items() if "resume" not in k},
+ )
args.ds_dir = Path(args.ds_dir)
if args.ds_dir.is_dir():
if args.resume:
- assert (args.ds_dir / 'seeds_recorded.txt').exists()
+ assert (args.ds_dir / "seeds_recorded.txt").exists()
elif args.overwrite:
shutil.rmtree(args.ds_dir)
else:
- raise ValueError('There is already a dataset with this name')
+ msg = "There is already a dataset with this name"
+ raise ValueError(msg)
args.ds_dir.mkdir(exist_ok=True)
- (args.ds_dir / 'config.yaml').write_text(yaml.dump(args))
+ (args.ds_dir / "config.yaml").write_text(yaml.dump(args))
log_dir = DASK_LOGS_DIR.as_posix()
if args.distributed:
env_extra = [
- 'module purge',
- f'source {CONDA_BASE_DIR}/bin/activate',
- f'conda activate {CONDA_ENV}',
- f'cd {PROJECT_DIR}',
- f'eval $(python -m job_runner.assign_gpu)',
- 'export OMP_NUM_THREADS=1',
- 'export MKL_NUM_THREADS=1',
+ "module purge",
+ f"source {CONDA_BASE_DIR}/bin/activate",
+ f"conda activate {CONDA_ENV}",
+ f"cd {PROJECT_DIR}",
+ "eval $(python -m job_runner.assign_gpu)",
+ "export OMP_NUM_THREADS=1",
+ "export MKL_NUM_THREADS=1",
]
n_processes = args.n_processes_per_gpu
- log_path = (DASK_LOGS_DIR / 'all_logs.out').as_posix()
-
- cluster = SLURMCluster(cores=n_processes,
- memory='160 GB',
- queue=f'{SLURM_GPU_QUEUE}',
- walltime='10:00:00',
- processes=n_processes,
- local_directory=log_dir,
- log_directory=log_dir,
- nthreads=1,
- memory_monitor_interval='1000000000000000s',
- env_extra=env_extra,
- job_extra=[
- f'--qos={SLURM_QOS}',
- '--hint=nomultithread',
- '--gres=gpu:1',
- f'--output={log_path}',
- f'--error={log_path}'
- ],
- interface=DASK_NETWORK_INTERFACE)
+ log_path = (DASK_LOGS_DIR / "all_logs.out").as_posix()
+
+ cluster = SLURMCluster(
+ cores=n_processes,
+ memory="160 GB",
+ queue=f"{SLURM_GPU_QUEUE}",
+ walltime="10:00:00",
+ processes=n_processes,
+ local_directory=log_dir,
+ log_directory=log_dir,
+ nthreads=1,
+ memory_monitor_interval="1000000000000000s",
+ env_extra=env_extra,
+ job_extra=[
+ f"--qos={SLURM_QOS}",
+ "--hint=nomultithread",
+ "--gres=gpu:1",
+ f"--output={log_path}",
+ f"--error={log_path}",
+ ],
+ interface=DASK_NETWORK_INTERFACE,
+ )
cluster.adapt(minimum_jobs=args.n_workers, maximum_jobs=args.n_workers)
else:
cluster = LocalCluster(local_directory=log_dir, processes=True, n_workers=4)
client = Client(cluster)
- all_keys = record_dataset_dask(client=client, ds_dir=args.ds_dir,
- scene_kwargs=args.scene_kwargs,
- scene_cls=args.scene_cls,
- start_seed=0,
- n_chunks=int(args.n_chunks),
- n_frames_per_chunk=int(args.n_frames_per_chunk),
- resume=args.resume)
+ all_keys = record_dataset_dask(
+ client=client,
+ ds_dir=args.ds_dir,
+ scene_kwargs=args.scene_kwargs,
+ scene_cls=args.scene_cls,
+ start_seed=0,
+ n_chunks=int(args.n_chunks),
+ n_frames_per_chunk=int(args.n_frames_per_chunk),
+ resume=args.resume,
+ )
n_train = int(args.train_ratio * len(all_keys))
train_keys, val_keys = all_keys[:n_train], all_keys[n_train:]
- Path(args.ds_dir / 'keys.pkl').write_bytes(pickle.dumps(all_keys))
- Path(args.ds_dir / 'train_keys.pkl').write_bytes(pickle.dumps(train_keys))
- Path(args.ds_dir / 'val_keys.pkl').write_bytes(pickle.dumps(val_keys))
+ Path(args.ds_dir / "keys.pkl").write_bytes(pickle.dumps(all_keys))
+ Path(args.ds_dir / "train_keys.pkl").write_bytes(pickle.dumps(train_keys))
+ Path(args.ds_dir / "val_keys.pkl").write_bytes(pickle.dumps(val_keys))
client.close()
del cluster
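
The submit/as_completed pattern above, reduced to a standalone LocalCluster example; `record_chunk_stub` is a stand-in for the real `record_chunk`:

```python
from distributed import Client, LocalCluster, as_completed

def record_chunk_stub(seed, n_frames):
    # Mimics record_chunk's (keys, seed) return value.
    return [f"{seed}-{n}" for n in range(n_frames)], seed

if __name__ == "__main__":
    cluster = LocalCluster(n_workers=2, processes=True)
    client = Client(cluster)
    futures = [client.submit(record_chunk_stub, seed, 3) for seed in range(4)]
    for future in as_completed(futures):
        keys, seed = future.result()
        print(seed, keys)
        client.cancel(future)  # release the finished result, as the loop above does
    client.close()
```
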
diff --git a/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_batch_renderer.py b/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_batch_renderer.py
index a0c1a067..00af66df 100644
--- a/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_batch_renderer.py
+++ b/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_batch_renderer.py
@@ -1,41 +1,57 @@
-import torch
-import numpy as np
import multiprocessing
+import numpy as np
+import torch
+
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
+
from .bullet_scene_renderer import BulletSceneRenderer
def init_renderer(urdf_ds, preload=True, gpu_renderer=True):
- renderer = BulletSceneRenderer(urdf_ds=urdf_ds,
- preload_cache=preload,
- background_color=(0, 0, 0),
- gpu_renderer=gpu_renderer)
+ renderer = BulletSceneRenderer(
+ urdf_ds=urdf_ds,
+ preload_cache=preload,
+ background_color=(0, 0, 0),
+ gpu_renderer=gpu_renderer,
+ )
return renderer
-def worker_loop(worker_id, in_queue, out_queue, object_set, preload=True, gpu_renderer=True):
+def worker_loop(
+ worker_id,
+ in_queue,
+ out_queue,
+ object_set,
+ preload=True,
+ gpu_renderer=True,
+):
renderer = init_renderer(object_set, preload=preload, gpu_renderer=gpu_renderer)
while True:
kwargs = in_queue.get()
if kwargs is None:
return
- obj_infos = kwargs['obj_infos']
- cam_infos = kwargs['cam_infos']
- render_depth = kwargs['render_depth']
- is_valid = np.isfinite(obj_infos[0]['TWO']).all() \
- and np.isfinite(cam_infos[0]['TWC']).all() \
- and np.isfinite(cam_infos[0]['K']).all()
+ obj_infos = kwargs["obj_infos"]
+ cam_infos = kwargs["cam_infos"]
+ render_depth = kwargs["render_depth"]
+ is_valid = (
+ np.isfinite(obj_infos[0]["TWO"]).all()
+ and np.isfinite(cam_infos[0]["TWC"]).all()
+ and np.isfinite(cam_infos[0]["K"]).all()
+ )
if is_valid:
- cam_obs = renderer.render_scene(cam_infos=cam_infos, obj_infos=obj_infos,
- render_depth=render_depth)
- images = np.stack([d['rgb'] for d in cam_obs])
- depth = np.stack([d['depth'] for d in cam_obs]) if render_depth else None
+ cam_obs = renderer.render_scene(
+ cam_infos=cam_infos,
+ obj_infos=obj_infos,
+ render_depth=render_depth,
+ )
+ images = np.stack([d["rgb"] for d in cam_obs])
+ depth = np.stack([d["depth"] for d in cam_obs]) if render_depth else None
else:
- res = cam_infos[0]['resolution']
+ res = cam_infos[0]["resolution"]
images = np.zeros((1, min(res), max(res), 3), dtype=np.uint8)
depth = np.zeros((1, min(res), max(res)), dtype=np.float32)
- out_queue.put((kwargs['data_id'], images, depth))
+ out_queue.put((kwargs["data_id"], images, depth))
class BulletBatchRenderer:
@@ -55,23 +71,29 @@ def render(self, obj_infos, TCO, K, resolution=(240, 320), render_depth=False):
# NOTE: Could be faster with pytorch 3.8's sharedmemory
for n in np.arange(bsz):
- obj_info = dict(
- name=obj_infos[n]['name'],
- TWO=np.eye(4)
- )
- cam_info = dict(
- resolution=resolution,
- K=K[n],
- TWC=TOC[n],
- )
- kwargs = dict(cam_infos=[cam_info], obj_infos=[obj_info], render_depth=render_depth)
+ obj_info = {
+ "name": obj_infos[n]["name"],
+ "TWO": np.eye(4),
+ }
+ cam_info = {
+ "resolution": resolution,
+ "K": K[n],
+ "TWC": TOC[n],
+ }
+ kwargs = {
+ "cam_infos": [cam_info],
+ "obj_infos": [obj_info],
+ "render_depth": render_depth,
+ }
if self.n_workers > 0:
- kwargs['data_id'] = n
+ kwargs["data_id"] = n
self.in_queue.put(kwargs)
else:
cam_obs = self.plotters[0].render_scene(**kwargs)
- images = np.stack([d['rgb'] for d in cam_obs])
- depth = np.stack([d['depth'] for d in cam_obs]) if render_depth else None
+ images = np.stack([d["rgb"] for d in cam_obs])
+ depth = (
+ np.stack([d["depth"] for d in cam_obs]) if render_depth else None
+ )
self.out_queue.put((n, images, depth))
images = [None for _ in np.arange(bsz)]
@@ -82,14 +104,22 @@ def render(self, obj_infos, TCO, K, resolution=(240, 320), render_depth=False):
if render_depth:
depths[data_id] = depth[0]
if self.gpu_renderer:
- images = torch.as_tensor(np.stack(images, axis=0)).pin_memory().cuda(non_blocking=True)
+ images = (
+ torch.as_tensor(np.stack(images, axis=0))
+ .pin_memory()
+ .cuda(non_blocking=True)
+ )
else:
images = torch.as_tensor(np.stack(images, axis=0))
images = images.float().permute(0, 3, 1, 2) / 255
if render_depth:
if self.gpu_renderer:
- depths = torch.as_tensor(np.stack(depths, axis=0)).pin_memory().cuda(non_blocking=True)
+ depths = (
+ torch.as_tensor(np.stack(depths, axis=0))
+ .pin_memory()
+ .cuda(non_blocking=True)
+ )
else:
depths = torch.as_tensor(np.stack(depths, axis=0))
depths = depths.float()
@@ -104,21 +134,27 @@ def init_plotters(self, preload_cache, gpu_renderer):
if self.n_workers > 0:
for n in range(self.n_workers):
- plotter = multiprocessing.Process(target=worker_loop,
- kwargs=dict(worker_id=n,
- in_queue=self.in_queue,
- out_queue=self.out_queue,
- object_set=self.object_set,
- preload=preload_cache,
- gpu_renderer=gpu_renderer))
+ plotter = multiprocessing.Process(
+ target=worker_loop,
+ kwargs={
+ "worker_id": n,
+ "in_queue": self.in_queue,
+ "out_queue": self.out_queue,
+ "object_set": self.object_set,
+ "preload": preload_cache,
+ "gpu_renderer": gpu_renderer,
+ },
+ )
plotter.start()
self.plotters.append(plotter)
else:
- self.plotters = [init_renderer(self.object_set, preload_cache, gpu_renderer)]
+ self.plotters = [
+ init_renderer(self.object_set, preload_cache, gpu_renderer),
+ ]
def stop(self):
if self.n_workers > 0:
- for p in self.plotters:
+ for _p in self.plotters:
self.in_queue.put(None)
for p in self.plotters:
p.join()
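
The queue-and-sentinel worker pattern used by `BulletBatchRenderer`, as a standalone sketch: one `None` per worker makes each loop return, after which the processes can be joined:

```python
import multiprocessing

def worker_loop(in_queue, out_queue):
    while True:
        item = in_queue.get()
        if item is None:  # sentinel: shut down
            return
        out_queue.put(item * 2)

if __name__ == "__main__":
    in_q, out_q = multiprocessing.Queue(), multiprocessing.Queue()
    workers = [
        multiprocessing.Process(target=worker_loop, args=(in_q, out_q))
        for _ in range(2)
    ]
    for w in workers:
        w.start()
    for n in range(4):
        in_q.put(n)
    print(sorted(out_q.get() for _ in range(4)))  # [0, 2, 4, 6]
    for _ in workers:
        in_q.put(None)
    for w in workers:
        w.join()
```
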
diff --git a/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_scene_renderer.py b/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_scene_renderer.py
index c344d56e..8e7c46c4 100644
--- a/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_scene_renderer.py
+++ b/happypose/pose_estimators/cosypose/cosypose/rendering/bullet_scene_renderer.py
@@ -1,22 +1,24 @@
import numpy as np
import pybullet as pb
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_urdf_dataset
-from happypose.pose_estimators.cosypose.cosypose.lib3d import Transform
-
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_urdf_dataset,
+)
from happypose.pose_estimators.cosypose.cosypose.simulator.base_scene import BaseScene
from happypose.pose_estimators.cosypose.cosypose.simulator.caching import BodyCache
from happypose.pose_estimators.cosypose.cosypose.simulator.camera import Camera
+from happypose.toolbox.lib3d.transform import Transform
class BulletSceneRenderer(BaseScene):
- def __init__(self,
- urdf_ds='ycbv',
- preload_cache=False,
- background_color=(0, 0, 0),
- gpu_renderer=True,
- gui=False):
-
+ def __init__(
+ self,
+ urdf_ds="ycbv",
+ preload_cache=False,
+ background_color=(0, 0, 0),
+ gpu_renderer=True,
+ gui=False,
+ ):
self.urdf_ds = make_urdf_dataset(urdf_ds)
self.connect(gpu_renderer=gpu_renderer, gui=gui)
self.body_cache = BodyCache(self.urdf_ds, self.client_id)
@@ -25,37 +27,42 @@ def __init__(self,
self.background_color = background_color
def setup_scene(self, obj_infos):
- labels = [obj['name'] for obj in obj_infos]
+ labels = [obj["name"] for obj in obj_infos]
bodies = self.body_cache.get_bodies_by_labels(labels)
- for (obj_info, body) in zip(obj_infos, bodies):
- TWO = Transform(obj_info['TWO'])
+ for obj_info, body in zip(obj_infos, bodies):
+ TWO = Transform(obj_info["TWO"])
body.pose = TWO
- color = obj_info.get('color', None)
+ color = obj_info.get("color", None)
if color is not None:
- pb.changeVisualShape(body.body_id, -1, physicsClientId=0, rgbaColor=color)
+ pb.changeVisualShape(
+ body.body_id,
+ -1,
+ physicsClientId=0,
+ rgbaColor=color,
+ )
return bodies
def render_images(self, cam_infos, render_depth=False):
cam_obs = []
for cam_info in cam_infos:
- K = cam_info['K']
- TWC = Transform(cam_info['TWC'])
- resolution = cam_info['resolution']
+ K = cam_info["K"]
+ TWC = Transform(cam_info["TWC"])
+ resolution = cam_info["resolution"]
cam = Camera(resolution=resolution, client_id=self.client_id)
cam.set_intrinsic_K(K)
cam.set_extrinsic_T(TWC)
cam_obs_ = cam.get_state()
if self.background_color is not None:
- im = cam_obs_['rgb']
- mask = cam_obs_['mask']
+ im = cam_obs_["rgb"]
+ mask = cam_obs_["mask"]
im[np.logical_or(mask < 0, mask == 255)] = self.background_color
if render_depth:
- depth = cam_obs_['depth']
- near, far = cam_obs_['near'], cam_obs_['far']
+ depth = cam_obs_["depth"]
+ near, far = cam_obs_["near"], cam_obs_["far"]
z_n = 2 * depth - 1
z_e = 2 * near * far / (far + near - z_n * (far - near))
- z_e[np.logical_or(mask < 0, mask == 255)] = 0.
- cam_obs_['depth'] = z_e
+ z_e[np.logical_or(mask < 0, mask == 255)] = 0.0
+ cam_obs_["depth"] = z_e
cam_obs.append(cam_obs_)
return cam_obs
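
The depth conversion in `render_images` inverts the nonlinear OpenGL-style depth buffer: with z_n = 2 * depth - 1, eye-space depth is z_e = 2 * near * far / (far + near - z_n * (far - near)). A quick check of the two boundary values:

```python
import numpy as np

near, far = 0.1, 10.0

def linearize(depth_buffer: np.ndarray) -> np.ndarray:
    z_n = 2.0 * depth_buffer - 1.0
    return 2.0 * near * far / (far + near - z_n * (far - near))

# Buffer value 0 is the near plane, 1 the far plane.
print(linearize(np.array([0.0, 1.0])))  # [ 0.1 10. ]
```
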
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/convert_models_to_urdf.py b/happypose/pose_estimators/cosypose/cosypose/scripts/convert_models_to_urdf.py
index f18ad0df..3467c065 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/convert_models_to_urdf.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/convert_models_to_urdf.py
@@ -1,40 +1,46 @@
-from pathlib import Path
import argparse
import shutil
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from pathlib import Path
+
from tqdm import tqdm
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_object_dataset
-from happypose.pose_estimators.cosypose.cosypose.libmesh import ply_to_obj, obj_to_urdf
-from happypose.pose_estimators.cosypose.cosypose.libmesh import downsample_obj
+from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_object_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.libmesh import (
+ downsample_obj,
+ obj_to_urdf,
+ ply_to_obj,
+)
def convert_obj_dataset_to_urdfs(obj_ds_name, texture_size=(1024, 1024), n_faces=None):
obj_dataset = make_object_dataset(obj_ds_name)
- urdf_dir = LOCAL_DATA_DIR / 'urdfs' / obj_ds_name
+ urdf_dir = LOCAL_DATA_DIR / "urdfs" / obj_ds_name
urdf_dir.mkdir(exist_ok=True, parents=True)
for n in tqdm(range(len(obj_dataset))):
obj = obj_dataset[n]
- ply_path = Path(obj['mesh_path'])
- out_dir = urdf_dir / obj['label']
+ ply_path = Path(obj["mesh_path"])
+ out_dir = urdf_dir / obj["label"]
out_dir.mkdir(exist_ok=True)
- obj_path = out_dir / ply_path.with_suffix('.obj').name
+ obj_path = out_dir / ply_path.with_suffix(".obj").name
ply_to_obj(ply_path, obj_path, texture_size=texture_size)
if n_faces is not None:
- downsample_path = obj_path.parent / 'downsample.obj'
+ downsample_path = obj_path.parent / "downsample.obj"
downsample_obj(obj_path, downsample_path, n_faces=n_faces)
shutil.copy(downsample_path, obj_path)
- obj_to_urdf(obj_path, obj_path.with_suffix('.urdf'))
+ obj_to_urdf(obj_path, obj_path.with_suffix(".urdf"))
def main():
- parser = argparse.ArgumentParser('3D ply object models -> pybullet URDF converter')
- parser.add_argument('--models', default='', type=str)
+ parser = argparse.ArgumentParser("3D ply object models -> pybullet URDF converter")
+ parser.add_argument("--models", default="", type=str)
args = parser.parse_args()
convert_obj_dataset_to_urdfs(args.models)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/example_multigpu.py b/happypose/pose_estimators/cosypose/cosypose/scripts/example_multigpu.py
index 69f2c3b0..53338423 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/example_multigpu.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/example_multigpu.py
@@ -1,17 +1,25 @@
import os
+
import torch
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import init_distributed_mode, get_world_size, get_tmp_dir, get_rank
+
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ get_world_size,
+ init_distributed_mode,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+
logger = get_logger(__name__)
-if __name__ == '__main__':
+if __name__ == "__main__":
init_distributed_mode()
proc_id = get_rank()
n_tasks = get_world_size()
- n_cpus = os.environ.get('N_CPUS', 'not specified')
- logger.info(f'Number of processes (=num GPUs): {n_tasks}')
- logger.info(f'Process ID: {proc_id}')
- logger.info(f'TMP Directory for this job: {get_tmp_dir()}')
- logger.info(f'GPU CUDA ID: {torch.cuda.current_device()}')
- logger.info(f'Max number of CPUs for this process: {n_cpus}')
+ n_cpus = os.environ.get("N_CPUS", "not specified")
+ logger.info(f"Number of processes (=num GPUs): {n_tasks}")
+ logger.info(f"Process ID: {proc_id}")
+ logger.info(f"TMP Directory for this job: {get_tmp_dir()}")
+ logger.info(f"GPU CUDA ID: {torch.cuda.current_device()}")
+ logger.info(f"Max number of CPUs for this process: {n_cpus}")
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py b/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py
index c5109de5..70e0cbd6 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py
@@ -1,35 +1,39 @@
-import trimesh
+import json
import shutil
from copy import deepcopy
+
import numpy as np
-import json
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+import trimesh
+from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
-if __name__ == '__main__':
- ds_dir = LOCAL_DATA_DIR / 'bop_datasets/ycbv'
- models_dir = ds_dir / 'models'
+if __name__ == "__main__":
+ ds_dir = LOCAL_DATA_DIR / "bop_datasets/ycbv"
+ models_dir = ds_dir / "models"
- orig_names = (ds_dir / 'ycbv_friendly_names.txt').read_text()
- orig_names = {str(int(l.split(' ')[0])): l.split(' ')[1] for l in orig_names.split('\n')[:-1]}
+ orig_names = (ds_dir / "ycbv_friendly_names.txt").read_text()
+ orig_names = {
+ str(int(line.split(" ")[0])): line.split(" ")[1]
+ for line in orig_names.split("\n")[:-1]
+ }
- infos = json.loads((models_dir / 'models_info.json').read_text())
+ infos = json.loads((models_dir / "models_info.json").read_text())
compat_infos = deepcopy(infos)
# Consider these 2 objects asymmetric
for str_obj_id, orig_name in orig_names.items():
- if orig_name == '002_master_chef_can' or orig_name == '040_large_marker':
- compat_infos[str_obj_id]['symmetries_discrete'] = []
- compat_infos[str_obj_id]['symmetries_continuous'] = []
+ if orig_name == "002_master_chef_can" or orig_name == "040_large_marker":
+ compat_infos[str_obj_id]["symmetries_discrete"] = []
+ compat_infos[str_obj_id]["symmetries_continuous"] = []
- bop_compat_dir = ds_dir / 'models_bop-compat'
+ bop_compat_dir = ds_dir / "models_bop-compat"
bop_compat_dir.mkdir(exist_ok=True)
for file_path in models_dir.iterdir():
shutil.copy(file_path, bop_compat_dir / file_path.name)
- (bop_compat_dir / 'models_info.json').write_text(json.dumps(compat_infos))
+ (bop_compat_dir / "models_info.json").write_text(json.dumps(compat_infos))
- l_offsets = (ds_dir / 'offsets.txt').read_text().split('\n')[:-1]
- offsets = dict()
+ l_offsets = (ds_dir / "offsets.txt").read_text().split("\n")[:-1]
+ offsets = {}
for l_n in l_offsets:
obj_id, offset = l_n[:2], l_n[3:]
obj_id = int(obj_id)
@@ -37,13 +41,13 @@
offsets[str(obj_id)] = offset
# Models used in the original evaluation
- bop_compat_eval_dir = ds_dir / 'models_bop-compat_eval'
+ bop_compat_eval_dir = ds_dir / "models_bop-compat_eval"
bop_compat_eval_dir.mkdir(exist_ok=True)
- (bop_compat_eval_dir / 'models_info.json').write_text(json.dumps(compat_infos))
+ (bop_compat_eval_dir / "models_info.json").write_text(json.dumps(compat_infos))
for obj_id, orig_name in orig_names.items():
- xyz = (ds_dir / 'models_original' / orig_name / 'points.xyz').read_text()
- xyz = xyz.split('\n')[:-1]
- xyz = [list(map(float, xyz_n.split(' '))) for xyz_n in xyz]
+ xyz = (ds_dir / "models_original" / orig_name / "points.xyz").read_text()
+ xyz = xyz.split("\n")[:-1]
+ xyz = [list(map(float, xyz_n.split(" "))) for xyz_n in xyz]
vertices = np.array(xyz) * 1000 + offsets[obj_id]
mesh = trimesh.Trimesh(vertices=vertices)
- mesh.export(bop_compat_eval_dir / f'obj_{int(obj_id):06d}.ply')
+ mesh.export(bop_compat_eval_dir / f"obj_{int(obj_id):06d}.ply")
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/preprocess_bop_dataset.py b/happypose/pose_estimators/cosypose/cosypose/scripts/preprocess_bop_dataset.py
index a5ccc8a4..80dc6533 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/preprocess_bop_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/preprocess_bop_dataset.py
@@ -1,19 +1,22 @@
-from tqdm import tqdm
from PIL import Image
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset
+from tqdm import tqdm
+
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_scene_dataset,
+)
-if __name__ == '__main__':
- ds_name = 'itodd.pbr'
+if __name__ == "__main__":
+ ds_name = "itodd.pbr"
scene_ds = make_scene_dataset(ds_name)
for n in tqdm(range(len(scene_ds))):
rgb, mask, state = scene_ds[n]
- row = state['frame_info']
- scene_id, view_id = row['scene_id'], row['view_id']
+ row = state["frame_info"]
+ scene_id, view_id = row["scene_id"], row["view_id"]
view_id = int(view_id)
- view_id_str = f'{view_id:06d}'
- scene_id_str = f'{int(scene_id):06d}'
+ view_id_str = f"{view_id:06d}"
+ scene_id_str = f"{int(scene_id):06d}"
scene_dir = scene_ds.base_dir / scene_id_str
- p = scene_dir / 'mask_visib' / f'{view_id_str}_all.png'
+ p = scene_dir / "mask_visib" / f"{view_id_str}_all.png"
Image.fromarray(mask.numpy()).save(p)
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval.py
index f8bcafc9..6f4591b1 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval.py
@@ -1,43 +1,50 @@
-import subprocess
-import shutil
-from tqdm import tqdm
-import torch
-import os
import argparse
+import os
+import shutil
+import subprocess
import sys
from pathlib import Path
-from happypose.pose_estimators.cosypose.cosypose.config import PROJECT_DIR, RESULTS_DIR
+import torch
+from tqdm import tqdm
+
+from happypose.pose_estimators.cosypose.cosypose.config import PROJECT_DIR, RESULTS_DIR
-TOOLKIT_DIR = Path(PROJECT_DIR / 'deps' / 'bop_toolkit_challenge')
-EVAL_SCRIPT_PATH = TOOLKIT_DIR / 'scripts/eval_bop19.py'
-DUMMY_EVAL_SCRIPT_PATH = TOOLKIT_DIR / 'scripts/eval_bop19_dummy.py'
+TOOLKIT_DIR = Path(PROJECT_DIR / "deps" / "bop_toolkit_challenge")
+EVAL_SCRIPT_PATH = TOOLKIT_DIR / "scripts/eval_bop19.py"
+DUMMY_EVAL_SCRIPT_PATH = TOOLKIT_DIR / "scripts/eval_bop19_dummy.py"
sys.path.append(TOOLKIT_DIR.as_posix())
from bop_toolkit_lib import inout # noqa
-# from bop_toolkit_lib.config import results_path as BOP_RESULTS_PATH # noqa
+
+# from bop_toolkit_lib.config import results_path as BOP_RESULTS_PATH
def main():
- parser = argparse.ArgumentParser('Bop evaluation')
- parser.add_argument('--result_id', default='', type=str)
- parser.add_argument('--method', default='', type=str)
- parser.add_argument('--dataset', default='', type=str)
- parser.add_argument('--split', default='test', type=str)
- parser.add_argument('--csv_path', default='', type=str)
- parser.add_argument('--dummy', action='store_true')
- parser.add_argument('--convert_only', action='store_true')
+ parser = argparse.ArgumentParser("Bop evaluation")
+ parser.add_argument("--result_id", default="", type=str)
+ parser.add_argument("--method", default="", type=str)
+ parser.add_argument("--dataset", default="", type=str)
+ parser.add_argument("--split", default="test", type=str)
+ parser.add_argument("--csv_path", default="", type=str)
+ parser.add_argument("--dummy", action="store_true")
+ parser.add_argument("--convert_only", action="store_true")
args = parser.parse_args()
run_evaluation(args)
def run_evaluation(args):
- results_path = RESULTS_DIR / args.result_id / f'dataset={args.dataset}' / 'results.pth.tar'
+ results_path = (
+ RESULTS_DIR / args.result_id / f"dataset={args.dataset}" / "results.pth.tar"
+ )
csv_path = args.csv_path
convert_results(results_path, csv_path, method=args.method)
if not args.dummy:
- shutil.copy(csv_path, RESULTS_DIR / args.result_id / f'dataset={args.dataset}' / csv_path.name)
+ shutil.copy(
+ csv_path,
+ RESULTS_DIR / args.result_id / f"dataset={args.dataset}" / csv_path.name,
+ )
if not args.convert_only:
run_bop_evaluation(csv_path, dummy=args.dummy)
@@ -45,7 +52,7 @@ def run_evaluation(args):
def convert_results(results_path, out_csv_path, method):
- predictions = torch.load(results_path)['predictions']
+ predictions = torch.load(results_path)["predictions"]
predictions = predictions[method]
print("Predictions from:", results_path)
print("Method:", method)
@@ -57,14 +64,18 @@ def convert_results(results_path, out_csv_path, method):
t = TCO_n[:3, -1] * 1e3 # m -> mm conversion
R = TCO_n[:3, :3]
row = predictions.infos.iloc[n]
- obj_id = int(row.label.split('_')[-1])
+ obj_id = int(row.label.split("_")[-1])
score = row.score
time = row.time
- pred = dict(scene_id=row.scene_id,
- im_id=row.view_id,
- obj_id=obj_id,
- score=score,
- t=t, R=R, time=time)
+ pred = {
+ "scene_id": row.scene_id,
+ "im_id": row.view_id,
+ "obj_id": obj_id,
+ "score": score,
+ "t": t,
+ "R": R,
+ "time": time,
+ }
preds.append(pred)
print("Wrote:", out_csv_path)
inout.save_bop_results(out_csv_path, preds)
@@ -73,17 +84,25 @@ def convert_results(results_path, out_csv_path, method):
def run_bop_evaluation(filename, dummy=False):
myenv = os.environ.copy()
- myenv['PYTHONPATH'] = TOOLKIT_DIR.as_posix()
- myenv['COSYPOSE_DIR'] = PROJECT_DIR.as_posix()
+ myenv["PYTHONPATH"] = TOOLKIT_DIR.as_posix()
+ myenv["COSYPOSE_DIR"] = PROJECT_DIR.as_posix()
if dummy:
script_path = DUMMY_EVAL_SCRIPT_PATH
else:
script_path = EVAL_SCRIPT_PATH
- subprocess.call(['python', script_path.as_posix(),
- '--renderer_type', 'python',
- '--result_filenames', filename],
- env=myenv, cwd=TOOLKIT_DIR.as_posix())
-
-
-if __name__ == '__main__':
+ subprocess.call(
+ [
+ "python",
+ script_path.as_posix(),
+ "--renderer_type",
+ "python",
+ "--result_filenames",
+ filename,
+ ],
+ env=myenv,
+ cwd=TOOLKIT_DIR.as_posix(),
+ )
+
+
+if __name__ == "__main__":
main()
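Illustrative sketch (not part of the patch): the environment-override pattern run_bop_evaluation uses to point the BOP toolkit at the right code; the toolkit path and CSV name here are placeholders, not the repo's actual layout.

import os
import subprocess

toolkit_dir = "/path/to/bop_toolkit_challenge"  # hypothetical checkout
myenv = os.environ.copy()
myenv["PYTHONPATH"] = toolkit_dir  # lets the eval script import bop_toolkit_lib
subprocess.call(
    [
        "python",
        "scripts/eval_bop19.py",
        "--renderer_type",
        "python",
        "--result_filenames",
        "predictions.csv",  # placeholder result file
    ],
    env=myenv,
    cwd=toolkit_dir,
)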
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval_multi.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval_multi.py
index 840aefaa..fb675691 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval_multi.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop20_eval_multi.py
@@ -1,45 +1,53 @@
import argparse
import multiprocessing
from copy import deepcopy
-from happypose.pose_estimators.cosypose.cosypose.config import RESULTS_DIR
+
from happypose.pose_estimators.cosypose.cosypose.bop_config import BOP_CONFIG
-from happypose.pose_estimators.cosypose.cosypose.scripts.run_bop20_eval import run_evaluation
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ LOCAL_DATA_DIR,
+ RESULTS_DIR,
+)
+from happypose.pose_estimators.cosypose.cosypose.scripts.run_bop20_eval import (
+ run_evaluation,
+)
def main():
- parser = argparse.ArgumentParser('Bop multi evaluation')
- parser.add_argument('--result_id', default='', type=str)
- parser.add_argument('--method', default='maskrcnn_detections/refiner/iteration=4', type=str)
- parser.add_argument('--dummy', action='store_true')
- parser.add_argument('--convert_only', action='store_true')
+ parser = argparse.ArgumentParser("Bop multi evaluation")
+ parser.add_argument("--result_id", default="", type=str)
+ parser.add_argument(
+ "--method",
+ default="maskrcnn_detections/refiner/iteration=4",
+ type=str,
+ )
+ parser.add_argument("--dummy", action="store_true")
+ parser.add_argument("--convert_only", action="store_true")
args = parser.parse_args()
result_dir = RESULTS_DIR / args.result_id
result_ds_dirs = list(result_dir.iterdir())
- processes = dict()
+ processes = {}
for result_ds_dir in result_ds_dirs:
this_cfg = deepcopy(args)
- ds_name = str(result_ds_dir).split('=')[-1]
- has_test_set = len(BOP_CONFIG[ds_name]['test_ds_name']) > 0
+ ds_name = str(result_ds_dir).split("=")[-1]
+ has_test_set = len(BOP_CONFIG[ds_name]["test_ds_name"]) > 0
dummy = args.dummy or not has_test_set
convert_only = args.convert_only or not has_test_set
this_cfg.dummy = dummy
this_cfg.convert_only = convert_only
this_cfg.dataset = ds_name
- this_cfg.split = 'test'
+ this_cfg.split = "test"
- start_str = 'challenge2020dummy' if args.dummy else 'challenge2020'
- result_id_int = args.result_id.split('-')[-1]
- csv_path = LOCAL_DATA_DIR / 'bop_predictions_csv'
- split = 'test'
- csv_path = csv_path / f'{start_str}-{result_id_int}_{ds_name}-{split}.csv'
+ start_str = "challenge2020dummy" if args.dummy else "challenge2020"
+ result_id_int = args.result_id.split("-")[-1]
+ csv_path = LOCAL_DATA_DIR / "bop_predictions_csv"
+ split = "test"
+ csv_path = csv_path / f"{start_str}-{result_id_int}_{ds_name}-{split}.csv"
csv_path.parent.mkdir(exist_ok=True)
this_cfg.csv_path = csv_path
- proc = multiprocessing.Process(target=run_evaluation,
- kwargs=dict(args=this_cfg))
+ proc = multiprocessing.Process(target=run_evaluation, kwargs={"args": this_cfg})
proc.start()
processes[ds_name] = (this_cfg, proc)
@@ -50,12 +58,14 @@ def main():
print(f"{'-'*80}")
for ds_name, (cfg, _) in processes.items():
- results_dir = LOCAL_DATA_DIR / 'bop_eval_outputs' / cfg.csv_path.with_suffix('').name
- scores_path = results_dir / 'scores_bop19.json'
+ results_dir = (
+ LOCAL_DATA_DIR / "bop_eval_outputs" / cfg.csv_path.with_suffix("").name
+ )
+ scores_path = results_dir / "scores_bop19.json"
print(f"{'-'*80}")
- print(f'{ds_name}: {scores_path}')
+ print(f"{ds_name}: {scores_path}")
print(scores_path.read_text())
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
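Illustrative sketch (not part of the patch): the one-process-per-dataset fan-out used in main above, reduced to a self-contained toy. run_evaluation here is a stand-in for the imported function, and the join loop is an assumption (the real script's wait logic sits in context not shown in this hunk).

import multiprocessing


def run_evaluation(args):  # stand-in for the imported run_evaluation
    print("evaluating", args)


if __name__ == "__main__":
    processes = {}
    for ds_name in ["tless", "ycbv"]:
        proc = multiprocessing.Process(target=run_evaluation, kwargs={"args": ds_name})
        proc.start()
        processes[ds_name] = proc
    for proc in processes.values():
        proc.join()  # assumed: wait for every per-dataset evaluation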
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_eval.py
index 524c3fe7..b8704400 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_eval.py
@@ -1,36 +1,48 @@
-import subprocess
-from tqdm import tqdm
-import torch
-import numpy as np
-import os
import argparse
+import os
+import subprocess
import sys
-from happypose.pose_estimators.cosypose.cosypose.config import PROJECT_DIR, LOCAL_DATA_DIR, RESULTS_DIR, MEMORY, BOP_TOOLKIT_DIR
-SISO_SCRIPT_PATH = BOP_TOOLKIT_DIR / 'scripts/eval_siso.py'
-VIVO_SCRIPT_PATH = BOP_TOOLKIT_DIR / 'scripts/eval_vivo.py'
+import numpy as np
+import torch
+from tqdm import tqdm
+
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ BOP_TOOLKIT_DIR,
+ LOCAL_DATA_DIR,
+ MEMORY,
+ PROJECT_DIR,
+ RESULTS_DIR,
+)
+
+SISO_SCRIPT_PATH = BOP_TOOLKIT_DIR / "scripts/eval_siso.py"
+VIVO_SCRIPT_PATH = BOP_TOOLKIT_DIR / "scripts/eval_vivo.py"
sys.path.append(BOP_TOOLKIT_DIR.as_posix())
from bop_toolkit_lib import inout # noqa
def main():
- parser = argparse.ArgumentParser('Bop evaluation')
- parser.add_argument('--result_id', default='', type=str)
- parser.add_argument('--method', default='', type=str)
- parser.add_argument('--vivo', action='store_true')
+ parser = argparse.ArgumentParser("Bop evaluation")
+ parser.add_argument("--result_id", default="", type=str)
+ parser.add_argument("--method", default="", type=str)
+ parser.add_argument("--vivo", action="store_true")
args = parser.parse_args()
n_rand = np.random.randint(1e6)
- csv_path = LOCAL_DATA_DIR / 'bop_predictions_csv' / f'cosypose{n_rand}-eccv2020_tless-test-primesense.csv'
+ csv_path = (
+ LOCAL_DATA_DIR
+ / "bop_predictions_csv"
+ / f"cosypose{n_rand}-eccv2020_tless-test-primesense.csv"
+ )
csv_path.parent.mkdir(exist_ok=True)
- results_path = RESULTS_DIR / args.result_id / 'results.pth.tar'
+ results_path = RESULTS_DIR / args.result_id / "results.pth.tar"
convert_results(results_path, csv_path, method=args.method)
run_evaluation(csv_path, args.vivo)
@MEMORY.cache
def convert_results(results_path, out_csv_path, method):
- predictions = torch.load(results_path)['predictions'][method]
+ predictions = torch.load(results_path)["predictions"][method]
print("Predictions from:", results_path)
print("Method:", method)
print("Number of predictions: ", len(predictions))
@@ -41,14 +53,18 @@ def convert_results(results_path, out_csv_path, method):
t = TCO_n[:3, -1] * 1e3 # m -> mm conversion
R = TCO_n[:3, :3]
row = predictions.infos.iloc[n]
- obj_id = int(row.label.split('_')[-1])
+ obj_id = int(row.label.split("_")[-1])
score = row.score
time = -1.0
- pred = dict(scene_id=row.scene_id,
- im_id=row.view_id,
- obj_id=obj_id,
- score=score,
- t=t, R=R, time=time)
+ pred = {
+ "scene_id": row.scene_id,
+ "im_id": row.view_id,
+ "obj_id": obj_id,
+ "score": score,
+ "t": t,
+ "R": R,
+ "time": time,
+ }
preds.append(pred)
print("Wrote:", out_csv_path)
inout.save_bop_results(out_csv_path, preds)
@@ -61,14 +77,22 @@ def run_evaluation(filename, is_vivo):
else:
script_path = SISO_SCRIPT_PATH
myenv = os.environ.copy()
- myenv['PYTHONPATH'] = BOP_TOOLKIT_DIR.as_posix()
- myenv['COSYPOSE_DIR'] = PROJECT_DIR.as_posix()
+ myenv["PYTHONPATH"] = BOP_TOOLKIT_DIR.as_posix()
+ myenv["COSYPOSE_DIR"] = PROJECT_DIR.as_posix()
print(script_path)
- subprocess.call(['python', script_path.as_posix(),
- '--renderer_type', 'python',
- '--result_filename', filename],
- env=myenv, cwd=BOP_TOOLKIT_DIR.as_posix())
+ subprocess.call(
+ [
+ "python",
+ script_path.as_posix(),
+ "--renderer_type",
+ "python",
+ "--result_filename",
+ filename,
+ ],
+ env=myenv,
+ cwd=BOP_TOOLKIT_DIR.as_posix(),
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
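Illustrative sketch (not part of the patch): one BOP result row as assembled in convert_results above, with a hypothetical identity pose in place of a real prediction.

import numpy as np

TCO = np.eye(4)  # hypothetical camera-from-object pose, translation in metres
pred = {
    "scene_id": 1,
    "im_id": 42,
    "obj_id": 5,
    "score": 0.9,
    "t": TCO[:3, -1] * 1e3,  # m -> mm, the unit BOP CSVs expect
    "R": TCO[:3, :3],
    "time": -1.0,  # -1 marks the runtime as unreported
}
print(pred["t"])  # [0. 0. 0.]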
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py
index b6db3c3f..997c955e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py
@@ -1,44 +1,77 @@
+import argparse
import logging
-import numpy as np
from copy import deepcopy
from pathlib import Path
-import yaml
-import torch
-import argparse
-from happypose.pose_estimators.cosypose.cosypose.bop_config import BOP_CONFIG
-from happypose.pose_estimators.cosypose.cosypose.bop_config import PBR_COARSE, PBR_REFINER, PBR_DETECTORS
-from happypose.pose_estimators.cosypose.cosypose.bop_config import SYNT_REAL_COARSE, SYNT_REAL_REFINER, SYNT_REAL_DETECTORS
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
+import numpy as np
+import torch
+import torch.multiprocessing
+import yaml
-from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import format_results
+from happypose.pose_estimators.cosypose.cosypose.bop_config import (
+ BOP_CONFIG,
+ PBR_COARSE,
+ PBR_DETECTORS,
+ PBR_REFINER,
+ SYNT_REAL_COARSE,
+ SYNT_REAL_DETECTORS,
+ SYNT_REAL_REFINER,
+)
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_object_dataset,
+ make_scene_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501
+ MultiViewWrapper,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.bop_predictions import ( # noqa: E501
+ BopPredictionRunner,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import (
+ format_results,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
+from happypose.pose_estimators.cosypose.cosypose.integrated.icp_refiner import (
+ ICPRefiner,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import (
+ MultiviewScenePredictor,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.pose_predictor import (
+ CoarseRefinePosePredictor,
+)
# Pose estimator
-from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import create_model_refiner, create_model_coarse
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import check_update_config as check_update_config_pose
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import BulletBatchRenderer
-from happypose.pose_estimators.cosypose.cosypose.integrated.pose_predictor import CoarseRefinePosePredictor
-from happypose.pose_estimators.cosypose.cosypose.integrated.icp_refiner import ICPRefiner
-from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import MultiviewScenePredictor
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
+from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import (
+ MeshDataBase,
+)
+from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( # noqa: E501
+ BulletBatchRenderer,
+)
# Detection
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import create_model_detector
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import check_update_config as check_update_config_detector
-from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
-
-from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.bop_predictions import BopPredictionRunner
-
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_tmp_dir, get_rank
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import init_distributed_mode
-
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ check_update_config as check_update_config_detector,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ create_model_detector,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ check_update_config as check_update_config_pose,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ create_model_coarse,
+ create_model_refiner,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ init_distributed_mode,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-
-import torch.multiprocessing
-torch.multiprocessing.set_sharing_strategy('file_system')
+torch.multiprocessing.set_sharing_strategy("file_system")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@@ -47,12 +80,12 @@
def load_detector(run_id):
run_dir = EXP_DIR / run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config_detector(cfg)
label_to_category_id = cfg.label_to_category_id
model = create_model_detector(cfg, len(label_to_category_id))
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.cuda().eval()
model.cfg = cfg
@@ -63,7 +96,7 @@ def load_detector(run_id):
def load_pose_models(coarse_run_id, refiner_run_id=None, n_workers=8):
run_dir = EXP_DIR / coarse_run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config_pose(cfg)
object_ds = make_object_dataset(cfg.object_ds_name)
@@ -73,16 +106,20 @@ def load_pose_models(coarse_run_id, refiner_run_id=None, n_workers=8):
def load_model(run_id):
if run_id is None:
- return
+ return None
run_dir = EXP_DIR / run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config_pose(cfg)
if cfg.train_refiner:
- model = create_model_refiner(cfg, renderer=renderer, mesh_db=mesh_db_batched)
+ model = create_model_refiner(
+ cfg,
+ renderer=renderer,
+ mesh_db=mesh_db_batched,
+ )
else:
model = create_model_coarse(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.cuda().eval()
model.cfg = cfg
@@ -91,8 +128,10 @@ def load_model(run_id):
coarse_model = load_model(coarse_run_id)
refiner_model = load_model(refiner_run_id)
- model = CoarseRefinePosePredictor(coarse_model=coarse_model,
- refiner_model=refiner_model)
+ model = CoarseRefinePosePredictor(
+ coarse_model=coarse_model,
+ refiner_model=refiner_model,
+ )
return model, mesh_db
@@ -120,11 +159,17 @@ def run_inference(args):
scene_ds_multi = MultiViewWrapper(scene_ds, n_views=args.n_views)
if args.n_groups is not None:
- scene_ds_multi.frame_index = scene_ds_multi.frame_index[:args.n_groups].reset_index(drop=True)
-
- pred_kwargs = dict()
- pred_runner = BopPredictionRunner(scene_ds_multi, batch_size=args.pred_bsz,
- cache_data=False, n_workers=args.n_workers)
+ scene_ds_multi.frame_index = scene_ds_multi.frame_index[
+ : args.n_groups
+ ].reset_index(drop=True)
+
+ pred_kwargs = {}
+ pred_runner = BopPredictionRunner(
+ scene_ds_multi,
+ batch_size=args.pred_bsz,
+ cache_data=False,
+ n_workers=args.n_workers,
+ )
detector = load_detector(args.detector_run_id)
pose_predictor, mesh_db = load_pose_models(
@@ -136,31 +181,35 @@ def run_inference(args):
icp_refiner = None
if args.icp:
renderer = pose_predictor.coarse_model.renderer
- icp_refiner = ICPRefiner(mesh_db,
- renderer=renderer,
- resolution=pose_predictor.coarse_model.cfg.input_resize)
+ icp_refiner = ICPRefiner(
+ mesh_db,
+ renderer=renderer,
+ resolution=pose_predictor.coarse_model.cfg.input_resize,
+ )
mv_predictor = None
if args.n_views > 1:
mv_predictor = MultiviewScenePredictor(mesh_db)
- pred_kwargs.update({
- 'maskrcnn_detections': dict(
- detector=detector,
- pose_predictor=pose_predictor,
- n_coarse_iterations=args.n_coarse_iterations,
- n_refiner_iterations=args.n_refiner_iterations,
- icp_refiner=icp_refiner,
- mv_predictor=mv_predictor,
- )
- })
+ pred_kwargs.update(
+ {
+ "maskrcnn_detections": {
+ "detector": detector,
+ "pose_predictor": pose_predictor,
+ "n_coarse_iterations": args.n_coarse_iterations,
+ "n_refiner_iterations": args.n_refiner_iterations,
+ "icp_refiner": icp_refiner,
+ "mv_predictor": mv_predictor,
+ },
+ },
+ )
- all_predictions = dict()
+ all_predictions = {}
for pred_prefix, pred_kwargs_n in pred_kwargs.items():
logger.info(f"Prediction: {pred_prefix}")
preds = pred_runner.get_predictions(**pred_kwargs_n)
for preds_name, preds_n in preds.items():
- all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n
+ all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n
logger.info("Done with inference.")
torch.distributed.barrier()
@@ -171,11 +220,11 @@ def run_inference(args):
if get_rank() == 0:
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
- logger.info(f'Finished inference on {args.ds_name}')
- results = format_results(all_predictions, dict(), dict())
- torch.save(results, save_dir / 'results.pth.tar')
- (save_dir / 'config.yaml').write_text(yaml.dump(args))
- logger.info(f'Saved predictions in {save_dir}')
+ logger.info(f"Finished inference on {args.ds_name}")
+ results = format_results(all_predictions, {}, {})
+ torch.save(results, save_dir / "results.pth.tar")
+ (save_dir / "config.yaml").write_text(yaml.dump(args))
+ logger.info(f"Saved predictions in {save_dir}")
torch.distributed.barrier()
return
@@ -184,21 +233,21 @@ def run_inference(args):
def main():
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
- if 'cosypose' in logger.name:
+ if "cosypose" in logger.name:
logger.setLevel(logging.DEBUG)
- parser = argparse.ArgumentParser('Evaluation')
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--comment', default='', type=str)
- parser.add_argument('--id', default=-1, type=int)
- parser.add_argument('--config', default='bop-pbr', type=str)
- parser.add_argument('--nviews', dest='n_views', default=1, type=int)
- parser.add_argument('--icp', action='store_true')
+ parser = argparse.ArgumentParser("Evaluation")
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--comment", default="", type=str)
+ parser.add_argument("--id", default=-1, type=int)
+ parser.add_argument("--config", default="bop-pbr", type=str)
+ parser.add_argument("--nviews", dest="n_views", default=1, type=int)
+ parser.add_argument("--icp", action="store_true")
args = parser.parse_args()
init_distributed_mode()
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
cfg.n_workers = 8
cfg.pred_bsz = 1
@@ -224,46 +273,48 @@ def main():
args.id = n_rand
if args.icp:
- args.comment = f'icp-{args.comment}'
+ args.comment = f"icp-{args.comment}"
if args.n_views > 1:
- args.comment = f'nviews={args.n_views}-{args.comment}'
+ args.comment = f"nviews={args.n_views}-{args.comment}"
- save_dir = RESULTS_DIR / f'{args.config}-{args.comment}-{args.id}'
- logger.info(f'Save dir: {save_dir}')
+ save_dir = RESULTS_DIR / f"{args.config}-{args.comment}-{args.id}"
+ logger.info(f"Save dir: {save_dir}")
- if args.config == 'bop-pbr':
+ if args.config == "bop-pbr":
MODELS_DETECTORS = PBR_DETECTORS
MODELS_COARSE = PBR_COARSE
MODELS_REFINER = PBR_REFINER
- elif args.config == 'bop-synt+real':
+ elif args.config == "bop-synt+real":
MODELS_DETECTORS = SYNT_REAL_DETECTORS
MODELS_COARSE = SYNT_REAL_COARSE
MODELS_REFINER = SYNT_REAL_REFINER
if args.n_views > 1:
- ds_names = ['hb', 'tless', 'ycbv']
+ ds_names = ["hb", "tless", "ycbv"]
else:
- ds_names = ['hb', 'icbin', 'itodd', 'lmo', 'tless', 'tudl', 'ycbv']
+ ds_names = ["hb", "icbin", "itodd", "lmo", "tless", "tudl", "ycbv"]
for ds_name in ds_names:
this_cfg = deepcopy(cfg)
- this_cfg.ds_name = BOP_CONFIG[ds_name]['inference_ds_name'][0]
- this_cfg.save_dir = save_dir / f'dataset={ds_name}'
+ this_cfg.ds_name = BOP_CONFIG[ds_name]["inference_ds_name"][0]
+ this_cfg.save_dir = save_dir / f"dataset={ds_name}"
this_cfg.detector_run_id = MODELS_DETECTORS.get(ds_name)
this_cfg.coarse_run_id = MODELS_COARSE.get(ds_name)
this_cfg.refiner_run_id = MODELS_REFINER.get(ds_name)
- if this_cfg.detector_run_id is None \
- or this_cfg.coarse_run_id is None \
- or this_cfg.refiner_run_id is None:
- logger.info(f'Skipped {ds_name}')
+ if (
+ this_cfg.detector_run_id is None
+ or this_cfg.coarse_run_id is None
+ or this_cfg.refiner_run_id is None
+ ):
+ logger.info(f"Skipped {ds_name}")
continue
run_inference(this_cfg)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
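Illustrative sketch (not part of the patch): the checkpoint round-trip in load_detector/load_model above, shrunk to a toy torch module. The file path and the Linear layer are stand-ins for the experiment directory and the cfg-built model.

import torch

model = torch.nn.Linear(4, 2)  # stand-in for create_model_detector(cfg, ...)
torch.save({"state_dict": model.state_dict()}, "/tmp/checkpoint.pth.tar")
ckpt = torch.load("/tmp/checkpoint.pth.tar")["state_dict"]
model.load_state_dict(ckpt)
model = model.eval()  # the real script also moves the model to .cuda()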
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py
index 9f9d0cc8..26d87463 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py
@@ -1,43 +1,51 @@
import argparse
-import subprocess
-from tqdm import tqdm
import os
+import subprocess
+
import numpy as np
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from tqdm import tqdm
+from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_scene_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501
+ MultiViewWrapper,
+)
-if __name__ == '__main__':
- parser = argparse.ArgumentParser('Running COLMAP')
- parser.add_argument('--dataset', type=str)
- parser.add_argument('--nviews', type=int, default=4)
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser("Running COLMAP")
+ parser.add_argument("--dataset", type=str)
+ parser.add_argument("--nviews", type=int, default=4)
args = parser.parse_args()
assert args.nviews > 1
- if args.dataset == 'tless':
- scene_ds = make_scene_dataset('tless.primesense.test.bop19')
- elif args.dataset == 'ycbv':
- scene_ds = make_scene_dataset('ycbv.test.keyframes')
+ if args.dataset == "tless":
+ scene_ds = make_scene_dataset("tless.primesense.test.bop19")
+ elif args.dataset == "ycbv":
+ scene_ds = make_scene_dataset("ycbv.test.keyframes")
else:
raise ValueError
scene_ds = MultiViewWrapper(scene_ds, n_views=args.nviews)
- colmap_dir = LOCAL_DATA_DIR / 'colmap' / f'{args.dataset}_nviews={args.nviews}'
+ colmap_dir = LOCAL_DATA_DIR / "colmap" / f"{args.dataset}_nviews={args.nviews}"
colmap_dir.mkdir(exist_ok=True, parents=True)
def path_to_im(scene_id, view_id):
- scene = f'{int(scene_id):06d}'
- view = f'{int(view_id):06d}.png'
- path = scene_ds.unwrapped.base_dir / scene / 'rgb' / view
+ scene = f"{int(scene_id):06d}"
+ view = f"{int(view_id):06d}.png"
+ path = scene_ds.unwrapped.base_dir / scene / "rgb" / view
return path
- for group_id, group in tqdm(scene_ds.frame_index.groupby('group_id')):
- view_ids = group['view_ids'].values[0]
- scene_id = np.unique(group['scene_id']).item()
- views_str = '-'.join(map(str, view_ids))
- group_dir = colmap_dir / f'{args.dataset}_groupid={group_id}_scene={scene_id}-views={views_str}'
- group_images_dir = group_dir / 'images'
+ for group_id, group in tqdm(scene_ds.frame_index.groupby("group_id")):
+ view_ids = group["view_ids"].values[0]
+ scene_id = np.unique(group["scene_id"]).item()
+ views_str = "-".join(map(str, view_ids))
+ group_dir = (
+ colmap_dir
+ / f"{args.dataset}_groupid={group_id}_scene={scene_id}-views={views_str}"
+ )
+ group_images_dir = group_dir / "images"
group_dir.mkdir(exist_ok=True)
group_images_dir.mkdir(exist_ok=True)
for view_id in view_ids:
@@ -48,8 +56,13 @@ def path_to_im(scene_id, view_id):
pass
colmap_ds_path = group_dir
- cmd = ['colmap', 'automatic_reconstructor',
- '--workspace_path', colmap_ds_path.as_posix(),
- '--image_path', (colmap_ds_path / 'images').as_posix()]
+ cmd = [
+ "colmap",
+ "automatic_reconstructor",
+ "--workspace_path",
+ colmap_ds_path.as_posix(),
+ "--image_path",
+ (colmap_ds_path / "images").as_posix(),
+ ]
print(group_dir)
subprocess.run(cmd)
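Illustrative sketch (not part of the patch): the COLMAP call built above, with a placeholder workspace; it assumes a colmap binary on PATH and an images/ subfolder already populated with the group's views.

import subprocess
from pathlib import Path

workspace = Path("/tmp/colmap_ws")  # hypothetical group_dir
cmd = [
    "colmap",
    "automatic_reconstructor",
    "--workspace_path",
    workspace.as_posix(),
    "--image_path",
    (workspace / "images").as_posix(),
]
subprocess.run(cmd)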
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py
index 678db8ff..848bf112 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py
@@ -1,60 +1,94 @@
-from happypose.pose_estimators.cosypose.cosypose.utils.tqdm import patch_tqdm; patch_tqdm() # noqa
-import torch.multiprocessing
-import time
-import json
+# ruff: noqa: E402
+from happypose.pose_estimators.cosypose.cosypose.utils.tqdm import patch_tqdm
-from collections import OrderedDict
-import yaml
+patch_tqdm()
import argparse
+import json
+import logging
+import pickle as pkl
+import time
+from collections import OrderedDict
-import torch
import numpy as np
import pandas as pd
-import pickle as pkl
-import logging
-
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, MEMORY, RESULTS_DIR, LOCAL_DATA_DIR
-
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import init_distributed_mode, get_world_size
-
-from happypose.pose_estimators.cosypose.cosypose.lib3d import Transform
-
-from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import create_model_refiner, create_model_coarse, check_update_config
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import BulletBatchRenderer
-from happypose.pose_estimators.cosypose.cosypose.integrated.pose_predictor import CoarseRefinePosePredictor
-from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import MultiviewScenePredictor
-
-from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.pose_meters import PoseErrorMeter
-from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.multiview_predictions import MultiviewPredictionRunner
-from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.pose_eval import PoseEvaluation
-
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import format_results, gather_predictions
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_rank
-
+import torch
+import torch.multiprocessing
+import yaml
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ EXP_DIR,
+ LOCAL_DATA_DIR,
+ MEMORY,
+ RESULTS_DIR,
+)
from happypose.pose_estimators.cosypose.cosypose.datasets.bop import remap_bop_targets
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
-
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_object_dataset,
+ make_scene_dataset,
+)
from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import ListSampler
+from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501
+ MultiViewWrapper,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.pose_eval import ( # noqa: E501
+ PoseEvaluation,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.pose_meters import (
+ PoseErrorMeter,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.multiview_predictions import ( # noqa: E501
+ MultiviewPredictionRunner,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import (
+ format_results,
+ gather_predictions,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import (
+ MultiviewScenePredictor,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.pose_predictor import (
+ CoarseRefinePosePredictor,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import (
+ MeshDataBase,
+)
+from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( # noqa: E501
+ BulletBatchRenderer,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ check_update_config,
+ create_model_coarse,
+ create_model_refiner,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_world_size,
+ init_distributed_mode,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+from happypose.toolbox.lib3d.transform import Transform
+
logger = get_logger(__name__)
-torch.multiprocessing.set_sharing_strategy('file_system')
+torch.multiprocessing.set_sharing_strategy("file_system")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@MEMORY.cache
def load_posecnn_results():
- results_path = LOCAL_DATA_DIR / 'saved_detections' / 'ycbv_posecnn.pkl'
+ results_path = LOCAL_DATA_DIR / "saved_detections" / "ycbv_posecnn.pkl"
results = pkl.loads(results_path.read_bytes())
infos, poses, bboxes = [], [], []
- l_offsets = (LOCAL_DATA_DIR / 'bop_datasets/ycbv' / 'offsets.txt').read_text().strip().split('\n')
- ycb_offsets = dict()
+ l_offsets = (
+ (LOCAL_DATA_DIR / "bop_datasets/ycbv" / "offsets.txt")
+ .read_text()
+ .strip()
+ .split("\n")
+ )
+ ycb_offsets = {}
for l_n in l_offsets:
obj_id, offset = l_n[:2], l_n[3:]
obj_id = int(obj_id)
@@ -68,20 +102,22 @@ def mat_from_qt(qt):
return Transform(xyzw, t)
for scene_view_str, result in results.items():
- scene_id, view_id = scene_view_str.split('/')
+ scene_id, view_id = scene_view_str.split("/")
scene_id, view_id = int(scene_id), int(view_id)
- n_dets = result['rois'].shape[0]
+ n_dets = result["rois"].shape[0]
for n in range(n_dets):
- obj_id = result['rois'][:, 1].astype(int)[n]
- label = f'ycbv-obj_{obj_id:06d}'
- infos.append(dict(
- scene_id=scene_id,
- view_id=view_id,
- score=result['rois'][n, 1],
- label=label,
- ))
- bboxes.append(result['rois'][n, 2:6])
- pose = mat_from_qt(result['poses'][n])
+ obj_id = result["rois"][:, 1].astype(int)[n]
+ label = f"ycbv-obj_{obj_id:06d}"
+ infos.append(
+ {
+ "scene_id": scene_id,
+ "view_id": view_id,
+ "score": result["rois"][n, 1],
+ "label": label,
+ },
+ )
+ bboxes.append(result["rois"][n, 2:6])
+ pose = mat_from_qt(result["poses"][n])
offset = ycb_offsets[obj_id]
pose = pose * Transform((0, 0, 0, 1), offset).inverse()
poses.append(pose.toHomogeneousMatrix())
@@ -97,35 +133,45 @@ def mat_from_qt(qt):
@MEMORY.cache
def load_pix2pose_results(all_detections=True, remove_incorrect_poses=False):
if all_detections:
- results_path = LOCAL_DATA_DIR / 'saved_detections' / 'tless_pix2pose_retinanet_vivo_all.pkl'
+ results_path = (
+ LOCAL_DATA_DIR
+ / "saved_detections"
+ / "tless_pix2pose_retinanet_vivo_all.pkl"
+ )
else:
- results_path = LOCAL_DATA_DIR / 'saved_detections' / 'tless_pix2pose_retinanet_siso_top1.pkl'
+ results_path = (
+ LOCAL_DATA_DIR
+ / "saved_detections"
+ / "tless_pix2pose_retinanet_siso_top1.pkl"
+ )
pix2pose_results = pkl.loads(results_path.read_bytes())
infos, poses, bboxes = [], [], []
for key, result in pix2pose_results.items():
- scene_id, view_id = key.split('/')
+ scene_id, view_id = key.split("/")
scene_id, view_id = int(scene_id), int(view_id)
- boxes = result['rois']
- scores = result['scores']
- poses_ = result['poses']
+ boxes = result["rois"]
+ scores = result["scores"]
+ poses_ = result["poses"]
- labels = result['labels_txt']
+ labels = result["labels_txt"]
new_boxes = boxes.copy()
- new_boxes[:,0] = boxes[:,1]
- new_boxes[:,1] = boxes[:,0]
- new_boxes[:,2] = boxes[:,3]
- new_boxes[:,3] = boxes[:,2]
+ new_boxes[:, 0] = boxes[:, 1]
+ new_boxes[:, 1] = boxes[:, 0]
+ new_boxes[:, 2] = boxes[:, 3]
+ new_boxes[:, 3] = boxes[:, 2]
for o, label in enumerate(labels):
t = poses_[o][:3, -1]
if remove_incorrect_poses and (np.sum(t) == 0 or np.max(t) > 100):
pass
else:
- infos.append(dict(
- scene_id=scene_id,
- view_id=view_id,
- score=scores[o],
- label=label,
- ))
+ infos.append(
+ {
+ "scene_id": scene_id,
+ "view_id": view_id,
+ "score": scores[o],
+ "label": label,
+ },
+ )
bboxes.append(new_boxes[o])
poses.append(poses_[o])
@@ -143,15 +189,15 @@ def get_pose_meters(scene_ds, ds_name):
compute_add = False
spheres_overlap_check = True
large_match_threshold_diameter_ratio = 0.5
- if ds_name == 'tless.primesense.test.bop19':
- targets_filename = 'test_targets_bop19.json'
+ if ds_name == "tless.primesense.test.bop19":
+ targets_filename = "test_targets_bop19.json"
visib_gt_min = -1
n_top = -1 # Given by targets
- elif ds_name == 'tless.primesense.test':
- targets_filename = 'all_target_tless.json'
+ elif ds_name == "tless.primesense.test":
+ targets_filename = "all_target_tless.json"
n_top = 1
visib_gt_min = 0.1
- elif 'ycbv' in ds_name:
+ elif "ycbv" in ds_name:
compute_add = True
visib_gt_min = -1
targets_filename = None
@@ -160,10 +206,11 @@ def get_pose_meters(scene_ds, ds_name):
else:
raise ValueError
- if 'tless' in ds_name:
- object_ds_name = 'tless.eval'
- elif 'ycbv' in ds_name:
- object_ds_name = 'ycbv.bop-compat.eval' # This is important for definition of symmetric objects
+ if "tless" in ds_name:
+ object_ds_name = "tless.eval"
+ elif "ycbv" in ds_name:
+ # This is important for definition of symmetric objects
+ object_ds_name = "ycbv.bop-compat.eval"
else:
raise ValueError
@@ -177,52 +224,70 @@ def get_pose_meters(scene_ds, ds_name):
object_ds = make_object_dataset(object_ds_name)
mesh_db = MeshDataBase.from_object_ds(object_ds)
- error_types = ['ADD-S'] + (['ADD(-S)'] if compute_add else [])
-
- base_kwargs = dict(
- mesh_db=mesh_db,
- exact_meshes=True,
- sample_n_points=None,
- errors_bsz=1,
+ error_types = ["ADD-S"] + (["ADD(-S)"] if compute_add else [])
+ base_kwargs = {
+ "mesh_db": mesh_db,
+ "exact_meshes": True,
+ "sample_n_points": None,
+ "errors_bsz": 1,
# BOP-Like parameters
- n_top=n_top,
- visib_gt_min=visib_gt_min,
- targets=targets,
- spheres_overlap_check=spheres_overlap_check,
- )
+ "n_top": n_top,
+ "visib_gt_min": visib_gt_min,
+ "targets": targets,
+ "spheres_overlap_check": spheres_overlap_check,
+ }
- meters = dict()
+ meters = {}
for error_type in error_types:
# For measuring ADD-S AUC on T-LESS and average errors on ycbv/tless.
- meters[f'{error_type}_ntop=BOP_matching=OVERLAP'] = PoseErrorMeter(
- error_type=error_type, consider_all_predictions=False,
+ meters[f"{error_type}_ntop=BOP_matching=OVERLAP"] = PoseErrorMeter(
+ error_type=error_type,
+ consider_all_predictions=False,
match_threshold=large_match_threshold_diameter_ratio,
- report_error_stats=True, report_error_AUC=True, **base_kwargs)
+ report_error_stats=True,
+ report_error_AUC=True,
+ **base_kwargs,
+ )
- if 'ycbv' in ds_name:
+ if "ycbv" in ds_name:
# For fair comparison with PoseCNN/DeepIM on YCB-Video ADD(-S) AUC
- meters[f'{error_type}_ntop=1_matching=CLASS'] = PoseErrorMeter(
- error_type=error_type, consider_all_predictions=False,
+ meters[f"{error_type}_ntop=1_matching=CLASS"] = PoseErrorMeter(
+ error_type=error_type,
+ consider_all_predictions=False,
match_threshold=np.inf,
- report_error_stats=False, report_error_AUC=True, **base_kwargs)
-
- if 'tless' in ds_name:
- meters.update({f'{error_type}_ntop=BOP_matching=BOP': # For ADD-S<0.1d
- PoseErrorMeter(error_type=error_type, match_threshold=0.1, **base_kwargs),
-
- f'{error_type}_ntop=ALL_matching=BOP': # For mAP
- PoseErrorMeter(error_type=error_type, match_threshold=0.1,
- consider_all_predictions=True,
- report_AP=True, **base_kwargs)})
+ report_error_stats=False,
+ report_error_AUC=True,
+ **base_kwargs,
+ )
+
+ if "tless" in ds_name:
+ meters.update(
+ {
+ # For ADD-S<0.1d
+ f"{error_type}_ntop=BOP_matching=BOP": PoseErrorMeter(
+ error_type=error_type,
+ match_threshold=0.1,
+ **base_kwargs,
+ ),
+ # For mAP
+ f"{error_type}_ntop=ALL_matching=BOP": PoseErrorMeter(
+ error_type=error_type,
+ match_threshold=0.1,
+ consider_all_predictions=True,
+ report_AP=True,
+ **base_kwargs,
+ ),
+ },
+ )
return meters
-def load_models(coarse_run_id, refiner_run_id=None, n_workers=8, object_set='tless'):
- if object_set == 'tless':
- object_ds_name, urdf_ds_name = 'tless.bop', 'tless.cad'
+def load_models(coarse_run_id, refiner_run_id=None, n_workers=8, object_set="tless"):
+ if object_set == "tless":
+ object_ds_name, urdf_ds_name = "tless.bop", "tless.cad"
else:
- object_ds_name, urdf_ds_name = 'ycbv.bop-compat.eval', 'ycbv'
+ object_ds_name, urdf_ds_name = "ycbv.bop-compat.eval", "ycbv"
object_ds = make_object_dataset(object_ds_name)
mesh_db = MeshDataBase.from_object_ds(object_ds)
@@ -231,17 +296,21 @@ def load_models(coarse_run_id, refiner_run_id=None, n_workers=8, object_set='tle
def load_model(run_id):
if run_id is None:
- return
+ return None
run_dir = EXP_DIR / run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config(cfg)
if cfg.train_refiner:
- model = create_model_refiner(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')
+ model = create_model_refiner(
+ cfg,
+ renderer=renderer,
+ mesh_db=mesh_db_batched,
+ )
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")
else:
model = create_model_coarse(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.cuda().eval()
model.cfg = cfg
@@ -249,26 +318,28 @@ def load_model(run_id):
coarse_model = load_model(coarse_run_id)
refiner_model = load_model(refiner_run_id)
- model = CoarseRefinePosePredictor(coarse_model=coarse_model,
- refiner_model=refiner_model)
+ model = CoarseRefinePosePredictor(
+ coarse_model=coarse_model,
+ refiner_model=refiner_model,
+ )
return model, mesh_db
def main():
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
- if 'cosypose' in logger.name:
+ if "cosypose" in logger.name:
logger.setLevel(logging.DEBUG)
logger.info("Starting ...")
init_distributed_mode()
- parser = argparse.ArgumentParser('Evaluation')
- parser.add_argument('--config', default='tless-bop', type=str)
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--job_dir', default='', type=str)
- parser.add_argument('--comment', default='', type=str)
- parser.add_argument('--nviews', dest='n_views', default=1, type=int)
+ parser = argparse.ArgumentParser("Evaluation")
+ parser.add_argument("--config", default="tless-bop", type=str)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--job_dir", default="", type=str)
+ parser.add_argument("--comment", default="", type=str)
+ parser.add_argument("--nviews", dest="n_views", default=1, type=int)
args = parser.parse_args()
coarse_run_id = None
@@ -285,33 +356,33 @@ def main():
skip_mv = args.n_views < 2
skip_predictions = False
- object_set = 'tless'
- if 'tless' in args.config:
- object_set = 'tless'
- coarse_run_id = 'tless-coarse--10219'
- refiner_run_id = 'tless-refiner--585928'
+ object_set = "tless"
+ if "tless" in args.config:
+ object_set = "tless"
+ coarse_run_id = "tless-coarse--10219"
+ refiner_run_id = "tless-refiner--585928"
n_coarse_iterations = 1
n_refiner_iterations = 4
- elif 'ycbv' in args.config:
- object_set = 'ycbv'
- refiner_run_id = 'ycbv-refiner-finetune--251020'
+ elif "ycbv" in args.config:
+ object_set = "ycbv"
+ refiner_run_id = "ycbv-refiner-finetune--251020"
n_coarse_iterations = 0
n_refiner_iterations = 2
else:
raise ValueError(args.config)
- if args.config == 'tless-siso':
- ds_name = 'tless.primesense.test'
+ if args.config == "tless-siso":
+ ds_name = "tless.primesense.test"
assert n_views == 1
- elif args.config == 'tless-vivo':
- ds_name = 'tless.primesense.test.bop19'
- elif args.config == 'ycbv':
- ds_name = 'ycbv.test.keyframes'
+ elif args.config == "tless-vivo":
+ ds_name = "tless.primesense.test.bop19"
+ elif args.config == "ycbv":
+ ds_name = "ycbv.test.keyframes"
else:
raise ValueError(args.config)
if args.debug:
- if 'tless' in args.config:
+ if "tless" in args.config:
scene_id = None
group_id = 64
n_groups = 2
@@ -323,7 +394,7 @@ def main():
n_plotters = 0
n_rand = np.random.randint(1e10)
- save_dir = RESULTS_DIR / f'{args.config}-n_views={n_views}-{args.comment}-{n_rand}'
+ save_dir = RESULTS_DIR / f"{args.config}-n_views={n_views}-{args.comment}-{n_rand}"
logger.info(f"SAVE DIR: {save_dir}")
logger.info(f"Coarse: {coarse_run_id}")
logger.info(f"Refiner: {refiner_run_id}")
@@ -332,41 +403,50 @@ def main():
scene_ds = make_scene_dataset(ds_name)
if scene_id is not None:
- mask = scene_ds.frame_index['scene_id'] == scene_id
+ mask = scene_ds.frame_index["scene_id"] == scene_id
scene_ds.frame_index = scene_ds.frame_index[mask].reset_index(drop=True)
if n_frames is not None:
- scene_ds.frame_index = scene_ds.frame_index[mask].reset_index(drop=True)[:n_frames]
+ scene_ds.frame_index = scene_ds.frame_index[mask].reset_index(drop=True)[
+ :n_frames
+ ]
# Predictions
- predictor, mesh_db = load_models(coarse_run_id, refiner_run_id, n_workers=n_plotters, object_set=object_set)
+ predictor, mesh_db = load_models(
+ coarse_run_id,
+ refiner_run_id,
+ n_workers=n_plotters,
+ object_set=object_set,
+ )
mv_predictor = MultiviewScenePredictor(mesh_db)
- base_pred_kwargs = dict(
- n_coarse_iterations=n_coarse_iterations,
- n_refiner_iterations=n_refiner_iterations,
- skip_mv=skip_mv,
- pose_predictor=predictor,
- mv_predictor=mv_predictor,
- )
+ base_pred_kwargs = {
+ "n_coarse_iterations": n_coarse_iterations,
+ "n_refiner_iterations": n_refiner_iterations,
+ "skip_mv": skip_mv,
+ "pose_predictor": predictor,
+ "mv_predictor": mv_predictor,
+ }
if skip_predictions:
pred_kwargs = {}
- elif 'tless' in ds_name:
- pix2pose_detections = load_pix2pose_results(all_detections='bop19' in ds_name).cpu()
+ elif "tless" in ds_name:
+ pix2pose_detections = load_pix2pose_results(
+ all_detections="bop19" in ds_name,
+ ).cpu()
pred_kwargs = {
- 'pix2pose_detections': dict(
+ "pix2pose_detections": dict(
detections=pix2pose_detections,
- **base_pred_kwargs
+ **base_pred_kwargs,
),
}
- elif 'ycbv' in ds_name:
+ elif "ycbv" in ds_name:
posecnn_detections = load_posecnn_results()
pred_kwargs = {
- 'posecnn_init': dict(
+ "posecnn_init": dict(
detections=posecnn_detections,
use_detections_TCO=posecnn_detections,
- **base_pred_kwargs
+ **base_pred_kwargs,
),
}
else:
@@ -375,56 +455,71 @@ def main():
scene_ds_pred = MultiViewWrapper(scene_ds, n_views=n_views)
if group_id is not None:
- mask = scene_ds_pred.frame_index['group_id'] == group_id
- scene_ds_pred.frame_index = scene_ds_pred.frame_index[mask].reset_index(drop=True)
+ mask = scene_ds_pred.frame_index["group_id"] == group_id
+ scene_ds_pred.frame_index = scene_ds_pred.frame_index[mask].reset_index(
+ drop=True,
+ )
elif n_groups is not None:
scene_ds_pred.frame_index = scene_ds_pred.frame_index[:n_groups]
pred_runner = MultiviewPredictionRunner(
- scene_ds_pred, batch_size=1, n_workers=n_workers,
- cache_data=len(pred_kwargs) > 1)
+ scene_ds_pred,
+ batch_size=1,
+ n_workers=n_workers,
+ cache_data=len(pred_kwargs) > 1,
+ )
- all_predictions = dict()
+ all_predictions = {}
for pred_prefix, pred_kwargs_n in pred_kwargs.items():
logger.info(f"Prediction: {pred_prefix}")
preds = pred_runner.get_predictions(**pred_kwargs_n)
for preds_name, preds_n in preds.items():
- all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n
+ all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n
logger.info("Done with predictions")
torch.distributed.barrier()
# Evaluation
predictions_to_evaluate = set()
- if 'ycbv' in ds_name:
- det_key = 'posecnn_init'
- all_predictions['posecnn'] = posecnn_detections
- predictions_to_evaluate.add('posecnn')
- elif 'tless' in ds_name:
- det_key = 'pix2pose_detections'
+ if "ycbv" in ds_name:
+ det_key = "posecnn_init"
+ all_predictions["posecnn"] = posecnn_detections
+ predictions_to_evaluate.add("posecnn")
+ elif "tless" in ds_name:
+ det_key = "pix2pose_detections"
else:
raise ValueError(ds_name)
- predictions_to_evaluate.add(f'{det_key}/refiner/iteration={n_refiner_iterations}')
+ predictions_to_evaluate.add(f"{det_key}/refiner/iteration={n_refiner_iterations}")
if args.n_views > 1:
for k in [
- # f'ba_input',
- # f'ba_output',
- f'ba_output+all_cand'
+ # f'ba_input',
+ # f'ba_output',
+ "ba_output+all_cand",
]:
- predictions_to_evaluate.add(f'{det_key}/{k}')
+ predictions_to_evaluate.add(f"{det_key}/{k}")
- all_predictions = OrderedDict({k: v for k, v in sorted(all_predictions.items(), key=lambda item: item[0])})
+ all_predictions = OrderedDict(
+ dict(sorted(all_predictions.items(), key=lambda item: item[0])),
+ )
# Evaluation.
meters = get_pose_meters(scene_ds)
mv_group_ids = list(iter(pred_runner.sampler))
- scene_ds_ids = np.concatenate(scene_ds_pred.frame_index.loc[mv_group_ids, 'scene_ds_ids'].values)
+ scene_ds_ids = np.concatenate(
+ scene_ds_pred.frame_index.loc[mv_group_ids, "scene_ds_ids"].values,
+ )
sampler = ListSampler(scene_ds_ids)
- eval_runner = PoseEvaluation(scene_ds, meters, n_workers=n_workers,
- cache_data=True, batch_size=1, sampler=sampler)
+ eval_runner = PoseEvaluation(
+ scene_ds,
+ meters,
+ n_workers=n_workers,
+ cache_data=True,
+ batch_size=1,
+ sampler=sampler,
+ )
- eval_metrics, eval_dfs = dict(), dict()
+ eval_metrics, eval_dfs = {}, {}
for preds_k, preds in all_predictions.items():
if preds_k in predictions_to_evaluate:
logger.info(f"Evaluation : {preds_k} (N={len(preds)})")
@@ -437,43 +532,87 @@ def main():
all_predictions = gather_predictions(all_predictions)
- metrics_to_print = dict()
- if 'ycbv' in ds_name:
- metrics_to_print.update({
- f'posecnn/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': f'PoseCNN/AUC of ADD(-S)',
-
- f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': f'Singleview/AUC of ADD(-S)',
- f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean': f'Singleview/AUC of ADD-S',
-
- f'{det_key}/ba_output+all_cand/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD(-S)',
- f'{det_key}/ba_output+all_cand/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD-S',
- })
- elif 'tless' in ds_name:
- metrics_to_print.update({
- f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean': f'Singleview/AUC of ADD-S',
- # f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=BOP/0.1d': f'Singleview/ADD-S<0.1d',
- f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=ALL_matching=BOP/mAP': f'Singleview/mAP@ADD-S<0.1d',
-
-
- f'{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean': f'Multiview (n={args.n_views})/AUC of ADD-S',
- # f'{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=BOP/0.1d': f'Multiview (n={args.n_views})/ADD-S<0.1d',
- f'{det_key}/ba_output+all_cand/ADD-S_ntop=ALL_matching=BOP/mAP': f'Multiview (n={args.n_views}/mAP@ADD-S<0.1d)',
- })
+ metrics_to_print = {}
+ if "ycbv" in ds_name:
+ k_0 = "posecnn/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean"
+ k_1 = (
+ f"{det_key}/refiner/iteration={n_refiner_iterations}/"
+ f"ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean"
+ )
+ k_2 = (
+ f"{det_key}/refiner/iteration={n_refiner_iterations}/"
+ f"ADD-S_ntop=1_matching=CLASS/AUC/objects/mean"
+ )
+ k_3 = (
+ f"{det_key}/ba_output+all_cand/ADD(-S)_ntop=1_matching="
+ f"CLASS/AUC/objects/mean"
+ )
+ k_4 = (
+ f"{det_key}/ba_output+all_cand/ADD-S_ntop=1_matching="
+ f"CLASS/AUC/objects/mean"
+ )
+ metrics_to_print.update(
+ {
+ k_0: "PoseCNN/AUC of ADD(-S)",
+ k_1: "Singleview/AUC of ADD(-S)",
+ k_2: "Singleview/AUC of ADD-S",
+ k_3: f"Multiview (n={args.n_views})/AUC of ADD(-S)",
+ k_4: f"Multiview (n={args.n_views})/AUC of ADD-S",
+ },
+ )
+ elif "tless" in ds_name:
+ k_0 = (
+ f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop="
+ f"BOP_matching=OVERLAP/AUC/objects/mean"
+ )
+ k_1 = (
+ f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop="
+ f"BOP_matching=BOP/0.1d"
+ )
+ k_2 = (
+ f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop="
+ f"ALL_matching=BOP/mAP"
+ )
+ k_3 = (
+ f"{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching="
+ f"OVERLAP/AUC/objects/mean"
+ )
+ k_4 = f"{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=BOP/0.1d"
+ k_5 = f"{det_key}/ba_output+all_cand/ADD-S_ntop=ALL_matching=BOP/mAP"
+ metrics_to_print.update(
+ {
+ k_0: "Singleview/AUC of ADD-S",
+ # k_1: f'Singleview/ADD-S<0.1d',
+ k_2: "Singleview/mAP@ADD-S<0.1d",
+ k_3: f"Multiview (n={args.n_views})/AUC of ADD-S",
+ # k_4: f'Multiview (n={args.n_views})/ADD-S<0.1d',
+ k_5: f"Multiview (n={args.n_views}/mAP@ADD-S<0.1d)",
+ },
+ )
else:
raise ValueError
- metrics_to_print.update({
- f'{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm': f'Multiview before BA/ADD-S (m)',
- f'{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm': f'Multiview after BA/ADD-S (m)',
- })
+ k_0 = f"{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm"
+ k_1 = f"{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm"
+ metrics_to_print.update(
+ {
+ k_0: "Multiview before BA/ADD-S (m)",
+ k_1: "Multiview after BA/ADD-S (m)",
+ },
+ )
if get_rank() == 0:
save_dir.mkdir()
- results = format_results(all_predictions, eval_metrics, eval_dfs, print_metrics=False)
- (save_dir / 'full_summary.txt').write_text(results.get('summary_txt', ''))
-
- full_summary = results['summary']
- summary_txt = 'Results:'
+ results = format_results(
+ all_predictions,
+ eval_metrics,
+ eval_dfs,
+ print_metrics=False,
+ )
+ (save_dir / "full_summary.txt").write_text(results.get("summary_txt", ""))
+
+ full_summary = results["summary"]
+ summary_txt = "Results:"
for k, v in metrics_to_print.items():
if k in full_summary:
summary_txt += f"\n{v}: {full_summary[k]}"
@@ -481,12 +620,12 @@ def main():
logger.info(summary_txt)
logger.info(f"{'-'*80}")
- torch.save(results, save_dir / 'results.pth.tar')
- (save_dir / 'summary.txt').write_text(summary_txt)
+ torch.save(results, save_dir / "results.pth.tar")
+ (save_dir / "summary.txt").write_text(summary_txt)
logger.info(f"Saved: {save_dir}")
-if __name__ == '__main__':
+if __name__ == "__main__":
patch_tqdm()
main()
time.sleep(2)
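Illustrative sketch (not part of the patch): the key-sorted prediction map built before evaluation above, on toy entries; sorting the keys makes iteration, and hence the reported metric order, deterministic across runs.

from collections import OrderedDict

all_predictions = {
    "posecnn_init/refiner/iteration=2": "preds_a",  # toy prediction objects
    "posecnn": "preds_b",
}
all_predictions = OrderedDict(sorted(all_predictions.items(), key=lambda kv: kv[0]))
print(list(all_predictions))  # ['posecnn', 'posecnn_init/refiner/iteration=2']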
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py
index 6eff8d01..e4d96469 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py
@@ -1,21 +1,29 @@
-import torch
-import sys
-import pandas as pd
import argparse
-from pathlib import Path
import json
-import numpy as np
import logging
+import sys
+from pathlib import Path
+
+import numpy as np
+import pandas as pd
+import torch
-from happypose.pose_estimators.cosypose.cosypose.datasets.bop_object_datasets import BOPObjectDataset
-from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import MultiviewScenePredictor
-import cosypose.utils.tensor_collection as tc
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import BulletSceneRenderer
-from happypose.pose_estimators.cosypose.cosypose.visualization.multiview import make_cosypose_plots
-from happypose.pose_estimators.cosypose.cosypose.visualization.multiview import make_scene_renderings, nms3d
+import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc
+from happypose.pose_estimators.cosypose.cosypose.config import (
+ BOP_TOOLKIT_DIR,
+ LOCAL_DATA_DIR,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.bop_object_datasets import (
+ BOPObjectDataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.integrated.multiview_predictor import (
+ MultiviewScenePredictor,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import (
+ MeshDataBase,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.config import BOP_TOOLKIT_DIR, LOCAL_DATA_DIR
+from happypose.pose_estimators.cosypose.cosypose.visualization.multiview import nms3d
sys.path.append(str(BOP_TOOLKIT_DIR))
from bop_toolkit_lib import inout # noqa
@@ -30,25 +38,36 @@ def tc_to_csv(predictions, csv_path):
t = TCO_n[:3, -1] * 1e3 # m -> mm conversion
R = TCO_n[:3, :3]
row = predictions.infos.iloc[n]
- obj_id = int(row.label.split('_')[-1])
+ obj_id = int(row.label.split("_")[-1])
score = row.score
time = -1.0
- pred = dict(scene_id=row.scene_id,
- im_id=row.view_id,
- obj_id=obj_id,
- score=score,
- t=t, R=R, time=time)
+ pred = {
+ "scene_id": row.scene_id,
+ "im_id": row.view_id,
+ "obj_id": obj_id,
+ "score": score,
+ "t": t,
+ "R": R,
+ "time": time,
+ }
preds.append(pred)
inout.save_bop_results(csv_path, preds)
def read_csv_candidates(csv_path):
df = pd.read_csv(csv_path)
- infos = df.loc[:, ['im_id', 'scene_id', 'score', 'obj_id']]
- infos['obj_id'] = infos['obj_id'].apply(lambda x: f'obj_{x:06d}')
- infos = infos.rename(dict(im_id='view_id', obj_id='label'), axis=1)
- R = np.stack(df['R'].apply(lambda x: list(map(float, x.split(' '))))).reshape(-1, 3, 3)
- t = np.stack(df['t'].apply(lambda x: list(map(float, x.split(' '))))).reshape(-1, 3) * 1e-3
+ infos = df.loc[:, ["im_id", "scene_id", "score", "obj_id"]]
+ infos["obj_id"] = infos["obj_id"].apply(lambda x: f"obj_{x:06d}")
+ infos = infos.rename({"im_id": "view_id", "obj_id": "label"}, axis=1)
+ R = np.stack(df["R"].apply(lambda x: list(map(float, x.split(" "))))).reshape(
+ -1,
+ 3,
+ 3,
+ )
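+    # BOP convention: translations are stored in millimeters; convert to meters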
+ t = (
+ np.stack(df["t"].apply(lambda x: list(map(float, x.split(" "))))).reshape(-1, 3)
+ * 1e-3
+ )
R = torch.tensor(R, dtype=torch.float)
t = torch.tensor(t, dtype=torch.float)
TCO = torch.eye(4, dtype=torch.float).unsqueeze(0).repeat(len(R), 1, 1)
@@ -63,10 +82,10 @@ def read_cameras(json_path, view_ids):
all_K = []
for view_id in view_ids:
cam_info = cameras[str(view_id)]
- K = np.array(cam_info['cam_K']).reshape(3, 3)
+ K = np.array(cam_info["cam_K"]).reshape(3, 3)
all_K.append(K)
K = torch.as_tensor(np.stack(all_K))
- cameras = tc.PandasTensorCollection(K=K, infos=pd.DataFrame(dict(view_id=view_ids)))
+ cameras = tc.PandasTensorCollection(K=K, infos=pd.DataFrame({"view_id": view_ids}))
return cameras
@@ -75,41 +94,75 @@ def save_scene_json(objects, cameras, results_scene_path):
list_objects = []
for n in range(len(objects)):
- obj = objects.infos.loc[n, ['score', 'label', 'n_cand']].to_dict()
+ obj = objects.infos.loc[n, ["score", "label", "n_cand"]].to_dict()
obj = {k: np.asarray(v).item() for k, v in obj.items()}
- obj['TWO'] = objects.TWO[n].cpu().numpy().tolist()
+ obj["TWO"] = objects.TWO[n].cpu().numpy().tolist()
list_objects.append(obj)
for n in range(len(cameras)):
- cam = cameras.infos.loc[n, ['view_id']].to_dict()
- cam['TWC'] = cameras.TWC[n].cpu().numpy().tolist()
- cam['K'] = cameras.K[n].cpu().numpy().tolist()
+ cam = cameras.infos.loc[n, ["view_id"]].to_dict()
+ cam["TWC"] = cameras.TWC[n].cpu().numpy().tolist()
+ cam["K"] = cameras.K[n].cpu().numpy().tolist()
list_cameras.append(cam)
- scene = dict(objects=list_objects, cameras=list_cameras)
+ scene = {"objects": list_objects, "cameras": list_cameras}
results_scene_path.write_text(json.dumps(scene))
return
def main():
- parser = argparse.ArgumentParser('CosyPose multi-view reconstruction for a custom scenario')
- parser.add_argument('--scenario', default='', type=str, help='Id of the scenario, matching directory must be in local_data/scenarios')
- parser.add_argument('--sv_score_th', default=0.3, type=int, help="Score to filter single-view predictions")
- parser.add_argument('--n_symmetries_rot', default=64, type=int, help="Number of discretized symmetries to use for continuous symmetries")
- parser.add_argument('--ransac_n_iter', default=2000, type=int,
- help="Max number of RANSAC iterations per pair of views")
- parser.add_argument('--ransac_dist_threshold', default=0.02, type=float,
- help="Threshold (in meters) on symmetric distance to consider a tentative match an inlier")
- parser.add_argument('--ba_n_iter', default=10, type=int,
- help="Maximum number of LM iterations in stage 3")
- parser.add_argument('--nms_th', default=0.04, type=float,
- help='Threshold (meter) for NMS 3D')
- parser.add_argument('--no_visualization', action='store_true')
+ parser = argparse.ArgumentParser(
+ "CosyPose multi-view reconstruction for a custom scenario",
+ )
+ parser.add_argument(
+ "--scenario",
+ default="",
+ type=str,
+ help="Id of the scenario, matching directory must be in local_data/scenarios",
+ )
+ parser.add_argument(
+ "--sv_score_th",
+ default=0.3,
+        type=float,
+        help="Score threshold used to filter single-view predictions",
+ )
+ parser.add_argument(
+ "--n_symmetries_rot",
+ default=64,
+ type=int,
+ help="Number of discretized symmetries to use for continuous symmetries",
+ )
+ parser.add_argument(
+ "--ransac_n_iter",
+ default=2000,
+ type=int,
+ help="Max number of RANSAC iterations per pair of views",
+ )
+ parser.add_argument(
+ "--ransac_dist_threshold",
+ default=0.02,
+ type=float,
+ help="Threshold (in meters) on symmetric distance to consider "
+ "a tentative match an inlier",
+ )
+ parser.add_argument(
+ "--ba_n_iter",
+ default=10,
+ type=int,
+ help="Maximum number of LM iterations in stage 3",
+ )
+ parser.add_argument(
+ "--nms_th",
+ default=0.04,
+ type=float,
+ help="Threshold (meter) for NMS 3D",
+ )
+ parser.add_argument("--no_visualization", action="store_true")
args = parser.parse_args()
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
- if 'cosypose' in logger.name:
+ if "cosypose" in logger.name:
logger.setLevel(logging.DEBUG)
logger.info(f"{'-'*80}")
@@ -117,57 +170,68 @@ def main():
logger.info(f"{k}: {v}")
logger.info(f"{'-'*80}")
- scenario_dir = LOCAL_DATA_DIR / 'custom_scenarios' / args.scenario
+ scenario_dir = LOCAL_DATA_DIR / "custom_scenarios" / args.scenario
- candidates = read_csv_candidates(scenario_dir / 'candidates.csv').float().cuda()
- candidates.infos['group_id'] = 0
- scene_ids = np.unique(candidates.infos['scene_id'])
- assert len(scene_ids) == 1, 'Please only provide 6D pose estimations that correspond to the same scene.'
+ candidates = read_csv_candidates(scenario_dir / "candidates.csv").float().cuda()
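+    # One group id: all candidates belong to the same reconstruction problem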
+ candidates.infos["group_id"] = 0
+ scene_ids = np.unique(candidates.infos["scene_id"])
+ assert (
+ len(scene_ids) == 1
+ ), "Please only provide 6D pose estimations that correspond to the same scene."
scene_id = scene_ids.item()
- view_ids = np.unique(candidates.infos['view_id'])
+ view_ids = np.unique(candidates.infos["view_id"])
n_views = len(view_ids)
- logger.info(f'Loaded {len(candidates)} candidates in {n_views} views.')
+ logger.info(f"Loaded {len(candidates)} candidates in {n_views} views.")
- cameras = read_cameras(scenario_dir / 'scene_camera.json', view_ids).float().cuda()
- cameras.infos['scene_id'] = scene_id
- cameras.infos['batch_im_id'] = np.arange(len(view_ids))
- logger.info(f'Loaded cameras intrinsics.')
+ cameras = read_cameras(scenario_dir / "scene_camera.json", view_ids).float().cuda()
+ cameras.infos["scene_id"] = scene_id
+ cameras.infos["batch_im_id"] = np.arange(len(view_ids))
+ logger.info("Loaded cameras intrinsics.")
- object_ds = BOPObjectDataset(scenario_dir / 'models')
+ object_ds = BOPObjectDataset(scenario_dir / "models")
mesh_db = MeshDataBase.from_object_ds(object_ds)
- logger.info(f'Loaded {len(object_ds)} 3D object models.')
+ logger.info(f"Loaded {len(object_ds)} 3D object models.")
- logger.info('Running stage 2 and 3 of CosyPose...')
+ logger.info("Running stage 2 and 3 of CosyPose...")
mv_predictor = MultiviewScenePredictor(mesh_db)
- predictions = mv_predictor.predict_scene_state(candidates, cameras,
- score_th=args.sv_score_th,
- use_known_camera_poses=False,
- ransac_n_iter=args.ransac_n_iter,
- ransac_dist_threshold=args.ransac_dist_threshold,
- ba_n_iter=args.ba_n_iter)
-
- objects = predictions['scene/objects']
- cameras = predictions['scene/cameras']
- reproj = predictions['ba_output']
-
- for view_group in np.unique(objects.infos['view_group']):
- objects_ = objects[np.where(objects.infos['view_group'] == view_group)[0]]
- cameras_ = cameras[np.where(cameras.infos['view_group'] == view_group)[0]]
- reproj_ = reproj[np.where(reproj.infos['view_group'] == view_group)[0]]
- objects_ = nms3d(objects_, th=args.nms_th, poses_attr='TWO')
-
- view_group_dir = scenario_dir / 'results' / f'subscene={view_group}'
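+    # Stage 2 (multi-view RANSAC matching) and stage 3 (bundle adjustment)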
+ predictions = mv_predictor.predict_scene_state(
+ candidates,
+ cameras,
+ score_th=args.sv_score_th,
+ use_known_camera_poses=False,
+ ransac_n_iter=args.ransac_n_iter,
+ ransac_dist_threshold=args.ransac_dist_threshold,
+ ba_n_iter=args.ba_n_iter,
+ )
+
+ objects = predictions["scene/objects"]
+ cameras = predictions["scene/cameras"]
+ reproj = predictions["ba_output"]
+
+ for view_group in np.unique(objects.infos["view_group"]):
+ objects_ = objects[np.where(objects.infos["view_group"] == view_group)[0]]
+ cameras_ = cameras[np.where(cameras.infos["view_group"] == view_group)[0]]
+ reproj_ = reproj[np.where(reproj.infos["view_group"] == view_group)[0]]
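+        # Merge duplicates closer than nms_th meters (3D non-maximum suppression)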
+ objects_ = nms3d(objects_, th=args.nms_th, poses_attr="TWO")
+
+ view_group_dir = scenario_dir / "results" / f"subscene={view_group}"
view_group_dir.mkdir(exist_ok=True, parents=True)
- logger.info(f'Subscene {view_group} has {len(objects_)} objects and {len(cameras_)} cameras.')
+ logger.info(
+ f"Subscene {view_group} has {len(objects_)} objects and "
+ f"{len(cameras_)} cameras.",
+ )
- predicted_scene_path = view_group_dir / 'predicted_scene.json'
- scene_reprojected_path = view_group_dir / 'scene_reprojected.csv'
+ predicted_scene_path = view_group_dir / "predicted_scene.json"
+ scene_reprojected_path = view_group_dir / "scene_reprojected.csv"
save_scene_json(objects_, cameras_, predicted_scene_path)
tc_to_csv(reproj_, scene_reprojected_path)
- logger.info(f'Wrote predicted scene (objects+cameras): {predicted_scene_path}')
- logger.info(f'Wrote predicted objects with pose expressed in camera frame: {scene_reprojected_path}')
+ logger.info(f"Wrote predicted scene (objects+cameras): {predicted_scene_path}")
+ logger.info(
+ f"Wrote predicted objects with pose expressed in camera frame: "
+ f"{scene_reprojected_path}",
+ )
# if args.no_visualization:
# logger.info('Skipping visualization.')
@@ -180,5 +244,5 @@ def main():
# logger.info('Generating visualization ...')
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py
index 6a4a03ba..5d37da5a 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py
@@ -1,23 +1,28 @@
import argparse
+
from colorama import Fore, Style
from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
-from happypose.pose_estimators.cosypose.cosypose.recording.record_dataset import record_dataset
-
-
-def make_cfg(cfg_name,
- resume_ds_name='',
- debug=False,
- distributed=False,
- overwrite=False,
- datasets_dir=LOCAL_DATA_DIR):
- datasets_dir = datasets_dir / 'synt_datasets'
+from happypose.pose_estimators.cosypose.cosypose.recording.record_dataset import (
+ record_dataset,
+)
+
+
+def make_cfg(
+ cfg_name,
+ resume_ds_name="",
+ debug=False,
+ distributed=False,
+ overwrite=False,
+ datasets_dir=LOCAL_DATA_DIR,
+):
+ datasets_dir = datasets_dir / "synt_datasets"
datasets_dir.mkdir(exist_ok=True)
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
cfg.overwrite = overwrite
- cfg.ds_name = 'default_dataset'
+ cfg.ds_name = "default_dataset"
n_frames = 1e6
cfg.n_frames_per_chunk = 100
@@ -28,31 +33,31 @@ def make_cfg(cfg_name,
cfg.n_workers = 6
cfg.n_processes_per_gpu = 10
- cfg.scene_cls = 'cosypose.recording.bop_recording_scene.BopRecordingScene'
- cfg.scene_kwargs = dict(
- gpu_renderer=True,
- texture_ds='shapenet',
- domain_randomization=True,
- n_objects_interval=(3, 9),
- proba_falling=0.5,
- border_check=False,
- n_textures_cache=100,
- objects_xyz_interval=((-0.25, -0.25, 0.), (0.25, 0.25, 0.25)),
- focal_interval=((1060, 1060), (1080, 1080)),
- )
- cfg.ds_name = f'{cfg_name}-1M'
-
- if cfg_name == 'ycbv':
+ cfg.scene_cls = "cosypose.recording.bop_recording_scene.BopRecordingScene"
+ cfg.scene_kwargs = {
+ "gpu_renderer": True,
+ "texture_ds": "shapenet",
+ "domain_randomization": True,
+ "n_objects_interval": (3, 9),
+ "proba_falling": 0.5,
+ "border_check": False,
+ "n_textures_cache": 100,
+ "objects_xyz_interval": ((-0.25, -0.25, 0.0), (0.25, 0.25, 0.25)),
+ "focal_interval": ((1060, 1060), (1080, 1080)),
+ }
+ cfg.ds_name = f"{cfg_name}-1M"
+
+ if cfg_name == "ycbv":
cfg.scene_kwargs.update(
- urdf_ds='ycbv',
+ urdf_ds="ycbv",
resolution=(640, 480),
textures_on_objects=False,
camera_distance_interval=(0.5, 1.5),
)
- elif cfg_name == 'tless':
+ elif cfg_name == "tless":
cfg.scene_kwargs.update(
- urdf_ds='tless.cad',
+ urdf_ds="tless.cad",
resolution=(720, 540),
textures_on_objects=True,
camera_distance_interval=(0.65, 0.94),
@@ -62,12 +67,13 @@ def make_cfg(cfg_name,
pass
else:
- raise ValueError('Unknown config')
+ msg = "Unknown config"
+ raise ValueError(msg)
if debug:
n_frames = 10
cfg.overwrite = True
- cfg.ds_name = 'debug'
+ cfg.ds_name = "debug"
cfg.n_frames_per_chunk = 1
cfg.n_chunks = n_frames // cfg.n_frames_per_chunk
@@ -76,36 +82,40 @@ def make_cfg(cfg_name,
cfg.ds_name = resume_ds_name
assert cfg.resume.exists()
else:
- cfg.resume = ''
+ cfg.resume = ""
cfg.ds_dir = datasets_dir / cfg.ds_name
return cfg
def main():
- parser = argparse.ArgumentParser('Dataset recording')
- parser.add_argument('--config', default='', type=str)
- parser.add_argument('--resume', default='', type=str)
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--local', action='store_true')
- parser.add_argument('--overwrite', action='store_true')
+ parser = argparse.ArgumentParser("Dataset recording")
+ parser.add_argument("--config", default="", type=str)
+ parser.add_argument("--resume", default="", type=str)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--local", action="store_true")
+ parser.add_argument("--overwrite", action="store_true")
args = parser.parse_args()
print(f"{Fore.RED}using config {args.config} {Style.RESET_ALL}")
- cfg = make_cfg(args.config,
- resume_ds_name=args.resume,
- debug=args.debug,
- distributed=not args.local,
- overwrite=args.overwrite)
+ cfg = make_cfg(
+ args.config,
+ resume_ds_name=args.resume,
+ debug=args.debug,
+ distributed=not args.local,
+ overwrite=args.overwrite,
+ )
for k, v in vars(cfg).items():
print(k, v)
if cfg.resume:
print(f"RESUMING {Fore.RED} {cfg.ds_name} {Style.RESET_ALL} \n ")
else:
- print(f"STARTING DATASET RECORDING {Fore.GREEN} {cfg.ds_name} {Style.RESET_ALL} \n ")
+ print(
+ f"STARTING DATASET RECORDING {Fore.GREEN} {cfg.ds_name} {Style.RESET_ALL}"
+ )
record_dataset(cfg)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py
index a1277ffe..8fbf89af 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py
@@ -1,33 +1,48 @@
-import numpy as np
+import argparse
import logging
-import pandas as pd
from copy import deepcopy
from pathlib import Path
-import yaml
+
+import numpy as np
+import pandas as pd
import torch
-import argparse
+import torch.multiprocessing
+import yaml
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR
from happypose.pose_estimators.cosypose.cosypose.datasets.bop import remap_bop_targets
-
-from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import format_results
-
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import create_model_detector, check_update_config
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_scene_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.detection_eval import ( # noqa: E501
+ DetectionEvaluation,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.detection_meters import ( # noqa: E501
+ DetectionMeter,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.detections import ( # noqa: E501
+ DetectionRunner,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import (
+ format_results,
+)
from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
-from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.detections import DetectionRunner
-from happypose.pose_estimators.cosypose.cosypose.scripts.run_cosypose_eval import load_pix2pose_results, load_posecnn_results
-
-from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.detection_meters import DetectionMeter
-from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.detection_eval import DetectionEvaluation
-
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_tmp_dir, get_rank
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import init_distributed_mode
-
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
+from happypose.pose_estimators.cosypose.cosypose.scripts.run_cosypose_eval import (
+ load_pix2pose_results,
+ load_posecnn_results,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ check_update_config,
+ create_model_detector,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_tmp_dir,
+ init_distributed_mode,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-import torch.multiprocessing
-torch.multiprocessing.set_sharing_strategy('file_system')
+torch.multiprocessing.set_sharing_strategy("file_system")
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
@@ -36,12 +51,12 @@
def load_detector(run_id):
run_dir = EXP_DIR / run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config(cfg)
label_to_category_id = cfg.label_to_category_id
model = create_model_detector(cfg, len(label_to_category_id))
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.cuda().eval()
model.cfg = cfg
@@ -54,25 +69,25 @@ def get_meters(scene_ds):
visib_gt_min = -1
n_top = -1 # Given by targets
- if 'bop19' in scene_ds.name:
- targets_filename = 'test_targets_bop19.json'
+ if "bop19" in scene_ds.name:
+ targets_filename = "test_targets_bop19.json"
targets_path = scene_ds.ds_dir / targets_filename
targets = pd.read_json(targets_path)
targets = remap_bop_targets(targets)
else:
targets = None
- base_kwargs = dict(
- errors_bsz=64,
+ base_kwargs = {
+ "errors_bsz": 64,
# BOP-Like parameters
- n_top=n_top,
- visib_gt_min=visib_gt_min,
- targets=targets,
- )
+ "n_top": n_top,
+ "visib_gt_min": visib_gt_min,
+ "targets": targets,
+ }
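+    # BOP mode keeps only the top-n predictions per target; ALL keeps every one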
meters = {
- 'ntop=BOP': DetectionMeter(**base_kwargs, consider_all_predictions=False),
- 'ntop=ALL': DetectionMeter(**base_kwargs, consider_all_predictions=True),
+ "ntop=BOP": DetectionMeter(**base_kwargs, consider_all_predictions=False),
+ "ntop=ALL": DetectionMeter(**base_kwargs, consider_all_predictions=True),
}
return meters
@@ -85,10 +100,13 @@ def run_detection_eval(args, detector=None):
scene_ds = make_scene_dataset(args.ds_name, n_frames=args.n_frames)
- pred_kwargs = dict()
- pred_runner = DetectionRunner(scene_ds, batch_size=args.pred_bsz,
- cache_data=len(pred_kwargs) > 1,
- n_workers=args.n_workers)
+ pred_kwargs = {}
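+    # Cache dataset frames only when several prediction configs will reuse them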
+ pred_runner = DetectionRunner(
+ scene_ds,
+ batch_size=args.pred_bsz,
+ cache_data=len(pred_kwargs) > 1,
+ n_workers=args.n_workers,
+ )
if not args.skip_model_predictions:
if detector is not None:
@@ -96,20 +114,24 @@ def run_detection_eval(args, detector=None):
else:
model = load_detector(args.detector_run_id)
- pred_kwargs.update({
- 'model': dict(
- detector=model,
- gt_detections=False
- )
- })
+ pred_kwargs.update(
+ {
+ "model": {
+ "detector": model,
+ "gt_detections": False,
+ },
+ },
+ )
- all_predictions = dict()
+ all_predictions = {}
if args.external_predictions:
- if 'ycbv' in args.ds_name:
- all_predictions['posecnn'] = load_posecnn_results().cpu()
- elif 'tless' in args.ds_name:
- all_predictions['retinanet/pix2pose'] = load_pix2pose_results(all_detections=True).cpu()
+ if "ycbv" in args.ds_name:
+ all_predictions["posecnn"] = load_posecnn_results().cpu()
+ elif "tless" in args.ds_name:
+ all_predictions["retinanet/pix2pose"] = load_pix2pose_results(
+ all_detections=True,
+ ).cpu()
else:
pass
@@ -117,7 +139,7 @@ def run_detection_eval(args, detector=None):
logger.info(f"Prediction: {pred_prefix}")
preds = pred_runner.get_predictions(**pred_kwargs_n)
for preds_name, preds_n in preds.items():
- all_predictions[f'{pred_prefix}/{preds_name}'] = preds_n
+ all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n
logger.info("Done with predictions")
torch.distributed.barrier()
@@ -125,12 +147,16 @@ def run_detection_eval(args, detector=None):
# Evaluation.
meters = get_meters(scene_ds)
logger.info(f"Meters: {meters}")
- eval_runner = DetectionEvaluation(scene_ds, meters, batch_size=args.eval_bsz,
- cache_data=len(all_predictions) > 1,
- n_workers=args.n_workers,
- sampler=pred_runner.sampler)
+ eval_runner = DetectionEvaluation(
+ scene_ds,
+ meters,
+ batch_size=args.eval_bsz,
+ cache_data=len(all_predictions) > 1,
+ n_workers=args.n_workers,
+ sampler=pred_runner.sampler,
+ )
- eval_metrics, eval_dfs = dict(), dict()
+ eval_metrics, eval_dfs = {}, {}
if not args.skip_evaluation:
for preds_k, preds in all_predictions.items():
do_eval = True
@@ -149,12 +175,12 @@ def run_detection_eval(args, detector=None):
if get_rank() == 0:
save_dir = Path(args.save_dir)
save_dir.mkdir(exist_ok=True, parents=True)
- logger.info(f'Finished evaluation on {args.ds_name}')
+ logger.info(f"Finished evaluation on {args.ds_name}")
results = format_results(all_predictions, eval_metrics, eval_dfs)
- torch.save(results, save_dir / 'results.pth.tar')
- (save_dir / 'summary.txt').write_text(results.get('summary_txt', ''))
- (save_dir / 'config.yaml').write_text(yaml.dump(args))
- logger.info(f'Saved predictions+metrics in {save_dir}')
+ torch.save(results, save_dir / "results.pth.tar")
+ (save_dir / "summary.txt").write_text(results.get("summary_txt", ""))
+ (save_dir / "config.yaml").write_text(yaml.dump(args))
+ logger.info(f"Saved predictions+metrics in {save_dir}")
logger.info("Done with evaluation")
torch.distributed.barrier()
@@ -164,21 +190,21 @@ def run_detection_eval(args, detector=None):
def main():
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
- if 'cosypose' in logger.name:
+ if "cosypose" in logger.name:
logger.setLevel(logging.DEBUG)
- parser = argparse.ArgumentParser('Evaluation')
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--skip_predictions', action='store_true')
- parser.add_argument('--comment', default='', type=str)
- parser.add_argument('--id', default=-1, type=int)
- parser.add_argument('--config', default='', type=str)
- parser.add_argument('--models', default='', type=str)
+ parser = argparse.ArgumentParser("Evaluation")
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--skip_predictions", action="store_true")
+ parser.add_argument("--comment", default="", type=str)
+ parser.add_argument("--id", default=-1, type=int)
+ parser.add_argument("--config", default="", type=str)
+ parser.add_argument("--models", default="", type=str)
args = parser.parse_args()
init_distributed_mode()
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
cfg.n_workers = 8
cfg.pred_bsz = 8
@@ -191,38 +217,38 @@ def main():
if args.debug:
cfg.n_frames = 10
- if args.config == 'bop':
+ if args.config == "bop":
# ds_names = ['ycbv.bop19', 'tless.bop19']
- ds_names = ['itodd.val', 'hb.val']
+ ds_names = ["itodd.val", "hb.val"]
else:
raise ValueError
detector_run_ids = {
- 'ycbv.bop19': 'ycbv--377940',
- 'hb.val': 'detector-bop-hb--497808',
- 'itodd.val': 'detector-bop-itodd--509908',
+ "ycbv.bop19": "ycbv--377940",
+ "hb.val": "detector-bop-hb--497808",
+ "itodd.val": "detector-bop-itodd--509908",
}
if args.id < 0:
n_rand = np.random.randint(1e6)
args.id = n_rand
- save_dir = RESULTS_DIR / f'{args.config}-{args.models}-{args.comment}-{args.id}'
- logger.info(f'Save dir: {save_dir}')
+ save_dir = RESULTS_DIR / f"{args.config}-{args.models}-{args.comment}-{args.id}"
+ logger.info(f"Save dir: {save_dir}")
for ds_name in ds_names:
this_cfg = deepcopy(cfg)
this_cfg.ds_name = ds_name
- this_cfg.save_dir = save_dir / f'dataset={ds_name}'
- logger.info(f'DATASET: {ds_name}')
+ this_cfg.save_dir = save_dir / f"dataset={ds_name}"
+ logger.info(f"DATASET: {ds_name}")
if ds_name in detector_run_ids:
this_cfg.detector_run_id = detector_run_ids[ds_name]
else:
this_cfg.skip_model_predictions = True
- logger.info(f'No model provided for dataset: {ds_name}.')
+ logger.info(f"No model provided for dataset: {ds_name}.")
run_detection_eval(this_cfg)
- logger.info('')
+ logger.info("")
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detector_training.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detector_training.py
index 02895260..1709e6f2 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detector_training.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detector_training.py
@@ -1,37 +1,43 @@
import argparse
-import numpy as np
import os
+
+import numpy as np
from colorama import Fore, Style
-from happypose.pose_estimators.cosypose.cosypose.training.train_detector import train_detector
+from happypose.pose_estimators.cosypose.cosypose.training.train_detector import (
+ train_detector,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+
logger = get_logger(__name__)
-if __name__ == '__main__':
- parser = argparse.ArgumentParser('Training')
- parser.add_argument('--config', default='', type=str)
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--resume', default='', type=str)
- parser.add_argument('--no-eval', action='store_true')
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser("Training")
+ parser.add_argument("--config", default="", type=str)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--resume", default="", type=str)
+ parser.add_argument("--no-eval", action="store_true")
args = parser.parse_args()
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
if args.config:
- logger.info(f"{Fore.GREEN}Training with config: {args.config} {Style.RESET_ALL}")
+ logger.info(
+ f"{Fore.GREEN}Training with config: {args.config} {Style.RESET_ALL}",
+ )
cfg.resume_run_id = None
if len(args.resume) > 0:
cfg.resume_run_id = args.resume
logger.info(f"{Fore.RED}Resuming {cfg.resume_run_id} {Style.RESET_ALL}")
- N_CPUS = int(os.environ.get('N_CPUS', 10))
- N_GPUS = int(os.environ.get('N_PROCS', 1))
+ N_CPUS = int(os.environ.get("N_CPUS", 10))
+ N_GPUS = int(os.environ.get("N_PROCS", 1))
N_WORKERS = min(N_CPUS - 2, 8)
N_RAND = np.random.randint(1e6)
cfg.n_gpus = N_GPUS
- run_comment = ''
+ run_comment = ""
# Data
cfg.train_ds_names = []
@@ -47,7 +53,7 @@
cfg.gray_augmentation = False
# Model
- cfg.backbone_str = 'resnet50-fpn'
+ cfg.backbone_str = "resnet50-fpn"
cfg.anchor_sizes = ((32,), (64,), (128,), (256,), (512,))
    # Pretraining
@@ -63,7 +69,7 @@
cfg.n_dataloader_workers = N_WORKERS
# Optimizer
- cfg.optimizer = 'sgd'
+ cfg.optimizer = "sgd"
cfg.lr = (0.02 / 8) * N_GPUS * float(cfg.batch_size / 4)
cfg.weight_decay = 1e-4
cfg.momentum = 0.9
@@ -74,31 +80,34 @@
cfg.classifier_alpha = 1
cfg.mask_alpha = 1
cfg.box_reg_alpha = 1
- if 'tless' in args.config:
+ if "tless" in args.config:
cfg.input_resize = (540, 720)
- elif 'ycbv' in args.config:
+ elif "ycbv" in args.config:
cfg.input_resize = (480, 640)
- elif 'bop-' in args.config:
+ elif "bop-" in args.config:
cfg.input_resize = None
else:
raise ValueError
- if 'bop-' in args.config:
- from happypose.pose_estimators.cosypose.cosypose.bop_config import BOP_CONFIG
- from happypose.pose_estimators.cosypose.cosypose.bop_config import PBR_DETECTORS
- bop_name, train_type = args.config.split('-')[1:]
+ if "bop-" in args.config:
+ from happypose.pose_estimators.cosypose.cosypose.bop_config import (
+ BOP_CONFIG,
+ PBR_DETECTORS,
+ )
+
+ bop_name, train_type = args.config.split("-")[1:]
bop_cfg = BOP_CONFIG[bop_name]
- if train_type == 'pbr':
- cfg.train_ds_names = [(bop_cfg['train_pbr_ds_name'][0], 1)]
- elif train_type == 'synt+real':
- cfg.train_ds_names = bop_cfg['train_synt_real_ds_names']
+ if train_type == "pbr":
+ cfg.train_ds_names = [(bop_cfg["train_pbr_ds_name"][0], 1)]
+ elif train_type == "synt+real":
+ cfg.train_ds_names = bop_cfg["train_synt_real_ds_names"]
cfg.run_id_pretrain = PBR_DETECTORS[bop_name]
else:
raise ValueError
cfg.val_ds_names = cfg.train_ds_names
- cfg.input_resize = bop_cfg['input_resize']
- if len(bop_cfg['test_ds_name']) > 0:
- cfg.test_ds_names = bop_cfg['test_ds_name']
+ cfg.input_resize = bop_cfg["input_resize"]
+ if len(bop_cfg["test_ds_name"]) > 0:
+ cfg.test_ds_names = bop_cfg["test_ds_name"]
else:
raise ValueError(args.config)
@@ -107,20 +116,20 @@
if args.no_eval:
cfg.test_ds_names = []
- cfg.run_id = f'detector-{args.config}-{run_comment}-{N_RAND}'
+ cfg.run_id = f"detector-{args.config}-{run_comment}-{N_RAND}"
if args.debug:
cfg.n_epochs = 4
cfg.val_epoch_interval = 1
cfg.batch_size = 2
cfg.epoch_size = 10 * cfg.batch_size
- cfg.run_id = 'debug-' + cfg.run_id
+ cfg.run_id = "debug-" + cfg.run_id
cfg.background_augmentation = False
cfg.rgb_augmentation = False
cfg.n_dataloader_workers = 1
cfg.n_test_frames = 10
- N_GPUS = int(os.environ.get('N_PROCS', 1))
+ N_GPUS = int(os.environ.get("N_PROCS", 1))
cfg.epoch_size = cfg.epoch_size // N_GPUS
train_detector(cfg)
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_full_cosypose_eval_new.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_full_cosypose_eval_new.py
index 3729e333..265467e1 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_full_cosypose_eval_new.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_full_cosypose_eval_new.py
@@ -2,41 +2,37 @@
import copy
import os
from pathlib import Path
-from typing import Dict, Optional, Tuple
# Third Party
from omegaconf import OmegaConf
-from happypose.pose_estimators.megapose.evaluation.bop import run_evaluation
-from happypose.pose_estimators.megapose.evaluation.evaluation import get_save_dir
+
+from happypose.pose_estimators.cosypose.cosypose.evaluation.evaluation import run_eval
# MegaPose
-from happypose.pose_estimators.megapose.bop_config import (
- PBR_COARSE,
- PBR_DETECTORS,
- PBR_REFINER,
- SYNT_REAL_COARSE,
- SYNT_REAL_DETECTORS,
- SYNT_REAL_REFINER,
-)
+from happypose.pose_estimators.megapose.bop_config import PBR_DETECTORS
from happypose.pose_estimators.megapose.config import (
DEBUG_RESULTS_DIR,
- EXP_DIR,
MODELNET_TEST_CATEGORIES,
RESULTS_DIR,
)
+from happypose.pose_estimators.megapose.evaluation.bop import run_evaluation
from happypose.pose_estimators.megapose.evaluation.eval_config import (
BOPEvalConfig,
EvalConfig,
FullEvalConfig,
HardwareConfig,
)
-from happypose.pose_estimators.megapose.evaluation.evaluation import generate_save_key
-from happypose.pose_estimators.cosypose.cosypose.evaluation.evaluation import run_eval
-
-from happypose.toolbox.utils.distributed import get_rank, get_world_size, init_distributed_mode
+from happypose.pose_estimators.megapose.evaluation.evaluation import (
+ generate_save_key,
+ get_save_dir,
+)
+from happypose.toolbox.utils.distributed import (
+ get_rank,
+ get_world_size,
+ init_distributed_mode,
+)
from happypose.toolbox.utils.logging import get_logger, set_logging_level
-
logger = get_logger(__name__)
BOP_DATASET_NAMES = [
@@ -62,7 +58,9 @@
]
-MODELNET_TEST_DATASETS = [f"modelnet.{category}.test" for category in MODELNET_TEST_CATEGORIES]
+MODELNET_TEST_DATASETS = [
+ f"modelnet.{category}.test" for category in MODELNET_TEST_CATEGORIES
+]
def create_eval_cfg(
@@ -70,8 +68,7 @@ def create_eval_cfg(
detection_type: str,
coarse_estimation_type: str,
ds_name: str,
-) -> Tuple[str, EvalConfig]:
-
+) -> tuple[str, EvalConfig]:
cfg = copy.deepcopy(cfg)
cfg.inference.detection_type = detection_type
@@ -87,7 +84,8 @@ def create_eval_cfg(
elif detection_type == "gt":
pass
else:
- raise ValueError(f"Unknown detector type {cfg.detector_type}")
+ msg = f"Unknown detector type {cfg.detector_type}"
+ raise ValueError(msg)
name = generate_save_key(detection_type, coarse_estimation_type)
@@ -95,7 +93,6 @@ def create_eval_cfg(
def run_full_eval(cfg: FullEvalConfig) -> None:
-
bop_eval_cfgs = []
init_distributed_mode()
@@ -109,17 +106,20 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
# Iterate over each dataset
for ds_name in cfg.ds_names:
-
# create the EvalConfig objects that we will call `run_eval` on
- eval_configs: Dict[str, EvalConfig] = dict()
- for (detection_type, coarse_estimation_type) in cfg.detection_coarse_types:
- name, cfg_ = create_eval_cfg(cfg, detection_type, coarse_estimation_type, ds_name)
+ eval_configs: dict[str, EvalConfig] = {}
+ for detection_type, coarse_estimation_type in cfg.detection_coarse_types:
+ name, cfg_ = create_eval_cfg(
+ cfg,
+ detection_type,
+ coarse_estimation_type,
+ ds_name,
+ )
eval_configs[name] = cfg_
# For each eval_cfg run the evaluation.
# Note that the results get saved to disk
- for save_key, eval_cfg in eval_configs.items():
-
+ for _save_key, eval_cfg in eval_configs.items():
# Run the inference
if not cfg.skip_inference:
eval_out = run_eval(eval_cfg)
@@ -139,17 +139,16 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
}
assert Path(
- eval_out["results_path"]
+ eval_out["results_path"],
).is_file(), f"The file {eval_out['results_path']} doesn't exist"
# Run the bop eval for each type of prediction
if cfg.run_bop_eval and get_rank() == 0:
-
- bop_eval_keys = set(("refiner/final", "depth_refiner"))
+ bop_eval_keys = {"refiner/final", "depth_refiner"}
bop_eval_keys = bop_eval_keys.intersection(set(eval_out["pred_keys"]))
for method in bop_eval_keys:
- if not "bop19" in ds_name:
+ if "bop19" not in ds_name:
continue
bop_eval_cfg = BOPEvalConfig(
@@ -159,7 +158,7 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
eval_dir=eval_out["save_dir"] / "bop_evaluation",
method=method,
convert_only=False,
- use_post_score=False
+ use_post_score=False,
)
bop_eval_cfgs.append(bop_eval_cfg)
@@ -182,6 +181,7 @@ def update_cfg_debug(cfg: EvalConfig) -> FullEvalConfig:
cfg.save_dir = str(DEBUG_RESULTS_DIR / cfg.result_id)
return cfg
+
if __name__ == "__main__":
print("Running eval")
set_logging_level("debug")
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py
index f91b533d..8e798371 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py
@@ -1,16 +1,11 @@
# Standard Library
import argparse
-import json
import os
########################
# Add cosypose to my path -> dirty
-import sys
from pathlib import Path
-from typing import List, Tuple, Union
-
-import cosypose
-import cv2
+from typing import Union
# Third Party
import numpy as np
@@ -19,50 +14,40 @@
from bokeh.plotting import gridplot
from PIL import Image
-from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
from happypose.pose_estimators.cosypose.cosypose.utils.cosypose_wrapper import (
CosyPoseWrapper,
)
-#from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import BulletSceneRenderer
-from happypose.pose_estimators.cosypose.cosypose.visualization.singleview import (
- render_prediction_wrt_camera,
-)
-
# MegaPose
from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
-from happypose.toolbox.inference.types import (
- DetectionsType,
- ObservationTensor,
- PoseEstimatesType,
-)
+from happypose.toolbox.inference.types import ObservationTensor
from happypose.toolbox.lib3d.transform import Transform
# HappyPose
from happypose.toolbox.renderer import Panda3dLightData
from happypose.toolbox.renderer.panda3d_scene_renderer import Panda3dSceneRenderer
from happypose.toolbox.utils.conversion import convert_scene_observation_to_panda3d
-from happypose.toolbox.utils.load_model import NAMED_MODELS, load_named_model
from happypose.toolbox.utils.logging import get_logger, set_logging_level
from happypose.toolbox.visualization.bokeh_plotter import BokehPlotter
from happypose.toolbox.visualization.utils import make_contour_overlay
-########################
-
-
+# from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer
+# import BulletSceneRenderer
+########################
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
def load_observation(
example_dir: Path,
load_depth: bool = False,
-) -> Tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
+) -> tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
camera_data = CameraData.from_json((example_dir / "camera_data.json").read_text())
rgb = np.array(Image.open(example_dir / "image_rgb.png"), dtype=np.uint8)
@@ -70,7 +55,10 @@ def load_observation(
depth = None
if load_depth:
- depth = np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32) / 1000
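+        # image_depth.png stores depth in millimeters; divide by 1000 for meters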
+ depth = (
+ np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32)
+ / 1000
+ )
assert depth.shape[:2] == camera_data.resolution
return rgb, depth, camera_data
@@ -86,6 +74,7 @@ def load_observation_tensor(
observation.cuda()
return observation
+
def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
rigid_objects = []
mesh_units = "mm"
@@ -98,7 +87,9 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
            assert not mesh_path, f"there are multiple meshes in the {label} directory"
            mesh_path = fn
        assert mesh_path, f"couldn't find an obj or ply mesh for {label}"
- rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units))
+ rigid_objects.append(
+ RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units),
+ )
# TODO: fix mesh units
rigid_object_dataset = RigidObjectDataset(rigid_objects)
return rigid_object_dataset
@@ -116,7 +107,10 @@ def rendering(predictions, example_dir):
renderer = Panda3dSceneRenderer(object_dataset)
# Data necessary for image rendering
object_datas = [ObjectData(label=obj_label, TWO=Transform(pred))]
- camera_data, object_datas = convert_scene_observation_to_panda3d(camera_data, object_datas)
+ camera_data, object_datas = convert_scene_observation_to_panda3d(
+ camera_data,
+ object_datas,
+ )
light_datas = [
Panda3dLightData(
light_type="ambient",
@@ -138,13 +132,14 @@ def rendering(predictions, example_dir):
def save_predictions(example_dir, renderings):
rgb_render = renderings.rgb
rgb, _, _ = load_observation(example_dir, load_depth=False)
- # render_prediction_wrt_camera calls BulletSceneRenderer.render_scene using only one camera at pose Identity and return only rgb values
- # BulletSceneRenderer.render_scene: gets a "object list" (prediction like object), a list of camera infos (with Km pose, res) and renders
+    # render_prediction_wrt_camera calls BulletSceneRenderer.render_scene using only one
+    # camera at pose Identity and returns only rgb values
+    # BulletSceneRenderer.render_scene: gets an "object list" (prediction-like object), a
+    # list of camera infos (with K, pose, res) and renders
# a "camera observation" for each camera/viewpoint
# Actually, renders: rgb, mask, depth, near, far
- #rgb_render = render_prediction_wrt_camera(renderer, preds, cam)
+ # rgb_render = render_prediction_wrt_camera(renderer, preds, cam)
mask = ~(rgb_render.sum(axis=-1) == 0)
- alpha = 0.1
rgb_n_render = rgb.copy()
rgb_n_render[mask] = rgb_render[mask]
@@ -158,10 +153,16 @@ def save_predictions(example_dir, renderings):
fig_mesh_overlay = plotter.plot_overlay(rgb, renderings.rgb)
contour_overlay = make_contour_overlay(
- rgb, renderings.rgb, dilate_iterations=1, color=(0, 255, 0)
+ rgb,
+ renderings.rgb,
+ dilate_iterations=1,
+ color=(0, 255, 0),
)["img"]
fig_contour_overlay = plotter.plot_image(contour_overlay)
- fig_all = gridplot([[fig_rgb, fig_contour_overlay, fig_mesh_overlay]], toolbar_location=None)
+ fig_all = gridplot(
+ [[fig_rgb, fig_contour_overlay, fig_mesh_overlay]],
+ toolbar_location=None,
+ )
vis_dir = example_dir / "visualizations"
vis_dir.mkdir(exist_ok=True)
export_png(fig_mesh_overlay, filename=vis_dir / "mesh_overlay.png")
@@ -186,11 +187,13 @@ def run_inference(
set_logging_level("info")
parser = argparse.ArgumentParser()
parser.add_argument("example_name")
- # parser.add_argument("--model", type=str, default="megapose-1.0-RGB-multi-hypothesis")
+ # parser.add_argument(
+ # "--model", type=str, default="megapose-1.0-RGB-multi-hypothesis"
+ # )
parser.add_argument("--dataset", type=str, default="ycbv")
- #parser.add_argument("--vis-detections", action="store_true")
+ # parser.add_argument("--vis-detections", action="store_true")
parser.add_argument("--run-inference", action="store_true", default=True)
- #parser.add_argument("--vis-outputs", action="store_true")
+ # parser.add_argument("--vis-outputs", action="store_true")
args = parser.parse_args()
data_dir = os.getenv("HAPPYPOSE_DATA_DIR")
@@ -198,11 +201,11 @@ def run_inference(
example_dir = Path(data_dir) / "examples" / args.example_name
dataset_to_use = args.dataset # tless or ycbv
- #if args.vis_detections:
+ # if args.vis_detections:
# make_detections_visualization(example_dir)
if args.run_inference:
run_inference(example_dir, None, dataset_to_use)
- #if args.vis_outputs:
- # make_output_visualization(example_dir)
\ No newline at end of file
+ # if args.vis_outputs:
+ # make_output_visualization(example_dir)
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py
index 0d7b7d43..44551e50 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py
@@ -1,39 +1,47 @@
import argparse
-import numpy as np
import os
+
+import numpy as np
from colorama import Fore, Style
from happypose.pose_estimators.cosypose.cosypose.training.train_pose import train_pose
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+
logger = get_logger(__name__)
def make_cfg(args):
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
if args.config:
- logger.info(f"{Fore.GREEN}Training with config: {args.config} {Style.RESET_ALL}")
+ logger.info(
+ f"{Fore.GREEN}Training with config: {args.config} {Style.RESET_ALL}",
+ )
cfg.resume_run_id = None
if len(args.resume) > 0:
cfg.resume_run_id = args.resume
logger.info(f"{Fore.RED}Resuming {cfg.resume_run_id} {Style.RESET_ALL}")
- N_CPUS = int(os.environ.get('N_CPUS', 10))
+ N_CPUS = int(os.environ.get("N_CPUS", 10))
N_WORKERS = min(N_CPUS - 2, 8)
N_WORKERS = 8
N_RAND = np.random.randint(1e6)
- run_comment = ''
+ run_comment = ""
# Data
- cfg.urdf_ds_name = 'ycbv'
- cfg.object_ds_name = 'ycbv'
+ cfg.urdf_ds_name = "ycbv"
+ cfg.object_ds_name = "ycbv"
cfg.n_symmetries_batch = 64
- cfg.train_ds_names = [('synt.ycbv-1M', 1), ('ycbv.real.train', 3), ('ycbv.synthetic.train', 3)]
+ cfg.train_ds_names = [
+ ("synt.ycbv-1M", 1),
+ ("ycbv.real.train", 3),
+ ("ycbv.synthetic.train", 3),
+ ]
cfg.val_ds_names = cfg.train_ds_names
cfg.val_epoch_interval = 10
- cfg.test_ds_names = ['ycbv.test.keyframes', ]
+ cfg.test_ds_names = ["ycbv.test.keyframes"]
cfg.test_epoch_interval = 30
cfg.n_test_frames = None
@@ -43,7 +51,7 @@ def make_cfg(args):
cfg.gray_augmentation = False
# Model
- cfg.backbone_str = 'efficientnet-b3'
+ cfg.backbone_str = "efficientnet-b3"
cfg.run_id_pretrain = None
cfg.n_pose_dims = 9
cfg.n_rendering_workers = N_WORKERS
@@ -52,7 +60,7 @@ def make_cfg(args):
# Optimizer
cfg.lr = 3e-4
- cfg.weight_decay = 0.
+ cfg.weight_decay = 0.0
cfg.n_epochs_warmup = 50
cfg.lr_epoch_decay = 500
cfg.clip_grad_norm = 0.5
@@ -66,103 +74,110 @@ def make_cfg(args):
# Method
cfg.loss_disentangled = True
cfg.n_points_loss = 2600
- cfg.TCO_input_generator = 'fixed'
+ cfg.TCO_input_generator = "fixed"
cfg.n_iterations = 1
cfg.min_area = None
- if 'bop-' in args.config:
- from happypose.pose_estimators.cosypose.cosypose.bop_config import BOP_CONFIG
- from happypose.pose_estimators.cosypose.cosypose.bop_config import PBR_COARSE, PBR_REFINER
+ if "bop-" in args.config:
+ from happypose.pose_estimators.cosypose.cosypose.bop_config import (
+ BOP_CONFIG,
+ PBR_COARSE,
+ PBR_REFINER,
+ )
- bop_name, train_type, model_type = args.config.split('-')[1:]
+ bop_name, train_type, model_type = args.config.split("-")[1:]
bop_cfg = BOP_CONFIG[bop_name]
- if train_type == 'pbr':
- cfg.train_ds_names = [(bop_cfg['train_pbr_ds_name'][0], 1)]
- elif train_type == 'synt+real':
- cfg.train_ds_names = bop_cfg['train_synt_real_ds_names']
- if model_type == 'coarse':
+ if train_type == "pbr":
+ cfg.train_ds_names = [(bop_cfg["train_pbr_ds_name"][0], 1)]
+ elif train_type == "synt+real":
+ cfg.train_ds_names = bop_cfg["train_synt_real_ds_names"]
+ if model_type == "coarse":
PRETRAIN_MODELS = PBR_COARSE
- elif model_type == 'refiner':
+ elif model_type == "refiner":
PRETRAIN_MODELS = PBR_REFINER
cfg.run_id_pretrain = PRETRAIN_MODELS[bop_name]
else:
raise ValueError
cfg.val_ds_names = cfg.train_ds_names
- cfg.urdf_ds_name = bop_cfg['urdf_ds_name']
- cfg.object_ds_name = bop_cfg['obj_ds_name']
- cfg.input_resize = bop_cfg['input_resize']
+ cfg.urdf_ds_name = bop_cfg["urdf_ds_name"]
+ cfg.object_ds_name = bop_cfg["obj_ds_name"]
+ cfg.input_resize = bop_cfg["input_resize"]
cfg.test_ds_names = []
- if model_type == 'coarse':
- cfg.init_method = 'z-up+auto-depth'
- cfg.TCO_input_generator = 'fixed+trans_noise'
- run_comment = 'transnoise-zxyavg'
- elif model_type == 'refiner':
- cfg.TCO_input_generator = 'gt+noise'
+ if model_type == "coarse":
+ cfg.init_method = "z-up+auto-depth"
+ cfg.TCO_input_generator = "fixed+trans_noise"
+ run_comment = "transnoise-zxyavg"
+ elif model_type == "refiner":
+ cfg.TCO_input_generator = "gt+noise"
else:
raise ValueError
- elif 'ycbv-' in args.config:
- cfg.urdf_ds_name = 'ycbv'
- cfg.object_ds_name = 'ycbv'
- cfg.train_ds_names = [('synthetic.ycbv-1M.train', 1),
- ('ycbv.train.synt', 1),
- ('ycbv.train.real', 3)]
- cfg.val_ds_names = [('synthetic.ycbv-1M.val', 1)]
- cfg.test_ds_names = ['ycbv.test.keyframes', ]
+ elif "ycbv-" in args.config:
+ cfg.urdf_ds_name = "ycbv"
+ cfg.object_ds_name = "ycbv"
+ cfg.train_ds_names = [
+ ("synthetic.ycbv-1M.train", 1),
+ ("ycbv.train.synt", 1),
+ ("ycbv.train.real", 3),
+ ]
+ cfg.val_ds_names = [("synthetic.ycbv-1M.val", 1)]
+ cfg.test_ds_names = ["ycbv.test.keyframes"]
cfg.input_resize = (480, 640)
- if args.config == 'ycbv-refiner-syntonly':
- cfg.TCO_input_generator = 'gt+noise'
- cfg.train_ds_names = [('synthetic.ycbv-1M.train', 1)]
- elif args.config == 'ycbv-refiner-finetune':
- cfg.TCO_input_generator = 'gt+noise'
- cfg.run_id_pretrain = 'ycbv-refiner-syntonly--596719'
+ if args.config == "ycbv-refiner-syntonly":
+ cfg.TCO_input_generator = "gt+noise"
+ cfg.train_ds_names = [("synthetic.ycbv-1M.train", 1)]
+ elif args.config == "ycbv-refiner-finetune":
+ cfg.TCO_input_generator = "gt+noise"
+ cfg.run_id_pretrain = "ycbv-refiner-syntonly--596719"
else:
raise ValueError(args.config)
- elif 'tless-' in args.config:
- cfg.urdf_ds_name = 'tless.cad'
- cfg.object_ds_name = 'tless.cad'
- cfg.train_ds_names = [('synthetic.tless-1M.train', 1),
- ('tless.primesense.train', 5)]
- cfg.val_ds_names = [('synthetic.tless-1M.val', 1)]
- cfg.test_ds_names = ['tless.primesense.test', ]
+ elif "tless-" in args.config:
+ cfg.urdf_ds_name = "tless.cad"
+ cfg.object_ds_name = "tless.cad"
+ cfg.train_ds_names = [
+ ("synthetic.tless-1M.train", 1),
+ ("tless.primesense.train", 5),
+ ]
+ cfg.val_ds_names = [("synthetic.tless-1M.val", 1)]
+ cfg.test_ds_names = ["tless.primesense.test"]
cfg.input_resize = (540, 720)
- if args.config == 'tless-coarse':
- cfg.TCO_input_generator = 'fixed'
- elif args.config == 'tless-refiner':
- cfg.TCO_input_generator = 'gt+noise'
+ if args.config == "tless-coarse":
+ cfg.TCO_input_generator = "fixed"
+ elif args.config == "tless-refiner":
+ cfg.TCO_input_generator = "gt+noise"
# Ablations
- elif args.config == 'tless-coarse-ablation-loss':
+ elif args.config == "tless-coarse-ablation-loss":
cfg.loss_disentangled = False
- cfg.TCO_input_generator = 'fixed'
- elif args.config == 'tless-refiner-ablation-loss':
+ cfg.TCO_input_generator = "fixed"
+ elif args.config == "tless-refiner-ablation-loss":
cfg.loss_disentangled = False
- cfg.TCO_input_generator = 'gt+noise'
+ cfg.TCO_input_generator = "gt+noise"
- elif args.config == 'tless-coarse-ablation-network':
- cfg.TCO_input_generator = 'fixed'
- cfg.backbone_str = 'flownet'
- elif args.config == 'tless-refiner-ablation-network':
- cfg.TCO_input_generator = 'gt+noise'
- cfg.backbone_str = 'flownet'
+ elif args.config == "tless-coarse-ablation-network":
+ cfg.TCO_input_generator = "fixed"
+ cfg.backbone_str = "flownet"
+ elif args.config == "tless-refiner-ablation-network":
+ cfg.TCO_input_generator = "gt+noise"
+ cfg.backbone_str = "flownet"
- elif args.config == 'tless-coarse-ablation-rot':
+ elif args.config == "tless-coarse-ablation-rot":
cfg.n_pose_dims = 7
- cfg.TCO_input_generator = 'fixed'
- elif args.config == 'tless-refiner-ablation-rot':
+ cfg.TCO_input_generator = "fixed"
+ elif args.config == "tless-refiner-ablation-rot":
cfg.n_pose_dims = 7
- cfg.TCO_input_generator = 'gt+noise'
+ cfg.TCO_input_generator = "gt+noise"
- elif args.config == 'tless-coarse-ablation-augm':
- cfg.TCO_input_generator = 'fixed'
+ elif args.config == "tless-coarse-ablation-augm":
+ cfg.TCO_input_generator = "fixed"
cfg.rgb_augmentation = False
- elif args.config == 'tless-refiner-ablation-augm':
- cfg.TCO_input_generator = 'gt+noise'
+ elif args.config == "tless-refiner-ablation-augm":
+ cfg.TCO_input_generator = "gt+noise"
cfg.rgb_augmentation = False
else:
@@ -176,7 +191,7 @@ def make_cfg(args):
if args.no_eval:
cfg.test_ds_names = []
- cfg.run_id = f'{args.config}-{run_comment}-{N_RAND}'
+ cfg.run_id = f"{args.config}-{run_comment}-{N_RAND}"
if args.debug:
cfg.test_ds_names = []
@@ -184,23 +199,23 @@ def make_cfg(args):
cfg.val_epoch_interval = 1
cfg.batch_size = 4
cfg.epoch_size = 4 * cfg.batch_size
- cfg.run_id = 'debug-' + cfg.run_id
+ cfg.run_id = "debug-" + cfg.run_id
cfg.background_augmentation = True
cfg.n_dataloader_workers = 8
cfg.n_rendering_workers = 0
cfg.n_test_frames = 10
- N_GPUS = int(os.environ.get('N_PROCS', 1))
+ N_GPUS = int(os.environ.get("N_PROCS", 1))
cfg.epoch_size = cfg.epoch_size // N_GPUS
return cfg
-if __name__ == '__main__':
- parser = argparse.ArgumentParser('Training')
- parser.add_argument('--config', default='', type=str)
- parser.add_argument('--debug', action='store_true')
- parser.add_argument('--no-eval', action='store_true')
- parser.add_argument('--resume', default='', type=str)
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser("Training")
+ parser.add_argument("--config", default="", type=str)
+ parser.add_argument("--debug", action="store_true")
+ parser.add_argument("--no-eval", action="store_true")
+ parser.add_argument("--resume", default="", type=str)
args = parser.parse_args()
cfg = make_cfg(args)
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/test_dataset.py b/happypose/pose_estimators/cosypose/cosypose/scripts/test_dataset.py
index 7771ae44..5a3e6e75 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/test_dataset.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/test_dataset.py
@@ -1,33 +1,45 @@
from torch.utils.data import DataLoader
from tqdm import tqdm
-from happypose.pose_estimators.cosypose.cosypose.datasets.pose_dataset import PoseDataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset
-if __name__ == '__main__':
+from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import (
+ make_scene_dataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.pose_dataset import (
+ PoseDataset,
+)
+
+if __name__ == "__main__":
from happypose.pose_estimators.cosypose.cosypose.bop_config import BOP_CONFIG
- for ds_name, bop_config in BOP_CONFIG.items():
- train_synt_real_ds_names = bop_config.get('train_synt_real_ds_names', [])
- for (ds_name_, _) in train_synt_real_ds_names:
+
+ for _ds_name, bop_config in BOP_CONFIG.items():
+ train_synt_real_ds_names = bop_config.get("train_synt_real_ds_names", [])
+ for ds_name_, _ in train_synt_real_ds_names:
scene_ds = make_scene_dataset(ds_name_)
print(scene_ds.name, len(scene_ds))
# ds_name = 'dream.baxter.synt.dr.train'
# ds_name = 'tudl.pbr'
- ds_name = 'tudl.pbr'
+ ds_name = "tudl.pbr"
scene_ds_train = make_scene_dataset(ds_name)
- ds_kwargs = dict(
+ ds_kwargs = {
# resize=(480, 640),
- resize=(1280, 960),
- rgb_augmentation=False,
- background_augmentation=False,
- )
+ "resize": (1280, 960),
+ "rgb_augmentation": False,
+ "background_augmentation": False,
+ }
ds_train = PoseDataset(scene_ds_train, **ds_kwargs)
- ds_iter_train = DataLoader(ds_train, shuffle=True, batch_size=32,
- num_workers=8, collate_fn=ds_train.collate_fn,
- drop_last=False, pin_memory=True)
+ ds_iter_train = DataLoader(
+ ds_train,
+ shuffle=True,
+ batch_size=32,
+ num_workers=8,
+ collate_fn=ds_train.collate_fn,
+ drop_last=False,
+ pin_memory=True,
+ )
# ds_train[8129]
for _ in range(1):
- for data in tqdm(ds_iter_train):
+ for _data in tqdm(ds_iter_train):
pass
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py b/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
index 7f1d0c43..03ac3534 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
@@ -1,39 +1,45 @@
import numpy as np
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import BulletSceneRenderer
-from tqdm import tqdm
import torch
+from tqdm import tqdm
+from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import ( # noqa: E501
+ BulletSceneRenderer,
+)
-if __name__ == '__main__':
+if __name__ == "__main__":
# obj_ds_name = 'hb'
- obj_ds_name = 'itodd'
+ obj_ds_name = "itodd"
renderer = BulletSceneRenderer(obj_ds_name, gpu_renderer=True)
- TCO = torch.tensor([
- [0, 1, 0, 0],
- [0, 0, -1, 0],
- [-1, 0, 0, 0.3],
- [0, 0, 0, 1]
- ]).numpy()
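+    # Fixed camera-from-object pose: the object sits 0.3 m in front of the camera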
+ TCO = torch.tensor(
+ [
+ [0, 1, 0, 0],
+ [0, 0, -1, 0],
+ [-1, 0, 0, 0.3],
+ [0, 0, 0, 1],
+ ],
+ ).numpy()
fx, fy = 300, 300
cx, cy = 320, 240
- K = np.array([
- [fx, 0, cx],
- [0, fy, cy],
- [0, 0, 1]
- ])
- cam = dict(
- resolution=(640, 480),
- K=K,
- TWC=np.eye(4)
+ K = np.array(
+ [
+ [fx, 0, cx],
+ [0, fy, cy],
+ [0, 0, 1],
+ ],
)
+ cam = {
+ "resolution": (640, 480),
+ "K": K,
+ "TWC": np.eye(4),
+ }
all_images = []
- labels = renderer.urdf_ds.index['label'].tolist()
- for n, obj_label in tqdm(enumerate(np.random.permutation(labels))):
- obj = dict(
- name=obj_label,
- TWO=TCO,
- )
- renders = renderer.render_scene([obj], [cam])[0]['rgb']
+ labels = renderer.urdf_ds.index["label"].tolist()
+ for _n, obj_label in tqdm(enumerate(np.random.permutation(labels))):
+ obj = {
+ "name": obj_label,
+ "TWO": TCO,
+ }
+ renders = renderer.render_scene([obj], [cam])[0]["rgb"]
assert renders.sum() > 0, obj_label
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/__init__.py b/happypose/pose_estimators/cosypose/cosypose/simulator/__init__.py
index 6242dfa1..e69de29b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/__init__.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/__init__.py
@@ -1,5 +0,0 @@
-from .body import Body
-from .camera import Camera
-from .base_scene import BaseScene
-from .caching import BodyCache, TextureCache
-from .textures import apply_random_textures
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/base_scene.py b/happypose/pose_estimators/cosypose/cosypose/simulator/base_scene.py
index c77f0df8..8fd00f3b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/base_scene.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/base_scene.py
@@ -1,8 +1,10 @@
import os
+import pkgutil
import subprocess
import xml.etree.ElementTree as ET
-import pkgutil
+
import pybullet as pb
+
from .client import BulletClient
@@ -10,33 +12,52 @@ class BaseScene:
_client_id = -1
_client = None
_connected = False
- _simulation_step = 1/240.
+ _simulation_step = 1 / 240.0
def connect(self, gpu_renderer=True, gui=False):
- assert not self._connected, 'Already connected'
+ assert not self._connected, "Already connected"
if gui:
- self._client_id = pb.connect(pb.GUI, '--width=640 --height=480')
- pb.configureDebugVisualizer(pb.COV_ENABLE_GUI, 1, physicsClientId=self._client_id)
- pb.configureDebugVisualizer(pb.COV_ENABLE_RENDERING, 1, physicsClientId=self._client_id)
- pb.configureDebugVisualizer(pb.COV_ENABLE_TINY_RENDERER, 0, physicsClientId=self._client_id)
+ self._client_id = pb.connect(pb.GUI, "--width=640 --height=480")
+ pb.configureDebugVisualizer(
+ pb.COV_ENABLE_GUI,
+ 1,
+ physicsClientId=self._client_id,
+ )
+ pb.configureDebugVisualizer(
+ pb.COV_ENABLE_RENDERING,
+ 1,
+ physicsClientId=self._client_id,
+ )
+ pb.configureDebugVisualizer(
+ pb.COV_ENABLE_TINY_RENDERER,
+ 0,
+ physicsClientId=self._client_id,
+ )
else:
self._client_id = pb.connect(pb.DIRECT)
if self._client_id < 0:
- raise Exception('Cannot connect to pybullet')
+ msg = "Cannot connect to pybullet"
+ raise Exception(msg)
if gpu_renderer and not gui:
- os.environ['MESA_GL_VERSION_OVERRIDE'] = '3.3'
- os.environ['MESA_GLSL_VERSION_OVERRIDE'] = '330'
+ os.environ["MESA_GL_VERSION_OVERRIDE"] = "3.3"
+ os.environ["MESA_GLSL_VERSION_OVERRIDE"] = "330"
# Get EGL device
- assert 'CUDA_VISIBLE_DEVICES' in os.environ
- devices = os.environ.get('CUDA_VISIBLE_DEVICES', ).split(',')
+ assert "CUDA_VISIBLE_DEVICES" in os.environ
+ devices = os.environ.get("CUDA_VISIBLE_DEVICES").split(",")
assert len(devices) == 1
- out = subprocess.check_output(['nvidia-smi', '--id='+str(devices[0]), '-q', '--xml-format'])
+ out = subprocess.check_output(
+ ["nvidia-smi", "--id=" + str(devices[0]), "-q", "--xml-format"],
+ )
tree = ET.fromstring(out)
- gpu = tree.findall('gpu')[0]
- dev_id = gpu.find('minor_number').text
- os.environ['EGL_VISIBLE_DEVICES'] = str(dev_id)
- egl = pkgutil.get_loader('eglRenderer')
- pb.loadPlugin(egl.get_filename(), "_eglRendererPlugin", physicsClientId=self._client_id)
+ gpu = tree.findall("gpu")[0]
+ dev_id = gpu.find("minor_number").text
+ os.environ["EGL_VISIBLE_DEVICES"] = str(dev_id)
+ egl = pkgutil.get_loader("eglRenderer")
+ pb.loadPlugin(
+ egl.get_filename(),
+ "_eglRendererPlugin",
+ physicsClientId=self._client_id,
+ )
pb.resetSimulation(physicsClientId=self._client_id)
self._connected = True
self._client = BulletClient(self._client_id)
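
A hedged, standalone sketch of the CUDA-to-EGL device mapping that connect() performs above: nvidia-smi reports the GPU "minor number", which is the index EGL uses to enumerate devices. This assumes nvidia-smi is on PATH and a single visible CUDA device, as the assertions above require.

import subprocess
import xml.etree.ElementTree as ET


def egl_device_for_cuda(cuda_id: str) -> str:
    """Return the EGL device index matching a CUDA device id (sketch)."""
    out = subprocess.check_output(
        ["nvidia-smi", "--id=" + cuda_id, "-q", "--xml-format"],
    )
    gpu = ET.fromstring(out).findall("gpu")[0]
    return gpu.find("minor_number").text


# os.environ["EGL_VISIBLE_DEVICES"] = egl_device_for_cuda("0")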
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/body.py b/happypose/pose_estimators/cosypose/cosypose/simulator/body.py
index 3981c04e..2fb9d29e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/body.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/body.py
@@ -1,7 +1,9 @@
from pathlib import Path
import pybullet as pb
-from happypose.pose_estimators.cosypose.cosypose.lib3d import Transform, parse_pose_args
+
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform import parse_pose_args
+from happypose.toolbox.lib3d.transform import Transform
from .client import BulletClient
@@ -15,7 +17,7 @@ def __init__(self, body_id, scale=1.0, client_id=0):
@property
def name(self):
info = self._client.getBodyInfo(self._body_id)
- return info[-1].decode('utf8')
+ return info[-1].decode("utf8")
@property
def pose(self):
@@ -33,10 +35,12 @@ def pose(self, pose_args):
self._client.resetBasePositionAndOrientation(self._body_id, pos, orn)
def get_state(self):
- return dict(TWO=self.pose,
- name=self.name,
- scale=self._scale,
- body_id=self._body_id)
+ return {
+ "TWO": self.pose,
+ "name": self.name,
+ "scale": self._scale,
+ "body_id": self._body_id,
+ }
@property
def visual_shape_data(self):
@@ -53,6 +57,10 @@ def client_id(self):
@staticmethod
def load(urdf_path, scale=1.0, client_id=0):
urdf_path = Path(urdf_path)
- assert urdf_path.exists, 'URDF does not exist.'
- body_id = pb.loadURDF(urdf_path.as_posix(), physicsClientId=client_id, globalScaling=scale)
+        assert urdf_path.exists(), "URDF does not exist."
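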
+ body_id = pb.loadURDF(
+ urdf_path.as_posix(),
+ physicsClientId=client_id,
+ globalScaling=scale,
+ )
return Body(body_id, scale=scale, client_id=client_id)
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/caching.py b/happypose/pose_estimators/cosypose/cosypose/simulator/caching.py
index 5c965d24..2b151d38 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/caching.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/caching.py
@@ -1,7 +1,9 @@
+from collections import defaultdict
+from copy import deepcopy
+
import numpy as np
+
from .body import Body
-from copy import deepcopy
-from collections import defaultdict
from .client import BulletClient
@@ -13,13 +15,15 @@ def __init__(self, urdf_ds, client_id):
self.away_transform = (0, 0, 1000), (0, 0, 0, 1)
def _load_body(self, label):
- ds_idx = np.where(self.urdf_ds.index['label'] == label)[0].item()
+ ds_idx = np.where(self.urdf_ds.index["label"] == label)[0].item()
object_infos = self.urdf_ds[ds_idx].to_dict()
- body = Body.load(object_infos['urdf_path'],
- scale=object_infos['scale'],
- client_id=self.client.client_id)
+ body = Body.load(
+ object_infos["urdf_path"],
+ scale=object_infos["scale"],
+ client_id=self.client.client_id,
+ )
body.pose = self.away_transform
- self.cache[object_infos['label']].append(body)
+ self.cache[object_infos["label"]].append(body)
return body
def hide_bodies(self):
@@ -37,9 +41,9 @@ def get_bodies_by_labels(self, labels):
for label in labels:
gb_label[label] += 1
- for label, n_instances in gb_label.items():
+ for label, _n_instances in gb_label.items():
n_missing = gb_label[label] - len(self.cache[label])
- for n in range(n_missing):
+ for _n in range(n_missing):
self._load_body(label)
remaining = deepcopy(dict(self.cache))
@@ -47,7 +51,7 @@ def get_bodies_by_labels(self, labels):
return bodies
def get_bodies_by_ids(self, ids):
- labels = [self.urdf_ds[idx]['label'] for idx in ids]
+ labels = [self.urdf_ds[idx]["label"] for idx in ids]
return self.get_bodies_by_labels(labels)
def __len__(self):
@@ -58,10 +62,12 @@ class TextureCache:
def __init__(self, texture_ds, client_id):
self.texture_ds = texture_ds
self.client = BulletClient(client_id)
- self.cache = dict()
+ self.cache = {}
def _load_texture(self, idx):
- self.cache[idx] = self.client.loadTexture(str(self.texture_ds[idx]['texture_path']))
+ self.cache[idx] = self.client.loadTexture(
+ str(self.texture_ds[idx]["texture_path"]),
+ )
def get_texture(self, idx):
if idx not in self.cache:
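
A self-contained sketch of the lazy, label-keyed caching pattern that BodyCache implements above: missing instances are created on demand via a factory and reused on later requests. LazyCache and the str.upper factory are illustrative only, not part of the codebase.

from collections import defaultdict


class LazyCache:
    def __init__(self, factory):
        self.factory = factory
        self.cache = defaultdict(list)

    def get(self, labels):
        # Create just enough instances to satisfy the request.
        counts = defaultdict(int)
        for label in labels:
            counts[label] += 1
        for label, n in counts.items():
            while len(self.cache[label]) < n:
                self.cache[label].append(self.factory(label))
        # Hand out cached instances in request order.
        seen = defaultdict(int)
        out = []
        for label in labels:
            out.append(self.cache[label][seen[label]])
            seen[label] += 1
        return out


cache = LazyCache(str.upper)
assert cache.get(["a", "a", "b"]) == ["A", "A", "B"]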
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py b/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
index b553934b..7cd2b287 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
@@ -2,8 +2,8 @@
import pybullet as pb
import transforms3d
-from happypose.pose_estimators.cosypose.cosypose.lib3d import Transform
from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import euler2quat
+from happypose.toolbox.lib3d.transform import Transform
def proj_from_K(K, h, w, near, far):
@@ -15,20 +15,23 @@ def proj_from_K(K, h, w, near, far):
x0 = K[0, 2]
y0 = K[1, 2]
y0 = h - y0
- A = near+far
- B = near*far
- persp = np.array([[alpha, s, -x0, 0],
- [0, beta, -y0, 0],
- [0, 0, A, B],
- [0, 0, -1, 0]])
+ A = near + far
+ B = near * far
+ persp = np.array(
+ [[alpha, s, -x0, 0], [0, beta, -y0, 0], [0, 0, A, B], [0, 0, -1, 0]],
+ )
left, right, bottom, top = 0, w, 0, h
- tx = - (right+left)/(right-left)
- ty = - (top+bottom)/(top-bottom)
- tz = - (far+near) / (far-near)
- NDC = np.array([[2 / (right - left), 0, 0, tx],
- [0, 2 / (top-bottom), 0, ty],
- [0, 0, - 2 / (far - near), tz],
- [0, 0, 0, 1]])
+ tx = -(right + left) / (right - left)
+ ty = -(top + bottom) / (top - bottom)
+ tz = -(far + near) / (far - near)
+ NDC = np.array(
+ [
+ [2 / (right - left), 0, 0, tx],
+ [0, 2 / (top - bottom), 0, ty],
+ [0, 0, -2 / (far - near), tz],
+ [0, 0, 0, 1],
+ ],
+ )
proj = NDC @ persp
return proj.T
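
A quick numeric check of proj_from_K as reformatted above (intrinsics are made-up values; numpy only): the composed NDC @ persp matrix should map a point on the near plane, on the optical axis, to clip-space depth -1.

import numpy as np

fx, fy, cx, cy = 300.0, 300.0, 320.0, 240.0
h, w, near, far = 480, 640, 0.01, 10.0
K = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]])

proj = proj_from_K(K, h, w, near, far).T  # undo the pybullet-style transpose
p = proj @ np.array([0.0, 0.0, -near, 1.0])  # OpenGL camera looks down -z
assert np.isclose(p[2] / p[3], -1.0)  # near plane maps to NDC depth -1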
@@ -36,72 +39,99 @@ def proj_from_K(K, h, w, near, far):
def K_from_fov(fov, resolution):
h, w = min(resolution), max(resolution)
f = h / (2 * np.tan(fov * 0.5 * np.pi / 180))
- K = np.array([[f, 0, w/2], [0, f, h/2], [0, 0, 1]])
+ K = np.array([[f, 0, w / 2], [0, f, h / 2], [0, 0, 1]])
return K
class Camera:
def __init__(self, resolution=(320, 240), near=0.01, far=10, client_id=0):
- assert client_id >= 0, 'Please provide a client id (0 by default)'
+ assert client_id >= 0, "Please provide a client id (0 by default)"
h, w = min(resolution), max(resolution)
self._client_id = client_id
self._near = near
self._far = far
self._shape = (h, w)
- self._rgba = np.zeros(self._shape + (4,), dtype=np.uint8)
+ self._rgba = np.zeros((*self._shape, 4), dtype=np.uint8)
self._mask = np.zeros(self._shape, dtype=np.uint8)
self._depth = np.zeros(self._shape, dtype=np.float32)
- self._render_options = dict()
+ self._render_options = {}
self._render_flags = 0
self.mask_link_index(True)
self.casts_shadow(True)
- # Transform between standard camera coordinate (z forward) and OPENGL camera coordinate system
- wxyz = transforms3d.euler.euler2quat(np.pi / 2, 0, 0, axes='rxyz')
+        # Transform between the standard camera coordinates (z forward) and the
+        # OpenGL camera coordinate system
+ wxyz = transforms3d.euler.euler2quat(np.pi / 2, 0, 0, axes="rxyz")
xyzw = [*wxyz[1:], wxyz[0]]
self.TCCGL = Transform(xyzw, (0, 0, 0))
# Set some default parameters
- self.set_extrinsic_bullet(target=(0, 0, 0), distance=1.6, yaw=90, pitch=-35, roll=0)
+ self.set_extrinsic_bullet(
+ target=(0, 0, 0),
+ distance=1.6,
+ yaw=90,
+ pitch=-35,
+ roll=0,
+ )
self.set_intrinsic_fov(90)
def set_extrinsic_bullet(self, target, distance, yaw, pitch, roll):
- """
- Angles in *degrees*.
- """
- up = 'z'
- self._view_params = dict(yaw=yaw, pitch=pitch, roll=roll,
- target=target, distance=distance)
+ """Angles in *degrees*."""
+ up = "z"
+ self._view_params = {
+ "yaw": yaw,
+ "pitch": pitch,
+ "roll": roll,
+ "target": target,
+ "distance": distance,
+ }
self._view_mat = pb.computeViewMatrixFromYawPitchRoll(
- target, distance, yaw, pitch, roll, 'xyz'.index(up))
+ target,
+ distance,
+ yaw,
+ pitch,
+ roll,
+ "xyz".index(up),
+ )
def set_extrinsic_T(self, TWC):
TWC = Transform(TWC)
TWCGL = TWC * self.TCCGL
xyzw = TWCGL.quaternion.coeffs()
wxyz = [xyzw[-1], *xyzw[:-1]]
- pitch, roll, yaw = transforms3d.euler.quat2euler(wxyz, axes='sxyz')
+ pitch, roll, yaw = transforms3d.euler.quat2euler(wxyz, axes="sxyz")
yaw = yaw * 180 / np.pi
pitch = pitch * 180 / np.pi
roll = roll * 180 / np.pi
yaw = (yaw % 360 + 360) % 360
distance = 0.0001
- self.set_extrinsic_bullet(target=TWCGL.translation, distance=distance,
- pitch=pitch, roll=roll, yaw=yaw)
-
- def set_extrinsic_spherical(self, target=(0, 0, 0), rho=0.6, theta=np.pi/4, phi=0, roll=0):
- """
- Angles in *radians*.
- https://fr.wikipedia.org/wiki/Coordonn%C3%A9es_sph%C3%A9riques#/media/Fichier:Spherical_Coordinates_(Colatitude,_Longitude)_(b).svg
+ self.set_extrinsic_bullet(
+ target=TWCGL.translation,
+ distance=distance,
+ pitch=pitch,
+ roll=roll,
+ yaw=yaw,
+ )
+
+ def set_extrinsic_spherical(
+ self,
+ target=(0, 0, 0),
+ rho=0.6,
+ theta=np.pi / 4,
+ phi=0,
+ roll=0,
+ ):
+ """Angles in *radians*.
+        https://fr.wikipedia.org/wiki/Coordonn%C3%A9es_sph%C3%A9riques#/media/Fichier:Spherical_Coordinates_(Colatitude,_Longitude)_(b).svg
"""
x = rho * np.sin(theta) * np.cos(phi)
y = rho * np.sin(theta) * np.sin(phi)
z = rho * np.cos(theta)
t = np.array([x, y, z])
- R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes='sxyz')
- R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi/2 + roll, axes='sxyz')
+ R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes="sxyz")
+ R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi / 2 + roll, axes="sxyz")
t += np.array(target)
TWC = Transform(R, t)
self.set_extrinsic_T(TWC)
@@ -116,9 +146,13 @@ def set_intrinsic_K(self, K):
def set_intrinsic_fov(self, fov):
h, w = self._shape
- self._proj_params = dict(fov=fov)
+ self._proj_params = {"fov": fov}
self._proj_mat = pb.computeProjectionMatrixFOV(
- fov=fov, aspect=w / h, nearVal=self._near, farVal=self._far)
+ fov=fov,
+ aspect=w / h,
+ nearVal=self._near,
+ farVal=self._far,
+ )
self._K = None
def set_intrinsic_f(self, *args):
@@ -134,7 +168,7 @@ def set_intrinsic_f(self, *args):
self.set_intrinsic_fov(fov)
def get_state(self):
- obs = dict()
+ obs = {}
# Get images
rgba, mask, depth = self._shot()
rgb = rgba[..., :3]
@@ -145,18 +179,27 @@ def get_state(self):
if self._K is not None:
K = self._K
else:
- K = K_from_fov(self._proj_params['fov'])
+            K = K_from_fov(self._proj_params["fov"], self._shape)
- trans = self._view_params['target']
- orn = euler2quat([self._view_params[k]*np.pi/180 for k in ('pitch', 'roll', 'yaw')], axes='sxyz')
+ trans = self._view_params["target"]
+ orn = euler2quat(
+ [self._view_params[k] * np.pi / 180 for k in ("pitch", "roll", "yaw")],
+ axes="sxyz",
+ )
TWCGL = Transform(orn, trans)
TWC = TWCGL * self.TCCGL.inverse()
- obs.update(TWC=TWC.toHomogeneousMatrix(), K=K, resolution=(self._shape[1], self._shape[0]),
- proj_mat=self._proj_mat, near=self._near, far=self._far)
+ obs.update(
+ TWC=TWC.toHomogeneousMatrix(),
+ K=K,
+ resolution=(self._shape[1], self._shape[0]),
+ proj_mat=self._proj_mat,
+ near=self._near,
+ far=self._far,
+ )
return obs
    def _shot(self):
-        """ Computes a RGB image, a depth buffer and a segmentation mask buffer
+        """Computes an RGB image, a depth buffer and a segmentation mask buffer
with body unique ids of visible objects for each pixel.
"""
h, w = self._shape
@@ -170,7 +213,8 @@ def _shot(self):
renderer=renderer,
flags=self._render_flags,
**self._render_options,
- physicsClientId=self._client_id)
+ physicsClientId=self._client_id,
+ )
rgba = np.asarray(rgba, dtype=np.uint8).reshape((h, w, 4))
depth = np.asarray(depth, dtype=np.float32).reshape((h, w))
@@ -178,20 +222,26 @@ def _shot(self):
return rgba, mask, depth
    def _project(self, fov, near, far):
-        """ Apply camera projection matrix.
-        Args:
-            fov (float): Field of view.
-            near float): Near plane distance.
-            far (float): Far plane distance.
+        """Apply camera projection matrix.
+
+        Args:
+        ----
+            fov (float): Field of view.
+            near (float): Near plane distance.
+            far (float): Far plane distance.
"""
self.near = near
self.far = far
h, w = self._shape
self._proj_mat = pb.computeProjectionMatrixFOV(
- fov=fov, aspect=w / h, nearVal=near, farVal=far)
+ fov=fov,
+ aspect=w / h,
+ nearVal=near,
+ farVal=far,
+ )
    def mask_link_index(self, flag):
-        """ If is enabled, the mask combines the object unique id and link index
+        """If enabled, the mask combines the object unique id and link index
as follows: value = objectUniqueId + ((linkIndex+1)<<24).
"""
if flag:
@@ -200,5 +250,5 @@ def mask_link_index(self, flag):
self._render_flags &= ~pb.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX
def casts_shadow(self, flag):
- """ 1 for shadows, 0 for no shadows. """
- self._render_options['shadow'] = 1 if flag else 0
+ """1 for shadows, 0 for no shadows."""
+ self._render_options["shadow"] = 1 if flag else 0
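
A small decoding helper for the combined segmentation values that mask_link_index documents above (value = objectUniqueId + ((linkIndex+1)<<24)); decode_mask_value is illustrative and not part of the module.

def decode_mask_value(value: int) -> tuple[int, int]:
    """Split a combined segmentation value into (body_id, link_index)."""
    body_id = value & ((1 << 24) - 1)
    link_index = (value >> 24) - 1  # -1 denotes the base link
    return body_id, link_index


assert decode_mask_value(7 + ((3 + 1) << 24)) == (7, 3)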
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/client.py b/happypose/pose_estimators/cosypose/cosypose/simulator/client.py
index 20fc960c..2975987f 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/client.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/client.py
@@ -1,4 +1,5 @@
import functools
+
import pybullet as pb
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/textures.py b/happypose/pose_estimators/cosypose/cosypose/simulator/textures.py
index c961e796..4f4e9217 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/textures.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/textures.py
@@ -1,5 +1,6 @@
-import numpy as np
from collections import defaultdict
+
+import numpy as np
import pybullet as pb
@@ -16,8 +17,13 @@ def apply_random_textures(body, texture_ids, per_link=False, np_random=np.random
if per_link:
texture_id = np_random.choice(texture_ids)
specular = np_random.randint(0, 1000)
- pb.changeVisualShape(body._body_id, link_id, link_shape_id,
- textureUniqueId=texture_id, rgbaColor=[1, 1, 1, 1],
- physicsClientId=body._client.client_id,
- specularColor=specular * np.ones(3))
+ pb.changeVisualShape(
+ body._body_id,
+ link_id,
+ link_shape_id,
+ textureUniqueId=texture_id,
+ rgbaColor=[1, 1, 1, 1],
+ physicsClientId=body._client.client_id,
+ specularColor=specular * np.ones(3),
+ )
return
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/detector_models_cfg.py b/happypose/pose_estimators/cosypose/cosypose/training/detector_models_cfg.py
index 04ec8fca..c25d9690 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/detector_models_cfg.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/detector_models_cfg.py
@@ -1,4 +1,6 @@
-from happypose.pose_estimators.cosypose.cosypose.models.mask_rcnn import DetectorMaskRCNN
+from happypose.pose_estimators.cosypose.cosypose.models.mask_rcnn import (
+ DetectorMaskRCNN,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
logger = get_logger(__name__)
@@ -9,8 +11,10 @@ def check_update_config(cfg):
def create_model_detector(cfg, n_classes):
- model = DetectorMaskRCNN(input_resize=cfg.input_resize,
- n_classes=n_classes,
- backbone_str=cfg.backbone_str,
- anchor_sizes=cfg.anchor_sizes)
+ model = DetectorMaskRCNN(
+ input_resize=cfg.input_resize,
+ n_classes=n_classes,
+ backbone_str=cfg.backbone_str,
+ anchor_sizes=cfg.anchor_sizes,
+ )
return model
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/maskrcnn_forward_loss.py b/happypose/pose_estimators/cosypose/cosypose/training/maskrcnn_forward_loss.py
index 17b7fe46..ce3e0161 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/maskrcnn_forward_loss.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/maskrcnn_forward_loss.py
@@ -1,34 +1,33 @@
-from happypose.pose_estimators.cosypose.cosypose.config import DEBUG_DATA_DIR
-import torch
-
def cast(obj):
return obj.cuda(non_blocking=True)
def h_maskrcnn(data, model, meters, cfg):
images, targets = data
- images = list(cast(image).permute(2, 0, 1).float() / 255 for image in images)
+ images = [cast(image).permute(2, 0, 1).float() / 255 for image in images]
targets = [{k: cast(v) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
- loss_rpn_box_reg = loss_dict['loss_rpn_box_reg']
- loss_objectness = loss_dict['loss_objectness']
- loss_box_reg = loss_dict['loss_box_reg']
- loss_classifier = loss_dict['loss_classifier']
- loss_mask = loss_dict['loss_mask']
+ loss_rpn_box_reg = loss_dict["loss_rpn_box_reg"]
+ loss_objectness = loss_dict["loss_objectness"]
+ loss_box_reg = loss_dict["loss_box_reg"]
+ loss_classifier = loss_dict["loss_classifier"]
+ loss_mask = loss_dict["loss_mask"]
- loss = cfg.rpn_box_reg_alpha * loss_rpn_box_reg + \
- cfg.objectness_alpha * loss_objectness + \
- cfg.box_reg_alpha * loss_box_reg + \
- cfg.classifier_alpha * loss_classifier + \
- cfg.mask_alpha * loss_mask
+ loss = (
+ cfg.rpn_box_reg_alpha * loss_rpn_box_reg
+ + cfg.objectness_alpha * loss_objectness
+ + cfg.box_reg_alpha * loss_box_reg
+ + cfg.classifier_alpha * loss_classifier
+ + cfg.mask_alpha * loss_mask
+ )
# torch.save(images, DEBUG_DATA_DIR / 'images.pth.tar')
- meters['loss_rpn_box_reg'].add(loss_rpn_box_reg.item())
- meters['loss_objectness'].add(loss_objectness.item())
- meters['loss_box_reg'].add(loss_box_reg.item())
- meters['loss_classifier'].add(loss_classifier.item())
- meters['loss_mask'].add(loss_mask.item())
+ meters["loss_rpn_box_reg"].add(loss_rpn_box_reg.item())
+ meters["loss_objectness"].add(loss_objectness.item())
+ meters["loss_box_reg"].add(loss_box_reg.item())
+ meters["loss_classifier"].add(loss_classifier.item())
+ meters["loss_mask"].add(loss_mask.item())
return loss
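
The rewritten sum above is a plain weighted multi-task loss; a tiny self-contained version of the same arithmetic (alpha values invented):

import torch

losses = {"mask": torch.tensor(0.8), "box_reg": torch.tensor(0.4)}
alphas = {"mask": 1.0, "box_reg": 2.0}
total = sum(alphas[k] * v for k, v in losses.items())
assert torch.isclose(total, torch.tensor(1.6))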
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py b/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py
index c17df15e..4cb2d740 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py
@@ -1,59 +1,72 @@
-import torch
import numpy as np
+import torch
-from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import TCO_init_from_boxes, TCO_init_from_boxes_zup_autodepth
-from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import add_noise
from happypose.pose_estimators.cosypose.cosypose.lib3d.cosypose_ops import (
+ TCO_init_from_boxes,
+ TCO_init_from_boxes_zup_autodepth,
loss_refiner_CO_disentangled,
loss_refiner_CO_disentangled_quaternions,
)
-from happypose.pose_estimators.cosypose.cosypose.lib3d.mesh_losses import compute_ADD_L1_loss
+from happypose.pose_estimators.cosypose.cosypose.lib3d.mesh_losses import (
+ compute_ADD_L1_loss,
+)
+from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import add_noise
def cast(obj):
return obj.cuda(non_blocking=True)
-def h_pose(model, mesh_db, data, meters,
- cfg, n_iterations=1, input_generator='fixed'):
-
+def h_pose(model, mesh_db, data, meters, cfg, n_iterations=1, input_generator="fixed"):
batch_size, _, h, w = data.images.shape
- images = cast(data.images).float() / 255.
+ images = cast(data.images).float() / 255.0
K = cast(data.K).float()
TCO_gt = cast(data.TCO).float()
- labels = np.array([obj['name'] for obj in data.objects])
+ labels = np.array([obj["name"] for obj in data.objects])
bboxes = cast(data.bboxes).float()
meshes = mesh_db.select(labels)
points = meshes.sample_points(cfg.n_points_loss, deterministic=False)
TCO_possible_gt = TCO_gt.unsqueeze(1) @ meshes.symmetries
- if input_generator == 'fixed':
+ if input_generator == "fixed":
TCO_init = TCO_init_from_boxes(z_range=(1.0, 1.0), boxes=bboxes, K=K)
- elif input_generator == 'gt+noise':
- TCO_init = add_noise(TCO_possible_gt[:, 0], euler_deg_std=[15, 15, 15], trans_std=[0.01, 0.01, 0.05])
- elif input_generator == 'fixed+trans_noise':
- assert cfg.init_method == 'z-up+auto-depth'
+ elif input_generator == "gt+noise":
+ TCO_init = add_noise(
+ TCO_possible_gt[:, 0],
+ euler_deg_std=[15, 15, 15],
+ trans_std=[0.01, 0.01, 0.05],
+ )
+ elif input_generator == "fixed+trans_noise":
+ assert cfg.init_method == "z-up+auto-depth"
TCO_init = TCO_init_from_boxes_zup_autodepth(bboxes, points, K)
- TCO_init = add_noise(TCO_init,
- euler_deg_std=[0, 0, 0],
- trans_std=[0.01, 0.01, 0.05])
+ TCO_init = add_noise(
+ TCO_init,
+ euler_deg_std=[0, 0, 0],
+ trans_std=[0.01, 0.01, 0.05],
+ )
else:
- raise ValueError('Unknown input generator', input_generator)
+ msg = "Unknown input generator"
+ raise ValueError(msg, input_generator)
# model.module.enable_debug()
- outputs = model(images=images, K=K, labels=labels,
- TCO=TCO_init, n_iterations=n_iterations)
+ outputs = model(
+ images=images,
+ K=K,
+ labels=labels,
+ TCO=TCO_init,
+ n_iterations=n_iterations,
+ )
# raise ValueError
losses_TCO_iter = []
for n in range(n_iterations):
- iter_outputs = outputs[f'iteration={n+1}']
- K_crop = iter_outputs['K_crop']
- TCO_input = iter_outputs['TCO_input']
- TCO_pred = iter_outputs['TCO_output']
- model_outputs = iter_outputs['model_outputs']
+ iter_outputs = outputs[f"iteration={n+1}"]
+ K_crop = iter_outputs["K_crop"]
+ TCO_input = iter_outputs["TCO_input"]
+ TCO_pred = iter_outputs["TCO_output"]
+ model_outputs = iter_outputs["model_outputs"]
if cfg.loss_disentangled:
if cfg.n_pose_dims == 9:
@@ -62,23 +75,26 @@ def h_pose(model, mesh_db, data, meters,
loss_fn = loss_refiner_CO_disentangled_quaternions
else:
raise ValueError
- pose_outputs = model_outputs['pose']
+ pose_outputs = model_outputs["pose"]
loss_TCO_iter = loss_fn(
TCO_possible_gt=TCO_possible_gt,
TCO_input=TCO_input,
refiner_outputs=pose_outputs,
- K_crop=K_crop, points=points,
+ K_crop=K_crop,
+ points=points,
)
else:
loss_TCO_iter = compute_ADD_L1_loss(
- TCO_possible_gt[:, 0], TCO_pred, points
+ TCO_possible_gt[:, 0],
+ TCO_pred,
+ points,
)
- meters[f'loss_TCO-iter={n+1}'].add(loss_TCO_iter.mean().item())
+ meters[f"loss_TCO-iter={n+1}"].add(loss_TCO_iter.mean().item())
losses_TCO_iter.append(loss_TCO_iter)
loss_TCO = torch.cat(losses_TCO_iter).mean()
loss = loss_TCO
- meters['loss_TCO'].add(loss_TCO.item())
- meters['loss_total'].add(loss.item())
+ meters["loss_TCO"].add(loss_TCO.item())
+ meters["loss_total"].add(loss.item())
return loss
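
A self-contained sketch of the "gt+noise" initialization branch above: perturb a ground-truth pose with Gaussian Euler-angle and translation noise. The std values are the ones passed to add_noise in the hunk; the right-multiplication at the end is an assumption, since the real add_noise in lib3d.transform_ops may compose the perturbation differently.

import numpy as np
import transforms3d

rng = np.random.default_rng(0)
TCO_gt = np.eye(4)

euler_rad = rng.normal(0.0, [15.0, 15.0, 15.0]) * np.pi / 180.0
trans = rng.normal(0.0, [0.01, 0.01, 0.05])

noise = np.eye(4)
noise[:3, :3] = transforms3d.euler.euler2mat(*euler_rad, axes="sxyz")
noise[:3, 3] = trans
TCO_init = TCO_gt @ noise  # assumed composition order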
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/pose_models_cfg.py b/happypose/pose_estimators/cosypose/cosypose/training/pose_models_cfg.py
index 0cb58c29..81a71d6f 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/pose_models_cfg.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/pose_models_cfg.py
@@ -1,47 +1,55 @@
# Backbones
from happypose.pose_estimators.cosypose.cosypose.models.efficientnet import EfficientNet
-from happypose.pose_estimators.cosypose.cosypose.models.wide_resnet import WideResNet18, WideResNet34
-from happypose.pose_estimators.cosypose.cosypose.models.flownet import flownet_pretrained
+from happypose.pose_estimators.cosypose.cosypose.models.flownet import (
+ flownet_pretrained,
+)
# Pose models
from happypose.pose_estimators.cosypose.cosypose.models.pose import PosePredictor
-
+from happypose.pose_estimators.cosypose.cosypose.models.wide_resnet import (
+ WideResNet18,
+ WideResNet34,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+
logger = get_logger(__name__)
def check_update_config(config):
- if not hasattr(config, 'init_method'):
- config.init_method = 'v0'
+ if not hasattr(config, "init_method"):
+ config.init_method = "v0"
return config
def create_model_pose(cfg, renderer, mesh_db):
n_inputs = 6
backbone_str = cfg.backbone_str
- if backbone_str == 'efficientnet-b3':
- backbone = EfficientNet.from_name('efficientnet-b3', in_channels=n_inputs)
+ if backbone_str == "efficientnet-b3":
+ backbone = EfficientNet.from_name("efficientnet-b3", in_channels=n_inputs)
backbone.n_features = 1536
- elif backbone_str == 'flownet':
+ elif backbone_str == "flownet":
backbone = flownet_pretrained(n_inputs=n_inputs)
backbone.n_features = 1024
- elif 'resnet34' in backbone_str:
+ elif "resnet34" in backbone_str:
backbone = WideResNet34(n_inputs=n_inputs)
- elif 'resnet18' in backbone_str:
+ elif "resnet18" in backbone_str:
backbone = WideResNet18(n_inputs=n_inputs)
else:
- raise ValueError('Unknown backbone', backbone_str)
+ msg = "Unknown backbone"
+ raise ValueError(msg, backbone_str)
pose_dim = cfg.n_pose_dims
- logger.info(f'Backbone: {backbone_str}')
+ logger.info(f"Backbone: {backbone_str}")
backbone.n_inputs = n_inputs
render_size = (240, 320)
- model = PosePredictor(backbone=backbone,
- renderer=renderer,
- mesh_db=mesh_db,
- render_size=render_size,
- pose_dim=pose_dim)
+ model = PosePredictor(
+ backbone=backbone,
+ renderer=renderer,
+ mesh_db=mesh_db,
+ render_size=render_size,
+ pose_dim=pose_dim,
+ )
return model
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py b/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py
index 58df3c94..1e6187de 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py
@@ -1,38 +1,46 @@
-import yaml
import argparse
-import numpy as np
-import time
-import torch
-import simplejson as json
-from tqdm import tqdm
import functools
-from torchnet.meter import AverageValueMeter
+import time
from collections import defaultdict
-import torch.distributed as dist
-
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
-from torch.utils.data import DataLoader, ConcatDataset
-from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import MultiEpochDataLoader
+import numpy as np
+import simplejson as json
+import torch
+import torch.distributed as dist
+import yaml
+from torch.backends import cudnn
from torch.hub import load_state_dict_from_url
-
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset, make_scene_dataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.detection_dataset import DetectionDataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler
-
+from torch.utils.data import ConcatDataset, DataLoader
+from torchnet.meter import AverageValueMeter
from torchvision.models.detection.mask_rcnn import model_urls
+from tqdm import tqdm
-from .maskrcnn_forward_loss import h_maskrcnn
-from .detector_models_cfg import create_model_detector, check_update_config
-
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
+from happypose.pose_estimators.cosypose.cosypose.datasets.detection_dataset import (
+ DetectionDataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler
+from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
+# Evaluation
+from happypose.pose_estimators.cosypose.cosypose.scripts.run_detection_eval import (
+ run_detection_eval,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_world_size,
+ init_distributed_mode,
+ reduce_dict,
+ sync_model,
+)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, sync_model, init_distributed_mode, reduce_dict
-from torch.backends import cudnn
+from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import (
+ MultiEpochDataLoader,
+)
+from happypose.toolbox.datasets.datasets_cfg import make_scene_dataset
-# Evaluation
-from happypose.pose_estimators.cosypose.cosypose.scripts.run_detection_eval import run_detection_eval
-from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
+from .detector_models_cfg import check_update_config, create_model_detector
+from .maskrcnn_forward_loss import h_maskrcnn
cudnn.benchmark = True
logger = get_logger(__name__)
@@ -50,9 +58,9 @@ def make_eval_configs(args, model_training, epoch):
configs = []
for ds_name in args.test_ds_names:
- cfg = argparse.ArgumentParser('').parse_args([])
+ cfg = argparse.ArgumentParser("").parse_args([])
cfg.ds_name = ds_name
- cfg.save_dir = args.save_dir / f'dataset={ds_name}/epoch={epoch}'
+ cfg.save_dir = args.save_dir / f"dataset={ds_name}/epoch={epoch}"
cfg.n_workers = args.n_dataloader_workers
cfg.pred_bsz = 16
cfg.eval_bsz = 16
@@ -66,39 +74,37 @@ def make_eval_configs(args, model_training, epoch):
def run_eval(args, model_training, epoch):
- errors = dict()
+ errors = {}
configs, detector = make_eval_configs(args, model_training, epoch)
for cfg in configs:
results = run_detection_eval(cfg, detector=detector)
if dist.get_rank() == 0:
- errors[cfg.ds_name] = results['summary']
+ errors[cfg.ds_name] = results["summary"]
return errors
-def log(config, model,
- log_dict, test_dict, epoch):
+def log(config, model, log_dict, test_dict, epoch):
save_dir = config.save_dir
save_dir.mkdir(exist_ok=True)
log_dict.update(epoch=epoch)
- if not (save_dir / 'config.yaml').exists():
- (save_dir / 'config.yaml').write_text(yaml.dump(config))
+ if not (save_dir / "config.yaml").exists():
+ (save_dir / "config.yaml").write_text(yaml.dump(config))
def save_checkpoint(model):
- ckpt_name = 'checkpoint'
- ckpt_name += '.pth.tar'
+ ckpt_name = "checkpoint"
+ ckpt_name += ".pth.tar"
path = save_dir / ckpt_name
- torch.save({'state_dict': model.module.state_dict(),
- 'epoch': epoch}, path)
+ torch.save({"state_dict": model.module.state_dict(), "epoch": epoch}, path)
save_checkpoint(model)
- with open(save_dir / 'log.txt', 'a') as f:
- f.write(json.dumps(log_dict, ignore_nan=True) + '\n')
+ with open(save_dir / "log.txt", "a") as f:
+ f.write(json.dumps(log_dict, ignore_nan=True) + "\n")
if test_dict is not None:
for ds_name, ds_errors in test_dict.items():
- ds_errors['epoch'] = epoch
- with open(save_dir / f'errors_{ds_name}.txt', 'a') as f:
- f.write(json.dumps(test_dict[ds_name], ignore_nan=True) + '\n')
+ ds_errors["epoch"] = epoch
+ with open(save_dir / f"errors_{ds_name}.txt", "a") as f:
+ f.write(json.dumps(test_dict[ds_name], ignore_nan=True) + "\n")
logger.info(config.run_id)
logger.info(log_dict)
@@ -110,9 +116,11 @@ def train_detector(args):
if args.resume_run_id:
resume_dir = EXP_DIR / args.resume_run_id
- resume_args = yaml.load((resume_dir / 'config.yaml').read_text())
- keep_fields = set(['resume_run_id', 'epoch_size', ])
- vars(args).update({k: v for k, v in vars(resume_args).items() if k not in keep_fields})
+ resume_args = yaml.load((resume_dir / "config.yaml").read_text())
+ keep_fields = {"resume_run_id", "epoch_size"}
+ vars(args).update(
+ {k: v for k, v in vars(resume_args).items() if k not in keep_fields},
+ )
args = check_update_config(args)
args.save_dir = EXP_DIR / args.run_id
@@ -128,16 +136,16 @@ def train_detector(args):
world_size = get_world_size()
args.n_gpus = world_size
args.global_batch_size = world_size * args.batch_size
- logger.info(f'Connection established with {world_size} gpus.')
+ logger.info(f"Connection established with {world_size} gpus.")
# Make train/val datasets
def make_datasets(dataset_names):
datasets = []
all_labels = set()
- for (ds_name, n_repeat) in dataset_names:
- assert 'test' not in ds_name
+ for ds_name, n_repeat in dataset_names:
+ assert "test" not in ds_name
ds = make_scene_dataset(ds_name)
- logger.info(f'Loaded {ds_name} with {len(ds)} images.')
+ logger.info(f"Loaded {ds_name} with {len(ds)} images.")
all_labels = all_labels.union(set(ds.all_labels))
for _ in range(n_repeat):
datasets.append(ds)
@@ -145,92 +153,126 @@ def make_datasets(dataset_names):
scene_ds_train, train_labels = make_datasets(args.train_ds_names)
scene_ds_val, _ = make_datasets(args.val_ds_names)
- label_to_category_id = dict()
- label_to_category_id['background'] = 0
- for n, label in enumerate(sorted(list(train_labels)), 1):
+ label_to_category_id = {}
+ label_to_category_id["background"] = 0
+ for n, label in enumerate(sorted(train_labels), 1):
label_to_category_id[label] = n
- logger.info(f'Training with {len(label_to_category_id)} categories: {label_to_category_id}')
+ logger.info(
+ f"Training with {len(label_to_category_id)} categories: {label_to_category_id}",
+ )
args.label_to_category_id = label_to_category_id
- ds_kwargs = dict(
- resize=args.input_resize,
- rgb_augmentation=args.rgb_augmentation,
- background_augmentation=args.background_augmentation,
- gray_augmentation=args.gray_augmentation,
- label_to_category_id=label_to_category_id,
- )
+ ds_kwargs = {
+ "resize": args.input_resize,
+ "rgb_augmentation": args.rgb_augmentation,
+ "background_augmentation": args.background_augmentation,
+ "gray_augmentation": args.gray_augmentation,
+ "label_to_category_id": label_to_category_id,
+ }
ds_train = DetectionDataset(scene_ds_train, **ds_kwargs)
ds_val = DetectionDataset(scene_ds_val, **ds_kwargs)
train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size)
- ds_iter_train = DataLoader(ds_train, sampler=train_sampler, batch_size=args.batch_size,
- num_workers=args.n_dataloader_workers,
- collate_fn=collate_fn,
- drop_last=False, pin_memory=True)
+ ds_iter_train = DataLoader(
+ ds_train,
+ sampler=train_sampler,
+ batch_size=args.batch_size,
+ num_workers=args.n_dataloader_workers,
+ collate_fn=collate_fn,
+ drop_last=False,
+ pin_memory=True,
+ )
ds_iter_train = MultiEpochDataLoader(ds_iter_train)
val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size))
- ds_iter_val = DataLoader(ds_val, sampler=val_sampler, batch_size=args.batch_size,
- num_workers=args.n_dataloader_workers,
- collate_fn=collate_fn,
- drop_last=False, pin_memory=True)
+ ds_iter_val = DataLoader(
+ ds_val,
+ sampler=val_sampler,
+ batch_size=args.batch_size,
+ num_workers=args.n_dataloader_workers,
+ collate_fn=collate_fn,
+ drop_last=False,
+ pin_memory=True,
+ )
ds_iter_val = MultiEpochDataLoader(ds_iter_val)
- model = create_model_detector(cfg=args,
- n_classes=len(args.label_to_category_id)).cuda()
+ model = create_model_detector(
+ cfg=args,
+ n_classes=len(args.label_to_category_id),
+ ).cuda()
if args.resume_run_id:
resume_dir = EXP_DIR / args.resume_run_id
- path = resume_dir / 'checkpoint.pth.tar'
- logger.info(f'Loading checkpoing from {path}')
+ path = resume_dir / "checkpoint.pth.tar"
+        logger.info(f"Loading checkpoint from {path}")
save = torch.load(path)
- state_dict = save['state_dict']
+ state_dict = save["state_dict"]
model.load_state_dict(state_dict)
- start_epoch = save['epoch'] + 1
+ start_epoch = save["epoch"] + 1
else:
start_epoch = 0
end_epoch = args.n_epochs
if args.run_id_pretrain is not None:
- pretrain_path = EXP_DIR / args.run_id_pretrain / 'checkpoint.pth.tar'
- logger.info(f'Using pretrained model from {pretrain_path}.')
- model.load_state_dict(torch.load(pretrain_path)['state_dict'])
+ pretrain_path = EXP_DIR / args.run_id_pretrain / "checkpoint.pth.tar"
+ logger.info(f"Using pretrained model from {pretrain_path}.")
+ model.load_state_dict(torch.load(pretrain_path)["state_dict"])
elif args.pretrain_coco:
- state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'])
- keep = lambda k: 'box_predictor' not in k and 'mask_predictor' not in k
+ state_dict = load_state_dict_from_url(model_urls["maskrcnn_resnet50_fpn_coco"])
+
+ def keep(k):
+ return "box_predictor" not in k and "mask_predictor" not in k
+
state_dict = {k: v for k, v in state_dict.items() if keep(k)}
model.load_state_dict(state_dict, strict=False)
- logger.info('Using model pre-trained on coco. Removed predictor heads.')
+ logger.info("Using model pre-trained on coco. Removed predictor heads.")
else:
- logger.info('Training MaskRCNN from scratch.')
+ logger.info("Training MaskRCNN from scratch.")
# Synchronize models across processes.
model = sync_model(model)
- model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)
+ model = torch.nn.parallel.DistributedDataParallel(
+ model,
+ device_ids=[device],
+ output_device=device,
+ )
# Optimizer
params = [p for p in model.parameters() if p.requires_grad]
- if args.optimizer.lower() == 'sgd':
- optimizer = torch.optim.SGD(params, lr=args.lr,
- weight_decay=args.weight_decay, momentum=args.momentum)
- elif args.optimizer.lower() == 'adam':
+ if args.optimizer.lower() == "sgd":
+ optimizer = torch.optim.SGD(
+ params,
+ lr=args.lr,
+ weight_decay=args.weight_decay,
+ momentum=args.momentum,
+ )
+ elif args.optimizer.lower() == "adam":
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
else:
- raise ValueError(f'Unknown optimizer {args.optimizer}')
+ msg = f"Unknown optimizer {args.optimizer}"
+ raise ValueError(msg)
# Warmup
if args.n_epochs_warmup == 0:
- lambd = lambda epoch: 1
+
+ def lambd(epoch):
+ return 1
+
else:
n_batches_warmup = args.n_epochs_warmup * (args.epoch_size // args.batch_size)
- lambd = lambda batch: (batch + 1) / n_batches_warmup
+
+ def lambd(batch):
+ return (batch + 1) / n_batches_warmup
+
lr_scheduler_warmup = torch.optim.lr_scheduler.LambdaLR(optimizer, lambd)
lr_scheduler_warmup.last_epoch = start_epoch * args.epoch_size // args.batch_size
# LR schedulers
# Divide LR by 10 every args.lr_epoch_decay
lr_scheduler = torch.optim.lr_scheduler.StepLR(
- optimizer, step_size=args.lr_epoch_decay, gamma=0.1,
+ optimizer,
+ step_size=args.lr_epoch_decay,
+ gamma=0.1,
)
lr_scheduler.last_epoch = start_epoch - 1
lr_scheduler.step()
@@ -248,25 +290,30 @@ def train_epoch():
t = time.time()
for n, sample in enumerate(iterator):
if n > 0:
- meters_time['data'].add(time.time() - t)
+ meters_time["data"].add(time.time() - t)
optimizer.zero_grad()
t = time.time()
loss = h(data=sample, meters=meters_train)
- meters_time['forward'].add(time.time() - t)
+ meters_time["forward"].add(time.time() - t)
iterator.set_postfix(loss=loss.item())
- meters_train['loss_total'].add(loss.item())
+ meters_train["loss_total"].add(loss.item())
t = time.time()
loss.backward()
total_grad_norm = torch.nn.utils.clip_grad_norm_(
- model.parameters(), max_norm=np.inf, norm_type=2)
- meters_train['grad_norm'].add(torch.as_tensor(total_grad_norm).item())
+ model.parameters(),
+ max_norm=np.inf,
+ norm_type=2,
+ )
+ meters_train["grad_norm"].add(torch.as_tensor(total_grad_norm).item())
optimizer.step()
- meters_time['backward'].add(time.time() - t)
- meters_time['memory'].add(torch.cuda.max_memory_allocated() / 1024. ** 2)
+ meters_time["backward"].add(time.time() - t)
+ meters_time["memory"].add(
+ torch.cuda.max_memory_allocated() / 1024.0**2,
+ )
if epoch < args.n_epochs_warmup:
lr_scheduler_warmup.step()
@@ -279,7 +326,7 @@ def validation():
model.train()
for sample in tqdm(ds_iter_val, ncols=80):
loss = h(data=sample, meters=meters_val)
- meters_val['loss_total'].add(loss.item())
+ meters_val["loss_total"].add(loss.item())
train_epoch()
if epoch % args.val_epoch_interval == 0:
@@ -290,26 +337,33 @@ def validation():
model.eval()
test_dict = run_eval(args, model, epoch)
- log_dict = dict()
- log_dict.update({
- 'grad_norm': meters_train['grad_norm'].mean,
- 'grad_norm_std': meters_train['grad_norm'].std,
- 'learning_rate': optimizer.param_groups[0]['lr'],
- 'time_forward': meters_time['forward'].mean,
- 'time_backward': meters_time['backward'].mean,
- 'time_data': meters_time['data'].mean,
- 'gpu_memory': meters_time['memory'].mean,
- 'time': time.time(),
- 'n_iterations': (epoch + 1) * len(ds_iter_train),
- 'n_datas': (epoch + 1) * args.global_batch_size * len(ds_iter_train),
- })
-
- for string, meters in zip(('train', 'val'), (meters_train, meters_val)):
+ log_dict = {}
+ log_dict.update(
+ {
+ "grad_norm": meters_train["grad_norm"].mean,
+ "grad_norm_std": meters_train["grad_norm"].std,
+ "learning_rate": optimizer.param_groups[0]["lr"],
+ "time_forward": meters_time["forward"].mean,
+ "time_backward": meters_time["backward"].mean,
+ "time_data": meters_time["data"].mean,
+ "gpu_memory": meters_time["memory"].mean,
+ "time": time.time(),
+ "n_iterations": (epoch + 1) * len(ds_iter_train),
+ "n_datas": (epoch + 1) * args.global_batch_size * len(ds_iter_train),
+ },
+ )
+
+ for string, meters in zip(("train", "val"), (meters_train, meters_val)):
for k in dict(meters).keys():
- log_dict[f'{string}_{k}'] = meters[k].mean
+ log_dict[f"{string}_{k}"] = meters[k].mean
log_dict = reduce_dict(log_dict)
if get_rank() == 0:
- log(config=args, model=model, epoch=epoch,
- log_dict=log_dict, test_dict=test_dict)
+ log(
+ config=args,
+ model=model,
+ epoch=epoch,
+ log_dict=log_dict,
+ test_dict=test_dict,
+ )
dist.barrier()
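
A toy, self-contained illustration of the linear warmup wired up above with LambdaLR: the learning-rate multiplier ramps from 1/n to 1 over n warmup batches (the values here are made up).

import torch

n_batches_warmup = 4
opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
sched = torch.optim.lr_scheduler.LambdaLR(
    opt,
    lambda batch: (batch + 1) / n_batches_warmup,
)
for _ in range(n_batches_warmup):
    print(opt.param_groups[0]["lr"])  # 0.25, 0.5, 0.75, 1.0
    opt.step()
    sched.step()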
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
index 9fd87097..39163c1a 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
@@ -1,81 +1,88 @@
-import yaml
-import numpy as np
-import time
-import torch
-import simplejson as json
-from tqdm import tqdm
import functools
-from pathlib import Path
-from torchnet.meter import AverageValueMeter
+import time
from collections import defaultdict
+from pathlib import Path
+
+import simplejson as json
+import torch
import torch.distributed as dist
+import yaml
+from torch.backends import cudnn
+from torch.utils.data import ConcatDataset, DataLoader
+from torchnet.meter import AverageValueMeter
+from tqdm import tqdm
from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
-
-from torch.utils.data import DataLoader, ConcatDataset
-from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import MultiEpochDataLoader
-
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset, make_scene_dataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.pose_dataset import PoseDataset
-from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler, ListSampler
+from happypose.pose_estimators.cosypose.cosypose.datasets.pose_dataset import (
+ PoseDataset,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler
+from happypose.pose_estimators.cosypose.cosypose.evaluation.prediction_runner import (
+ PredictionRunner,
+)
+from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import (
+ run_pred_eval,
+)
# Evaluation
from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
PoseEstimator,
)
-from happypose.pose_estimators.cosypose.cosypose.evaluation.prediction_runner import (
- PredictionRunner,
+from happypose.pose_estimators.cosypose.cosypose.scripts.run_cosypose_eval import (
+ get_pose_meters,
+ load_pix2pose_results,
+ load_posecnn_results,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_world_size,
+ init_distributed_mode,
+ reduce_dict,
+ sync_model,
+)
+from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
+from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import (
+ MultiEpochDataLoader,
)
from happypose.pose_estimators.megapose.evaluation.evaluation_runner import (
EvaluationRunner,
)
-from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.multiview_predictions import MultiviewPredictionRunner
-from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.pose_eval import PoseEvaluation
-from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import run_pred_eval
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import MultiViewWrapper
-from happypose.pose_estimators.cosypose.cosypose.scripts.run_cosypose_eval import (
- load_pix2pose_results, load_posecnn_results, get_pose_meters)
-
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import BulletBatchRenderer
+from happypose.toolbox.datasets.datasets_cfg import (
+ make_object_dataset,
+ make_scene_dataset,
+)
from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
-
-from .pose_forward_loss import h_pose
-from .pose_models_cfg import create_model_pose, check_update_config
-
from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
-from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_world_size, get_rank, sync_model, init_distributed_mode, reduce_dict
-from torch.backends import cudnn
+from .pose_forward_loss import h_pose
+from .pose_models_cfg import check_update_config, create_model_pose
cudnn.benchmark = True
logger = get_logger(__name__)
-def log(config, model,
- log_dict, test_dict, epoch):
+def log(config, model, log_dict, test_dict, epoch):
save_dir = config.save_dir
save_dir.mkdir(exist_ok=True)
log_dict.update(epoch=epoch)
- if not (save_dir / 'config.yaml').exists():
- (save_dir / 'config.yaml').write_text(yaml.dump(config))
+ if not (save_dir / "config.yaml").exists():
+ (save_dir / "config.yaml").write_text(yaml.dump(config))
def save_checkpoint(model):
- ckpt_name = 'checkpoint'
- ckpt_name += '.pth.tar'
+ ckpt_name = "checkpoint"
+ ckpt_name += ".pth.tar"
path = save_dir / ckpt_name
- torch.save({'state_dict': model.module.state_dict(),
- 'epoch': epoch}, path)
+ torch.save({"state_dict": model.module.state_dict(), "epoch": epoch}, path)
save_checkpoint(model)
- with open(save_dir / 'log.txt', 'a') as f:
- f.write(json.dumps(log_dict, ignore_nan=True) + '\n')
+ with open(save_dir / "log.txt", "a") as f:
+ f.write(json.dumps(log_dict, ignore_nan=True) + "\n")
if test_dict is not None:
for ds_name, ds_errors in test_dict.items():
- ds_errors['epoch'] = epoch
- with open(save_dir / f'errors_{ds_name}.txt', 'a') as f:
- f.write(json.dumps(test_dict[ds_name], ignore_nan=True) + '\n')
+ ds_errors["epoch"] = epoch
+ with open(save_dir / f"errors_{ds_name}.txt", "a") as f:
+ f.write(json.dumps(test_dict[ds_name], ignore_nan=True) + "\n")
logger.info(config.run_id)
logger.info(log_dict)
@@ -83,18 +90,25 @@ def save_checkpoint(model):
def make_eval_bundle(args, model_training):
- eval_bundle = dict()
+ eval_bundle = {}
model_training.cfg = args
def load_model(run_id):
if run_id is None:
return None
run_dir = EXP_DIR / run_id
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader)
cfg = check_update_config(cfg)
- model = create_model_pose(cfg, renderer=model_training.renderer,
- mesh_db=model_training.mesh_db).cuda().eval()
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar')['state_dict']
+ model = (
+ create_model_pose(
+ cfg,
+ renderer=model_training.renderer,
+ mesh_db=model_training.mesh_db,
+ )
+ .cuda()
+ .eval()
+ )
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar")["state_dict"]
model.load_state_dict(ckpt)
model.eval()
model.cfg = cfg
@@ -114,24 +128,36 @@ def load_model(run_id):
coarse_model=coarse_model,
)
- base_pred_kwargs = dict(
- pose_predictor=pose_estimator,
- mv_predictor=None,
- skip_mv=True,
- )
+ base_pred_kwargs = {
+ "pose_predictor": pose_estimator,
+ "mv_predictor": None,
+ "skip_mv": True,
+ }
for ds_name in args.test_ds_names:
- assert ds_name in {'ycbv.test.keyframes', 'tless.primesense.test'}
+ assert ds_name in {"ycbv.test.keyframes", "tless.primesense.test"}
scene_ds = make_scene_dataset(ds_name, n_frames=args.n_test_frames)
- logger.info(f'TEST: Loaded {ds_name} with {len(scene_ds)} images.')
- #scene_ds_pred = MultiViewWrapper(scene_ds, n_views=1)
+ logger.info(f"TEST: Loaded {ds_name} with {len(scene_ds)} images.")
+ # scene_ds_pred = MultiViewWrapper(scene_ds, n_views=1)
# Predictions
- #pred_runner = MultiviewPredictionRunner(scene_ds_pred, batch_size=1,
- # n_workers=args.n_dataloader_workers, cache_data=False)
-
- inference = {'detection_type': 'gt', 'coarse_estimation_type': 'S03_grid', 'SO3_grid_size': 576,
- 'n_refiner_iterations': 5, 'n_pose_hypotheses': 5, 'run_depth_refiner': False,
- 'depth_refiner': None, 'bsz_objects': 16, 'bsz_images': 288}
+ # pred_runner = MultiviewPredictionRunner(
+ # scene_ds_pred,
+ # batch_size=1,
+ # n_workers=args.n_dataloader_workers,
+ # cache_data=False,
+ # )
+
+ inference = {
+ "detection_type": "gt",
+ "coarse_estimation_type": "S03_grid",
+ "SO3_grid_size": 576,
+ "n_refiner_iterations": 5,
+ "n_pose_hypotheses": 5,
+ "run_depth_refiner": False,
+ "depth_refiner": None,
+ "bsz_objects": 16,
+ "bsz_images": 288,
+ }
pred_runner = PredictionRunner(
scene_ds=scene_ds,
@@ -141,54 +167,64 @@ def load_model(run_id):
)
detections = None
- pred_kwargs = dict()
-
- if 'tless' in ds_name:
- detections = load_pix2pose_results(all_detections=False,
- remove_incorrect_poses=False).cpu()
- coarse_detections = load_pix2pose_results(all_detections=False,
- remove_incorrect_poses=True).cpu()
- det_k = 'pix2pose_detections'
- coarse_k = 'pix2pose_coarse'
-
- elif 'ycbv' in ds_name:
+ pred_kwargs = {}
+
+ if "tless" in ds_name:
+ detections = load_pix2pose_results(
+ all_detections=False,
+ remove_incorrect_poses=False,
+ ).cpu()
+ coarse_detections = load_pix2pose_results(
+ all_detections=False,
+ remove_incorrect_poses=True,
+ ).cpu()
+ det_k = "pix2pose_detections"
+ coarse_k = "pix2pose_coarse"
+
+ elif "ycbv" in ds_name:
detections = load_posecnn_results().cpu()
coarse_detections = detections
- det_k = 'posecnn_detections'
- coarse_k = 'posecnn_coarse'
+ det_k = "posecnn_detections"
+ coarse_k = "posecnn_coarse"
else:
raise ValueError(ds_name)
if refiner_model is not None:
- pred_kwargs.update({
- coarse_k: dict(
- detections=coarse_detections,
- use_detections_TCO=True,
- n_coarse_iterations=0,
- n_refiner_iterations=1,
- **base_pred_kwargs,
- )
- })
+ pred_kwargs.update(
+ {
+ coarse_k: dict(
+ detections=coarse_detections,
+ use_detections_TCO=True,
+ n_coarse_iterations=0,
+ n_refiner_iterations=1,
+ **base_pred_kwargs,
+ ),
+ },
+ )
if coarse_model is not None:
- pred_kwargs.update({
- det_k: dict(
- detections=detections,
- use_detections_TCO=False,
- n_coarse_iterations=coarse_model.cfg.n_iterations,
- n_refiner_iterations=1 if refiner_model is not None else 0,
- **base_pred_kwargs,
- )
- })
+ pred_kwargs.update(
+ {
+ det_k: dict(
+ detections=detections,
+ use_detections_TCO=False,
+ n_coarse_iterations=coarse_model.cfg.n_iterations,
+ n_refiner_iterations=1 if refiner_model is not None else 0,
+ **base_pred_kwargs,
+ ),
+ },
+ )
# Evaluation
meters = get_pose_meters(scene_ds, ds_name)
- meters = {k.split('_')[0]: v for k, v in meters.items()}
- mv_group_ids = list(iter(pred_runner.sampler))
+ meters = {k.split("_")[0]: v for k, v in meters.items()}
+ list(iter(pred_runner.sampler))
print(scene_ds.frame_index)
- #scene_ds_ids = np.concatenate(scene_ds.frame_index.loc[mv_group_ids, 'scene_ds_ids'].values)
- #sampler = ListSampler(scene_ds_ids)
+ # scene_ds_ids = np.concatenate(
+ # scene_ds.frame_index.loc[mv_group_ids, "scene_ds_ids"].values
+ # )
+ # sampler = ListSampler(scene_ds_ids)
eval_runner = EvaluationRunner(
scene_ds,
meters,
@@ -198,20 +234,20 @@ def load_model(run_id):
sampler=pred_runner.sampler,
)
- save_dir = Path(args.save_dir) / 'eval' / ds_name
+ save_dir = Path(args.save_dir) / "eval" / ds_name
save_dir.mkdir(exist_ok=True, parents=True)
eval_bundle[ds_name] = (pred_runner, pred_kwargs, eval_runner, save_dir)
return eval_bundle
def run_eval(eval_bundle, epoch):
- errors = dict()
+ errors = {}
for ds_name, bundle in eval_bundle.items():
pred_runner, pred_kwargs, eval_runner, save_dir = bundle
results = run_pred_eval(pred_runner, pred_kwargs, eval_runner)
if dist.get_rank() == 0:
- torch.save(results, save_dir / f'epoch={epoch}.pth.tar')
- errors[ds_name] = results['summary']
+ torch.save(results, save_dir / f"epoch={epoch}.pth.tar")
+ errors[ds_name] = results["summary"]
return errors
@@ -220,11 +256,13 @@ def train_pose(args):
if args.resume_run_id:
resume_dir = EXP_DIR / args.resume_run_id
- resume_args = yaml.load((resume_dir / 'config.yaml').read_text())
- keep_fields = set(['resume_run_id', 'epoch_size', ])
- vars(args).update({k: v for k, v in vars(resume_args).items() if k not in keep_fields})
+ resume_args = yaml.load((resume_dir / "config.yaml").read_text())
+ keep_fields = {"resume_run_id", "epoch_size"}
+ vars(args).update(
+ {k: v for k, v in vars(resume_args).items() if k not in keep_fields},
+ )
- args.train_refiner = args.TCO_input_generator == 'gt+noise'
+ args.train_refiner = args.TCO_input_generator == "gt+noise"
args.train_coarse = not args.train_refiner
args.save_dir = EXP_DIR / args.run_id
args = check_update_config(args)
@@ -240,15 +278,15 @@ def train_pose(args):
world_size = get_world_size()
args.n_gpus = world_size
args.global_batch_size = world_size * args.batch_size
- logger.info(f'Connection established with {world_size} gpus.')
+ logger.info(f"Connection established with {world_size} gpus.")
# Make train/val datasets
def make_datasets(dataset_names):
datasets = []
- for (ds_name, n_repeat) in dataset_names:
- assert 'test' not in ds_name
+ for ds_name, n_repeat in dataset_names:
+ assert "test" not in ds_name
ds = make_scene_dataset(ds_name)
- logger.info(f'Loaded {ds_name} with {len(ds)} images.')
+ logger.info(f"Loaded {ds_name} with {len(ds)} images.")
for _ in range(n_repeat):
datasets.append(ds)
return ConcatDataset(datasets)
@@ -256,32 +294,53 @@ def make_datasets(dataset_names):
scene_ds_train = make_datasets(args.train_ds_names)
scene_ds_val = make_datasets(args.val_ds_names)
- ds_kwargs = dict(
- resize=args.input_resize,
- rgb_augmentation=args.rgb_augmentation,
- background_augmentation=args.background_augmentation,
- min_area=args.min_area,
- gray_augmentation=args.gray_augmentation,
- )
+ ds_kwargs = {
+ "resize": args.input_resize,
+ "rgb_augmentation": args.rgb_augmentation,
+ "background_augmentation": args.background_augmentation,
+ "min_area": args.min_area,
+ "gray_augmentation": args.gray_augmentation,
+ }
ds_train = PoseDataset(scene_ds_train, **ds_kwargs)
ds_val = PoseDataset(scene_ds_val, **ds_kwargs)
train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size)
- ds_iter_train = DataLoader(ds_train, sampler=train_sampler, batch_size=args.batch_size,
- num_workers=args.n_dataloader_workers, collate_fn=ds_train.collate_fn,
- drop_last=False, pin_memory=True)
+ ds_iter_train = DataLoader(
+ ds_train,
+ sampler=train_sampler,
+ batch_size=args.batch_size,
+ num_workers=args.n_dataloader_workers,
+ collate_fn=ds_train.collate_fn,
+ drop_last=False,
+ pin_memory=True,
+ )
ds_iter_train = MultiEpochDataLoader(ds_iter_train)
val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size))
- ds_iter_val = DataLoader(ds_val, sampler=val_sampler, batch_size=args.batch_size,
- num_workers=args.n_dataloader_workers, collate_fn=ds_val.collate_fn,
- drop_last=False, pin_memory=True)
+ ds_iter_val = DataLoader(
+ ds_val,
+ sampler=val_sampler,
+ batch_size=args.batch_size,
+ num_workers=args.n_dataloader_workers,
+ collate_fn=ds_val.collate_fn,
+ drop_last=False,
+ pin_memory=True,
+ )
ds_iter_val = MultiEpochDataLoader(ds_iter_val)
# Make model
object_ds = make_object_dataset(args.object_ds_name)
- renderer = Panda3dBatchRenderer(object_ds, n_workers=args.n_rendering_workers, preload_cache=False)
- mesh_db = MeshDataBase.from_object_ds(object_ds).batched(n_sym=args.n_symmetries_batch).cuda().float()
+ renderer = Panda3dBatchRenderer(
+ object_ds,
+ n_workers=args.n_rendering_workers,
+ preload_cache=False,
+ )
+ mesh_db = (
+ MeshDataBase.from_object_ds(object_ds)
+ .batched(n_sym=args.n_symmetries_batch)
+ .cuda()
+ .float()
+ )
model = create_model_pose(cfg=args, renderer=renderer, mesh_db=mesh_db).cuda()
@@ -289,41 +348,57 @@ def make_datasets(dataset_names):
if args.resume_run_id:
resume_dir = EXP_DIR / args.resume_run_id
- path = resume_dir / 'checkpoint.pth.tar'
- logger.info(f'Loading checkpoing from {path}')
+ path = resume_dir / "checkpoint.pth.tar"
+ logger.info(f"Loading checkpoing from {path}")
save = torch.load(path)
- state_dict = save['state_dict']
+ state_dict = save["state_dict"]
model.load_state_dict(state_dict)
- start_epoch = save['epoch'] + 1
+ start_epoch = save["epoch"] + 1
else:
start_epoch = 0
end_epoch = args.n_epochs
if args.run_id_pretrain is not None:
- pretrain_path = EXP_DIR / args.run_id_pretrain / 'checkpoint.pth.tar'
- logger.info(f'Using pretrained model from {pretrain_path}.')
- model.load_state_dict(torch.load(pretrain_path)['state_dict'])
+ pretrain_path = EXP_DIR / args.run_id_pretrain / "checkpoint.pth.tar"
+ logger.info(f"Using pretrained model from {pretrain_path}.")
+ model.load_state_dict(torch.load(pretrain_path)["state_dict"])
# Synchronize models across processes.
model = sync_model(model)
- model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], output_device=device)
+ model = torch.nn.parallel.DistributedDataParallel(
+ model,
+ device_ids=[device],
+ output_device=device,
+ )
# Optimizer
- optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
+ optimizer = torch.optim.Adam(
+ model.parameters(),
+ lr=args.lr,
+ weight_decay=args.weight_decay,
+ )
# Warmup
if args.n_epochs_warmup == 0:
- lambd = lambda epoch: 1
+
+ def lambd(epoch):
+ return 1
+
else:
n_batches_warmup = args.n_epochs_warmup * (args.epoch_size // args.batch_size)
- lambd = lambda batch: (batch + 1) / n_batches_warmup
+
+ def lambd(batch):
+ return (batch + 1) / n_batches_warmup
+
lr_scheduler_warmup = torch.optim.lr_scheduler.LambdaLR(optimizer, lambd)
lr_scheduler_warmup.last_epoch = start_epoch * args.epoch_size // args.batch_size
# LR schedulers
# Divide LR by 10 every args.lr_epoch_decay
lr_scheduler = torch.optim.lr_scheduler.StepLR(
- optimizer, step_size=args.lr_epoch_decay, gamma=0.1,
+ optimizer,
+ step_size=args.lr_epoch_decay,
+ gamma=0.1,
)
lr_scheduler.last_epoch = start_epoch - 1
lr_scheduler.step()
@@ -333,8 +408,14 @@ def make_datasets(dataset_names):
meters_val = defaultdict(lambda: AverageValueMeter())
meters_time = defaultdict(lambda: AverageValueMeter())
- h = functools.partial(h_pose, model=model, cfg=args, n_iterations=args.n_iterations,
- mesh_db=mesh_db, input_generator=args.TCO_input_generator)
+ h = functools.partial(
+ h_pose,
+ model=model,
+ cfg=args,
+ n_iterations=args.n_iterations,
+ mesh_db=mesh_db,
+ input_generator=args.TCO_input_generator,
+ )
def train_epoch():
model.train()
@@ -342,25 +423,30 @@ def train_epoch():
t = time.time()
for n, sample in enumerate(iterator):
if n > 0:
- meters_time['data'].add(time.time() - t)
+ meters_time["data"].add(time.time() - t)
optimizer.zero_grad()
t = time.time()
loss = h(data=sample, meters=meters_train)
- meters_time['forward'].add(time.time() - t)
+ meters_time["forward"].add(time.time() - t)
iterator.set_postfix(loss=loss.item())
- meters_train['loss_total'].add(loss.item())
+ meters_train["loss_total"].add(loss.item())
t = time.time()
loss.backward()
total_grad_norm = torch.nn.utils.clip_grad_norm_(
- model.parameters(), max_norm=args.clip_grad_norm, norm_type=2)
- meters_train['grad_norm'].add(torch.as_tensor(total_grad_norm).item())
+ model.parameters(),
+ max_norm=args.clip_grad_norm,
+ norm_type=2,
+ )
+ meters_train["grad_norm"].add(torch.as_tensor(total_grad_norm).item())
optimizer.step()
- meters_time['backward'].add(time.time() - t)
- meters_time['memory'].add(torch.cuda.max_memory_allocated() / 1024. ** 2)
+ meters_time["backward"].add(time.time() - t)
+ meters_time["memory"].add(
+ torch.cuda.max_memory_allocated() / 1024.0**2,
+ )
if epoch < args.n_epochs_warmup:
lr_scheduler_warmup.step()
@@ -373,7 +459,7 @@ def validation():
model.eval()
for sample in tqdm(ds_iter_val, ncols=80):
loss = h(data=sample, meters=meters_val)
- meters_val['loss_total'].add(loss.item())
+ meters_val["loss_total"].add(loss.item())
@torch.no_grad()
def test():
@@ -388,26 +474,33 @@ def test():
if epoch % args.test_epoch_interval == 0:
test_dict = test()
- log_dict = dict()
- log_dict.update({
- 'grad_norm': meters_train['grad_norm'].mean,
- 'grad_norm_std': meters_train['grad_norm'].std,
- 'learning_rate': optimizer.param_groups[0]['lr'],
- 'time_forward': meters_time['forward'].mean,
- 'time_backward': meters_time['backward'].mean,
- 'time_data': meters_time['data'].mean,
- 'gpu_memory': meters_time['memory'].mean,
- 'time': time.time(),
- 'n_iterations': (epoch + 1) * len(ds_iter_train),
- 'n_datas': (epoch + 1) * args.global_batch_size * len(ds_iter_train),
- })
-
- for string, meters in zip(('train', 'val'), (meters_train, meters_val)):
+ log_dict = {}
+ log_dict.update(
+ {
+ "grad_norm": meters_train["grad_norm"].mean,
+ "grad_norm_std": meters_train["grad_norm"].std,
+ "learning_rate": optimizer.param_groups[0]["lr"],
+ "time_forward": meters_time["forward"].mean,
+ "time_backward": meters_time["backward"].mean,
+ "time_data": meters_time["data"].mean,
+ "gpu_memory": meters_time["memory"].mean,
+ "time": time.time(),
+ "n_iterations": (epoch + 1) * len(ds_iter_train),
+ "n_datas": (epoch + 1) * args.global_batch_size * len(ds_iter_train),
+ },
+ )
+
+ for string, meters in zip(("train", "val"), (meters_train, meters_val)):
for k in dict(meters).keys():
- log_dict[f'{string}_{k}'] = meters[k].mean
+ log_dict[f"{string}_{k}"] = meters[k].mean
log_dict = reduce_dict(log_dict)
if get_rank() == 0:
- log(config=args, model=model, epoch=epoch,
- log_dict=log_dict, test_dict=test_dict)
+ log(
+ config=args,
+ model=model,
+ epoch=epoch,
+ log_dict=log_dict,
+ test_dict=test_dict,
+ )
dist.barrier()
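
The warmup wiring above composes two schedulers on the same optimizer: a LambdaLR
that ramps the learning rate linearly over the first
n_epochs_warmup * (epoch_size // batch_size) batches, after which the StepLR divides
it by 10 every lr_epoch_decay epochs. A minimal, self-contained sketch of the same
pattern, with illustrative hyperparameters standing in for the config values that
train_pose reads:

    import torch

    n_batches_warmup = 100  # stand-in for n_epochs_warmup * (epoch_size // batch_size)
    model = torch.nn.Linear(4, 4)
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)

    def lambd(batch):
        # Linear ramp from 1/n_batches_warmup up to 1.0.
        return (batch + 1) / n_batches_warmup

    warmup = torch.optim.lr_scheduler.LambdaLR(optimizer, lambd)
    decay = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)

    for batch in range(n_batches_warmup):
        optimizer.step()
        warmup.step()  # stepped once per batch while epoch < n_epochs_warmup
    # After warmup the per-epoch StepLR takes over, as set up above via
    # lr_scheduler.last_epoch = start_epoch - 1.
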
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py b/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py
index bc28414c..a604b7aa 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py
@@ -30,21 +30,26 @@
#
# Author: Johannes L. Schoenberger (jsch-at-demuc-dot-de)
-import os
+import argparse
import collections
-import numpy as np
+import os
import struct
-import argparse
+import numpy as np
CameraModel = collections.namedtuple(
- "CameraModel", ["model_id", "model_name", "num_params"])
-Camera = collections.namedtuple(
- "Camera", ["id", "model", "width", "height", "params"])
+ "CameraModel",
+ ["model_id", "model_name", "num_params"],
+)
+Camera = collections.namedtuple("Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
- "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
+ "Image",
+ ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"],
+)
Point3D = collections.namedtuple(
- "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
+ "Point3D",
+ ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"],
+)
class Image(BaseImage):
@@ -63,12 +68,14 @@ def qvec2rotmat(self):
CameraModel(model_id=7, model_name="FOV", num_params=5),
CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
- CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
+ CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12),
+}
+CAMERA_MODEL_IDS = {
+ camera_model.model_id: camera_model for camera_model in CAMERA_MODELS
+}
+CAMERA_MODEL_NAMES = {
+ camera_model.model_name: camera_model for camera_model in CAMERA_MODELS
}
-CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
- for camera_model in CAMERA_MODELS])
-CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
- for camera_model in CAMERA_MODELS])
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
@@ -84,13 +91,13 @@ def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
- """pack and write to a binary file.
+ """Pack and write to a binary file.
:param fid:
:param data: data to send, if multiple elements are sent at the same time,
they should be encapsuled either in a list or a tuple
:param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
should be the same length as the data list or tuple
- :param endian_character: Any of {@, =, <, >, !}
+ :param endian_character: Any of {@, =, <, >, !}.
"""
if isinstance(data, (list, tuple)):
bytes = struct.pack(endian_character + format_char_sequence, *data)
@@ -100,13 +107,12 @@ def write_next_bytes(fid, data, format_char_sequence, endian_character="<"):
def read_cameras_text(path):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::WriteCamerasText(const std::string& path)
- void Reconstruction::ReadCamerasText(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::WriteCamerasText(const std::string& path)
+ void Reconstruction::ReadCamerasText(const std::string& path).
"""
cameras = {}
- with open(path, "r") as fid:
+ with open(path) as fid:
while True:
line = fid.readline()
if not line:
@@ -119,50 +125,60 @@ def read_cameras_text(path):
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
- cameras[camera_id] = Camera(id=camera_id, model=model,
- width=width, height=height,
- params=params)
+ cameras[camera_id] = Camera(
+ id=camera_id,
+ model=model,
+ width=width,
+ height=height,
+ params=params,
+ )
return cameras
def read_cameras_binary(path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::WriteCamerasBinary(const std::string& path)
- void Reconstruction::ReadCamerasBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::WriteCamerasBinary(const std::string& path)
+ void Reconstruction::ReadCamerasBinary(const std::string& path).
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
- for camera_line_index in range(num_cameras):
+ for _camera_line_index in range(num_cameras):
camera_properties = read_next_bytes(
- fid, num_bytes=24, format_char_sequence="iiQQ")
+ fid,
+ num_bytes=24,
+ format_char_sequence="iiQQ",
+ )
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
- params = read_next_bytes(fid, num_bytes=8*num_params,
- format_char_sequence="d"*num_params)
- cameras[camera_id] = Camera(id=camera_id,
- model=model_name,
- width=width,
- height=height,
- params=np.array(params))
+ params = read_next_bytes(
+ fid,
+ num_bytes=8 * num_params,
+ format_char_sequence="d" * num_params,
+ )
+ cameras[camera_id] = Camera(
+ id=camera_id,
+ model=model_name,
+ width=width,
+ height=height,
+ params=np.array(params),
+ )
assert len(cameras) == num_cameras
return cameras
def write_cameras_text(cameras, path):
+ """see: src/base/reconstruction.cc
+ void Reconstruction::WriteCamerasText(const std::string& path)
+ void Reconstruction::ReadCamerasText(const std::string& path).
"""
- see: src/base/reconstruction.cc
- void Reconstruction::WriteCamerasText(const std::string& path)
- void Reconstruction::ReadCamerasText(const std::string& path)
- """
- HEADER = '# Camera list with one line of data per camera:\n'
- '# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n'
- '# Number of cameras: {}\n'.format(len(cameras))
+ HEADER = "# Camera list with one line of data per camera:\n"
+ "# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\n"
+ f"# Number of cameras: {len(cameras)}\n"
with open(path, "w") as fid:
fid.write(HEADER)
for _, cam in cameras.items():
@@ -172,19 +188,15 @@ def write_cameras_text(cameras, path):
def write_cameras_binary(cameras, path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::WriteCamerasBinary(const std::string& path)
- void Reconstruction::ReadCamerasBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::WriteCamerasBinary(const std::string& path)
+ void Reconstruction::ReadCamerasBinary(const std::string& path).
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(cameras), "Q")
for _, cam in cameras.items():
model_id = CAMERA_MODEL_NAMES[cam.model].model_id
- camera_properties = [cam.id,
- model_id,
- cam.width,
- cam.height]
+ camera_properties = [cam.id, model_id, cam.width, cam.height]
write_next_bytes(fid, camera_properties, "iiQQ")
for p in cam.params:
write_next_bytes(fid, float(p), "d")
@@ -192,13 +204,12 @@ def write_cameras_binary(cameras, path_to_model_file):
def read_images_text(path):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadImagesText(const std::string& path)
- void Reconstruction::WriteImagesText(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadImagesText(const std::string& path)
+ void Reconstruction::WriteImagesText(const std::string& path).
"""
images = {}
- with open(path, "r") as fid:
+ with open(path) as fid:
while True:
line = fid.readline()
if not line:
@@ -212,65 +223,87 @@ def read_images_text(path):
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
- xys = np.column_stack([tuple(map(float, elems[0::3])),
- tuple(map(float, elems[1::3]))])
+ xys = np.column_stack(
+ [tuple(map(float, elems[0::3])), tuple(map(float, elems[1::3]))],
+ )
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
- id=image_id, qvec=qvec, tvec=tvec,
- camera_id=camera_id, name=image_name,
- xys=xys, point3D_ids=point3D_ids)
+ id=image_id,
+ qvec=qvec,
+ tvec=tvec,
+ camera_id=camera_id,
+ name=image_name,
+ xys=xys,
+ point3D_ids=point3D_ids,
+ )
return images
def read_images_binary(path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadImagesBinary(const std::string& path)
- void Reconstruction::WriteImagesBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadImagesBinary(const std::string& path)
+ void Reconstruction::WriteImagesBinary(const std::string& path).
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
- for image_index in range(num_reg_images):
+ for _image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
- fid, num_bytes=64, format_char_sequence="idddddddi")
+ fid,
+ num_bytes=64,
+ format_char_sequence="idddddddi",
+ )
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
- while current_char != b"\x00": # look for the ASCII 0 entry
+ while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
- num_points2D = read_next_bytes(fid, num_bytes=8,
- format_char_sequence="Q")[0]
- x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
- format_char_sequence="ddq"*num_points2D)
- xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
- tuple(map(float, x_y_id_s[1::3]))])
+ num_points2D = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
+ 0
+ ]
+ x_y_id_s = read_next_bytes(
+ fid,
+ num_bytes=24 * num_points2D,
+ format_char_sequence="ddq" * num_points2D,
+ )
+ xys = np.column_stack(
+ [tuple(map(float, x_y_id_s[0::3])), tuple(map(float, x_y_id_s[1::3]))],
+ )
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
- id=image_id, qvec=qvec, tvec=tvec,
- camera_id=camera_id, name=image_name,
- xys=xys, point3D_ids=point3D_ids)
+ id=image_id,
+ qvec=qvec,
+ tvec=tvec,
+ camera_id=camera_id,
+ name=image_name,
+ xys=xys,
+ point3D_ids=point3D_ids,
+ )
return images
def write_images_text(images, path):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadImagesText(const std::string& path)
- void Reconstruction::WriteImagesText(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadImagesText(const std::string& path)
+ void Reconstruction::WriteImagesText(const std::string& path).
"""
if len(images) == 0:
mean_observations = 0
else:
- mean_observations = sum((len(img.point3D_ids) for _, img in images.items()))/len(images)
- HEADER = '# Image list with two lines of data per image:\n'
- '# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n'
- '# POINTS2D[] as (X, Y, POINT3D_ID)\n'
- '# Number of images: {}, mean observations per image: {}\n'.format(len(images), mean_observations)
+ mean_observations = sum(
+ (len(img.point3D_ids) for _, img in images.items()),
+ ) / len(images)
+ HEADER = (
+ "# Image list with two lines of data per image:\n"
+ "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n"
+ "# POINTS2D[] as (X, Y, POINT3D_ID)\n"
+ f"# Number of images: {len(images)}, mean observations per image: "
+ f"{mean_observations}\n"
+ )
with open(path, "w") as fid:
fid.write(HEADER)
@@ -286,10 +319,9 @@ def write_images_text(images, path):
def write_images_binary(images, path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadImagesBinary(const std::string& path)
- void Reconstruction::WriteImagesBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadImagesBinary(const std::string& path)
+ void Reconstruction::WriteImagesBinary(const std::string& path).
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(images), "Q")
@@ -307,13 +339,12 @@ def write_images_binary(images, path_to_model_file):
def read_points3D_text(path):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadPoints3DText(const std::string& path)
- void Reconstruction::WritePoints3DText(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DText(const std::string& path)
+ void Reconstruction::WritePoints3DText(const std::string& path).
"""
points3D = {}
- with open(path, "r") as fid:
+ with open(path) as fid:
while True:
line = fid.readline()
if not line:
@@ -327,55 +358,70 @@ def read_points3D_text(path):
error = float(elems[7])
image_ids = np.array(tuple(map(int, elems[8::2])))
point2D_idxs = np.array(tuple(map(int, elems[9::2])))
- points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
- error=error, image_ids=image_ids,
- point2D_idxs=point2D_idxs)
+ points3D[point3D_id] = Point3D(
+ id=point3D_id,
+ xyz=xyz,
+ rgb=rgb,
+ error=error,
+ image_ids=image_ids,
+ point2D_idxs=point2D_idxs,
+ )
return points3D
def read_points3d_binary(path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadPoints3DBinary(const std::string& path)
- void Reconstruction::WritePoints3DBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DBinary(const std::string& path)
+ void Reconstruction::WritePoints3DBinary(const std::string& path).
"""
points3D = {}
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
- for point_line_index in range(num_points):
+ for _point_line_index in range(num_points):
binary_point_line_properties = read_next_bytes(
- fid, num_bytes=43, format_char_sequence="QdddBBBd")
+ fid,
+ num_bytes=43,
+ format_char_sequence="QdddBBBd",
+ )
point3D_id = binary_point_line_properties[0]
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
- track_length = read_next_bytes(
- fid, num_bytes=8, format_char_sequence="Q")[0]
+ track_length = read_next_bytes(fid, num_bytes=8, format_char_sequence="Q")[
+ 0
+ ]
track_elems = read_next_bytes(
- fid, num_bytes=8*track_length,
- format_char_sequence="ii"*track_length)
+ fid,
+ num_bytes=8 * track_length,
+ format_char_sequence="ii" * track_length,
+ )
image_ids = np.array(tuple(map(int, track_elems[0::2])))
point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
points3D[point3D_id] = Point3D(
- id=point3D_id, xyz=xyz, rgb=rgb,
- error=error, image_ids=image_ids,
- point2D_idxs=point2D_idxs)
+ id=point3D_id,
+ xyz=xyz,
+ rgb=rgb,
+ error=error,
+ image_ids=image_ids,
+ point2D_idxs=point2D_idxs,
+ )
return points3D
def write_points3D_text(points3D, path):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadPoints3DText(const std::string& path)
- void Reconstruction::WritePoints3DText(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DText(const std::string& path)
+ void Reconstruction::WritePoints3DText(const std::string& path).
"""
if len(points3D) == 0:
mean_track_length = 0
else:
- mean_track_length = sum((len(pt.image_ids) for _, pt in points3D.items()))/len(points3D)
- HEADER = '# 3D point list with one line of data per point:\n'
- '# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n'
- '# Number of points: {}, mean track length: {}\n'.format(len(points3D), mean_track_length)
+ mean_track_length = sum(
+ (len(pt.image_ids) for _, pt in points3D.items()),
+ ) / len(points3D)
+ HEADER = "# 3D point list with one line of data per point:\n"
+ "# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\n"
+ f"# Number of points: {len(points3D)}, mean track length: {mean_track_length}\n"
with open(path, "w") as fid:
fid.write(HEADER)
@@ -389,10 +435,9 @@ def write_points3D_text(points3D, path):
def write_points3d_binary(points3D, path_to_model_file):
- """
- see: src/base/reconstruction.cc
- void Reconstruction::ReadPoints3DBinary(const std::string& path)
- void Reconstruction::WritePoints3DBinary(const std::string& path)
+ """see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DBinary(const std::string& path)
+ void Reconstruction::WritePoints3DBinary(const std::string& path).
"""
with open(path_to_model_file, "wb") as fid:
write_next_bytes(fid, len(points3D), "Q")
@@ -432,25 +477,40 @@ def write_model(cameras, images, points3D, path, ext):
def qvec2rotmat(qvec):
- return np.array([
- [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
- 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
- 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
- [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
- 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
- 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
- [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
- 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
- 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
+ return np.array(
+ [
+ [
+ 1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2,
+ 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
+ 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2],
+ ],
+ [
+ 2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
+ 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2,
+ 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1],
+ ],
+ [
+ 2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
+ 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
+ 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2,
+ ],
+ ],
+ )
def rotmat2qvec(R):
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
- K = np.array([
- [Rxx - Ryy - Rzz, 0, 0, 0],
- [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
- [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
- [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
+ K = (
+ np.array(
+ [
+ [Rxx - Ryy - Rzz, 0, 0, 0],
+ [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
+ [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
+ [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz],
+ ],
+ )
+ / 3.0
+ )
eigvals, eigvecs = np.linalg.eigh(K)
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
if qvec[0] < 0:
@@ -459,14 +519,26 @@ def rotmat2qvec(R):
def main():
- parser = argparse.ArgumentParser(description='Read and write COLMAP binary and text models')
- parser.add_argument('input_model', help='path to input model folder')
- parser.add_argument('input_format', choices=['.bin', '.txt'],
- help='input model format')
- parser.add_argument('--output_model', metavar='PATH',
- help='path to output model folder')
- parser.add_argument('--output_format', choices=['.bin', '.txt'],
- help='outut model format', default='.txt')
+ parser = argparse.ArgumentParser(
+ description="Read and write COLMAP binary and text models",
+ )
+ parser.add_argument("input_model", help="path to input model folder")
+ parser.add_argument(
+ "input_format",
+ choices=[".bin", ".txt"],
+ help="input model format",
+ )
+ parser.add_argument(
+ "--output_model",
+ metavar="PATH",
+ help="path to output model folder",
+ )
+ parser.add_argument(
+ "--output_format",
+ choices=[".bin", ".txt"],
+ help="outut model format",
+ default=".txt",
+ )
args = parser.parse_args()
cameras, images, points3D = read_model(path=args.input_model, ext=args.input_format)
@@ -476,7 +548,13 @@ def main():
print("num_points3D:", len(points3D))
if args.output_model is not None:
- write_model(cameras, images, points3D, path=args.output_model, ext=args.output_format)
+ write_model(
+ cameras,
+ images,
+ points3D,
+ path=args.output_model,
+ ext=args.output_format,
+ )
if __name__ == "__main__":
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py b/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py
index e4db0285..6507ca41 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py
@@ -6,46 +6,47 @@
#
-"""
-TODO:
+"""TODO:
+----
- remove commented useless code
- check if all imports necessary
-- refactor hardcoded model weight checkpoints
+- refactor hardcoded model weight checkpoints.
+
"""
-from PIL import Image
-import numpy as np
-from copy import deepcopy
-from pathlib import Path
-import yaml
-import torch
-import argparse
-import pandas as pd
-# from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
-from happypose.toolbox.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
+import torch
+import yaml
-# Pose estimator
-from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import create_model_refiner, create_model_coarse
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import check_update_config as check_update_config_pose
-# Detection
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import create_model_detector
-from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import check_update_config as check_update_config_detector
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
-
-from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.bop_predictions import BopPredictionRunner
from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
PoseEstimator,
)
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_tmp_dir, get_rank
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import init_distributed_mode
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR
+# Detection
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ check_update_config as check_update_config_detector,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ create_model_detector,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ check_update_config as check_update_config_pose,
+)
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ create_model_coarse,
+ create_model_refiner,
+)
+
+# from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg
+# import make_scene_dataset, make_object_dataset
+from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
+# Pose estimator
+from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
-
"""
def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
print(example_dir)
@@ -64,14 +65,16 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
            assert not mesh_path, f"there are multiple meshes in the {label} directory"
mesh_path = fn
        assert mesh_path, f"couldn't find an obj or ply mesh for {label}"
- rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units))
+ rigid_objects.append(
+ RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units)
+ )
# TODO: fix mesh units
rigid_object_dataset = RigidObjectDataset(rigid_objects)
return rigid_object_dataset
example_dir = Path("/home/emaitre/cosypose/local_data/bop_datasets/ycbv/examples/")
"""
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class CosyPoseWrapper:
@@ -84,12 +87,12 @@ def __init__(self, dataset_name, n_workers=8, gpu_renderer=False) -> None:
def load_detector(run_id, ds_name):
run_dir = EXP_DIR / run_id
# cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_detector(cfg)
label_to_category_id = cfg.label_to_category_id
model = create_model_detector(cfg, len(label_to_category_id))
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar', map_location=device)
- ckpt = ckpt['state_dict']
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=device)
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.to(device).eval()
model.cfg = cfg
@@ -102,37 +105,56 @@ def load_pose_models(coarse_run_id, refiner_run_id, n_workers):
run_dir = EXP_DIR / coarse_run_id
# cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_pose(cfg)
# object_ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_cad')
- #object_ds = make_object_dataset(cfg.object_ds_name)
- #mesh_db = MeshDataBase.from_object_ds(object_ds)
- #renderer = BulletBatchRenderer(object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer)
+ # object_ds = make_object_dataset(cfg.object_ds_name)
+ # mesh_db = MeshDataBase.from_object_ds(object_ds)
+ # renderer = BulletBatchRenderer(
+ # object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer
+ # )
#
-
+
object_dataset = make_object_dataset("ycbv")
mesh_db = MeshDataBase.from_object_ds(object_dataset)
- renderer = Panda3dBatchRenderer(object_dataset, n_workers=n_workers, preload_cache=False)
+ renderer = Panda3dBatchRenderer(
+ object_dataset,
+ n_workers=n_workers,
+ preload_cache=False,
+ )
mesh_db_batched = mesh_db.batched().to(device)
def load_model(run_id):
run_dir = EXP_DIR / run_id
- # cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader)
- cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.UnsafeLoader)
+ # cfg = yaml.load(
+ # (run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader
+ # )
+ cfg = yaml.load(
+ (run_dir / "config.yaml").read_text(),
+ Loader=yaml.UnsafeLoader,
+ )
cfg = check_update_config_pose(cfg)
if cfg.train_refiner:
- model = create_model_refiner(cfg, renderer=renderer, mesh_db=mesh_db_batched)
+ model = create_model_refiner(
+ cfg,
+ renderer=renderer,
+ mesh_db=mesh_db_batched,
+ )
else:
- model = create_model_coarse(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / 'checkpoint.pth.tar', map_location=device)
- ckpt = ckpt['state_dict']
+ model = create_model_coarse(
+ cfg,
+ renderer=renderer,
+ mesh_db=mesh_db_batched,
+ )
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=device)
+ ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.to(device).eval()
model.cfg = cfg
model.config = cfg
return model
-
+
coarse_model = load_model(coarse_run_id)
refiner_model = load_model(refiner_run_id)
return coarse_model, refiner_model, mesh_db
@@ -140,27 +162,30 @@ def load_model(run_id):
@staticmethod
def get_model(dataset_name, n_workers):
# load models
- if dataset_name == 'tless':
+ if dataset_name == "tless":
# TLESS setup
# python -m cosypose.scripts.download --model=detector-bop-tless-pbr--873074
# python -m cosypose.scripts.download --model=coarse-bop-tless-pbr--506801
# python -m cosypose.scripts.download --model=refiner-bop-tless-pbr--233420
- detector_run_id = 'detector-bop-tless-pbr--873074'
- coarse_run_id = 'coarse-bop-tless-pbr--506801'
- refiner_run_id = 'refiner-bop-tless-pbr--233420'
- elif dataset_name == 'ycbv':
+ detector_run_id = "detector-bop-tless-pbr--873074"
+ coarse_run_id = "coarse-bop-tless-pbr--506801"
+ refiner_run_id = "refiner-bop-tless-pbr--233420"
+ elif dataset_name == "ycbv":
# YCBV setup
# python -m cosypose.scripts.download --model=detector-bop-ycbv-pbr--970850
# python -m cosypose.scripts.download --model=coarse-bop-ycbv-pbr--724183
# python -m cosypose.scripts.download --model=refiner-bop-ycbv-pbr--604090
- detector_run_id = 'detector-bop-ycbv-pbr--970850'
- coarse_run_id = 'coarse-bop-ycbv-pbr--724183'
- refiner_run_id = 'refiner-bop-ycbv-pbr--604090'
+ detector_run_id = "detector-bop-ycbv-pbr--970850"
+ coarse_run_id = "coarse-bop-ycbv-pbr--724183"
+ refiner_run_id = "refiner-bop-ycbv-pbr--604090"
else:
- raise ValueError(f"Not prepared for {dataset_name} dataset")
+ msg = f"Not prepared for {dataset_name} dataset"
+ raise ValueError(msg)
detector = CosyPoseWrapper.load_detector(detector_run_id, dataset_name)
- coarse_model, refiner_model , mesh_db = CosyPoseWrapper.load_pose_models(
- coarse_run_id=coarse_run_id, refiner_run_id=refiner_run_id, n_workers=n_workers
+ coarse_model, refiner_model, mesh_db = CosyPoseWrapper.load_pose_models(
+ coarse_run_id=coarse_run_id,
+ refiner_run_id=refiner_run_id,
+ n_workers=n_workers,
)
pose_estimator = PoseEstimator(
@@ -179,14 +204,18 @@ def inference(self, observation, coarse_guess=None):
detections=detections,
run_detector=run_detector,
data_TCO_init=None,
- n_coarse_iterations=1, n_refiner_iterations=4)
+ n_coarse_iterations=1,
+ n_refiner_iterations=4,
+ )
else:
final_preds, all_preds = self.pose_predictor.run_inference_pipeline(
observation,
detections=detections,
run_detector=run_detector,
data_TCO_init=None,
- n_coarse_iterations=0, n_refiner_iterations=4)
+ n_coarse_iterations=0,
+ n_refiner_iterations=4,
+ )
print("inference successfully.")
# result: this_batch_detections, final_preds
- return final_preds.cpu()
\ No newline at end of file
+ return final_preds.cpu()
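
For context, a hedged usage sketch of the wrapper class above. The ObservationTensor
import and its from_numpy constructor are assumptions borrowed from the happypose
toolbox (they are not shown in this file), and the image and intrinsics are
placeholders:

    import numpy as np

    from happypose.pose_estimators.cosypose.cosypose.utils.cosypose_wrapper import (
        CosyPoseWrapper,
    )
    from happypose.toolbox.inference.types import ObservationTensor

    # dataset_name selects the detector/coarse/refiner triplet (tless or ycbv).
    wrapper = CosyPoseWrapper(dataset_name="ycbv", n_workers=1)

    rgb = np.zeros((480, 640, 3), dtype=np.uint8)  # placeholder image
    K = np.array([[600.0, 0.0, 320.0],
                  [0.0, 600.0, 240.0],
                  [0.0, 0.0, 1.0]])                # placeholder intrinsics
    observation = ObservationTensor.from_numpy(rgb, K=K)

    # One coarse iteration plus four refiner iterations, detections from the detector.
    final_preds = wrapper.inference(observation)
    print(final_preds.infos)
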
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/distributed.py b/happypose/pose_estimators/cosypose/cosypose/utils/distributed.py
index 79fea887..e5800ad1 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/distributed.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/distributed.py
@@ -1,23 +1,24 @@
-import sys
import os
-import torch.distributed as dist
-import torch
+import sys
from pathlib import Path
+import torch
+import torch.distributed as dist
+
def get_tmp_dir():
- if 'JOB_DIR' in os.environ:
- tmp_dir = Path(os.environ['JOB_DIR']) / 'tmp'
+ if "JOB_DIR" in os.environ:
+ tmp_dir = Path(os.environ["JOB_DIR"]) / "tmp"
else:
- tmp_dir = Path('/tmp/cosypose_job')
+ tmp_dir = Path("/tmp/cosypose_job")
tmp_dir.mkdir(exist_ok=True)
return tmp_dir
def sync_model(model):
- sync_dir = get_tmp_dir() / 'models'
+ sync_dir = get_tmp_dir() / "models"
sync_dir.mkdir(exist_ok=True)
- sync_ckpt = sync_dir / 'sync.checkpoint'
+ sync_ckpt = sync_dir / "sync.checkpoint"
if get_rank() == 0 and get_world_size() > 1:
torch.save(model.state_dict(), sync_ckpt)
dist.barrier()
@@ -28,11 +29,11 @@ def sync_model(model):
def redirect_output():
- if 'JOB_DIR' in os.environ:
+ if "JOB_DIR" in os.environ:
rank = get_rank()
- output_file = Path(os.environ['JOB_DIR']) / f'stdout{rank}.out'
- sys.stdout = open(output_file, 'w')
- sys.stderr = open(output_file, 'w')
+ output_file = Path(os.environ["JOB_DIR"]) / f"stdout{rank}.out"
+ sys.stdout = open(output_file, "w")
+ sys.stderr = open(output_file, "w")
return
@@ -54,24 +55,25 @@ def get_world_size():
def init_distributed_mode(initfile=None):
assert torch.cuda.device_count() == 1
- rank = int(os.environ.get('SLURM_PROCID', 0))
- world_size = int(os.environ.get('SLURM_NTASKS', 1))
+ rank = int(os.environ.get("SLURM_PROCID", 0))
+ world_size = int(os.environ.get("SLURM_NTASKS", 1))
if initfile is None:
- initfile = get_tmp_dir() / 'initfile'
+ initfile = get_tmp_dir() / "initfile"
if initfile.exists() and world_size == 1:
initfile.unlink()
initfile = Path(initfile)
assert initfile.parent.exists()
torch.distributed.init_process_group(
- backend='nccl', rank=rank, world_size=world_size,
- init_method=f'file://{initfile.as_posix()}'
+ backend="nccl",
+ rank=rank,
+ world_size=world_size,
+ init_method=f"file://{initfile.as_posix()}",
)
torch.distributed.barrier()
def reduce_dict(input_dict, average=True):
- """
- https://github.com/pytorch/vision/blob/master/references/detection/utils.py
+ """https://github.com/pytorch/vision/blob/master/references/detection/utils.py
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
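
reduce_dict is what makes the per-rank meter values comparable before rank 0 writes
the log: each value is all-reduced across the process group and either averaged or
summed. A single-process sketch of the call pattern; the gloo group below exists
only so the collective has a process group, and, following the torchvision reference
cited in the docstring, a world size of 1 returns the dict unchanged:

    import torch
    import torch.distributed as dist

    from happypose.pose_estimators.cosypose.cosypose.utils.distributed import reduce_dict

    dist.init_process_group(
        backend="gloo",
        init_method="tcp://127.0.0.1:29500",
        rank=0,
        world_size=1,
    )
    log_dict = {"train_loss": torch.tensor(0.42), "grad_norm": torch.tensor(1.3)}
    # With N ranks each value becomes the mean over ranks (average=True) or the sum.
    print(reduce_dict(log_dict, average=True))
    dist.destroy_process_group()
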
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/extensions.py b/happypose/pose_estimators/cosypose/cosypose/utils/extensions.py
index 15b0419d..59d37a54 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/extensions.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/extensions.py
@@ -1,12 +1,15 @@
-from happypose.pose_estimators.cosypose.cosypose.config import PROJECT_DIR
from torch.utils.cpp_extension import load
+from happypose.pose_estimators.cosypose.cosypose.config import PROJECT_DIR
+
-def load_extension(optimization='-O3'):
- module = load(name='cosypose_cext',
- sources=[
- PROJECT_DIR / 'cosypose/multiview/csrc/ransac.cpp',
- ],
- extra_cflags=[optimization],
- verbose=True)
+def load_extension(optimization="-O3"):
+ module = load(
+ name="cosypose_cext",
+ sources=[
+ PROJECT_DIR / "cosypose/multiview/csrc/ransac.cpp",
+ ],
+ extra_cflags=[optimization],
+ verbose=True,
+ )
return module
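
load_extension JIT-compiles the listed C++ source with torch.utils.cpp_extension.load,
so the first call needs a working C++ toolchain and takes a while; subsequent calls
reuse torch's cached build. Typical call site:

    from happypose.pose_estimators.cosypose.cosypose.utils.extensions import load_extension

    # Compiles cosypose/multiview/csrc/ransac.cpp on first use
    # (verbose=True prints the compiler invocation).
    cosypose_cext = load_extension(optimization="-O3")
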
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/logging.py b/happypose/pose_estimators/cosypose/cosypose/utils/logging.py
index 0dd093cd..b0d96fb2 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/logging.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/logging.py
@@ -3,15 +3,14 @@
from datetime import timedelta
-class ElapsedFormatter():
-
+class ElapsedFormatter:
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = record.created - self.start_time
elapsed = timedelta(seconds=elapsed_seconds)
- return "{} - {}".format(elapsed, record.getMessage())
+ return f"{elapsed} - {record.getMessage()}"
def get_logger(name):
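
ElapsedFormatter stamps each record with the time elapsed since the formatter was
created instead of a wall-clock timestamp. A minimal sketch of wiring it into the
standard logging machinery:

    import logging
    import time

    from happypose.pose_estimators.cosypose.cosypose.utils.logging import ElapsedFormatter

    handler = logging.StreamHandler()
    handler.setFormatter(ElapsedFormatter())
    logger = logging.getLogger("example")
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    logger.info("start")            # "0:00:00.000123 - start"
    time.sleep(1.5)
    logger.info("after some work")  # prefix now shows roughly 1.5 s elapsed
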
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/logs_bokeh.py b/happypose/pose_estimators/cosypose/cosypose/utils/logs_bokeh.py
index bd628266..ac452a66 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/logs_bokeh.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/logs_bokeh.py
@@ -1,17 +1,21 @@
-import pandas as pd
-from pathlib import Path
-from IPython.display import display
-import yaml
-from itertools import cycle
import textwrap
from collections import OrderedDict
-from bokeh.io import output_notebook, show
+from itertools import cycle
+from pathlib import Path
+
import numpy as np
-from bokeh.plotting import figure
-from bokeh.models import HoverTool
-from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import check_update_config
-from bokeh.layouts import gridplot
+import pandas as pd
import seaborn as sns
+import yaml
+from bokeh.io import output_notebook, show
+from bokeh.layouts import gridplot
+from bokeh.models import HoverTool
+from bokeh.plotting import figure
+from IPython.display import display
+
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ check_update_config,
+)
class Plotter:
@@ -36,23 +40,23 @@ def load_logs(self, run_ids):
colors = OrderedDict()
for run_id, color in zip(run_ids, self.colors_hex):
run_dir = self.log_dir / run_id
- assert run_dir.exists(), f'{run_id} does not exists.'
- config = yaml.full_load((run_dir / 'config.yaml').read_text())
+            assert run_dir.exists(), f"{run_id} does not exist."
+ config = yaml.full_load((run_dir / "config.yaml").read_text())
configs[run_id] = self.fill_config_fn(config)
- log_path = run_dir / 'log.txt'
+ log_path = run_dir / "log.txt"
if log_path.exists():
- log_df = pd.read_json(run_dir / 'log.txt', lines=True)
+ log_df = pd.read_json(run_dir / "log.txt", lines=True)
else:
log_df = None
log_dicts[run_id] = log_df
- ds_eval = dict()
+ ds_eval = {}
for f in run_dir.iterdir():
- if 'errors_' in f.name:
- ds = f.with_suffix('').name.split('errors_')[1]
+ if "errors_" in f.name:
+ ds = f.with_suffix("").name.split("errors_")[1]
ds_eval[ds] = pd.read_json(f, lines=True)
- ds_eval[ds] = ds_eval[ds].groupby('epoch').last().reset_index()
+ ds_eval[ds] = ds_eval[ds].groupby("epoch").last().reset_index()
eval_dicts[run_id] = ds_eval
colors[run_id] = color
@@ -72,21 +76,25 @@ def _add_figure(self, f, new_row):
row = self.figures[-1]
row.append(f)
- def plot_eval_fields(self,
- fields,
- dataset='auto',
- new_row=False,
- semilogy=False,
- legend=False,
- title=None,
- y_range=None,
- dash_patterns=('solid', 'dashed', 'dotted')):
- y_axis_type = 'auto' if not semilogy else 'log'
- f = figure(y_axis_type=y_axis_type,
- background_fill_color='#EAEAF2',
- background_fill_alpha=0.6,
- y_range=y_range)
- if dataset == 'auto':
+ def plot_eval_fields(
+ self,
+ fields,
+ dataset="auto",
+ new_row=False,
+ semilogy=False,
+ legend=False,
+ title=None,
+ y_range=None,
+ dash_patterns=("solid", "dashed", "dotted"),
+ ):
+ y_axis_type = "auto" if not semilogy else "log"
+ f = figure(
+ y_axis_type=y_axis_type,
+ background_fill_color="#EAEAF2",
+ background_fill_alpha=0.6,
+ y_range=y_range,
+ )
+ if dataset == "auto":
datasets = []
for ds_eval in self.eval_dicts.values():
datasets += list(ds_eval.keys())
@@ -102,47 +110,61 @@ def plot_eval_fields(self,
continue
eval_df = self.eval_dicts[run_id][dataset]
if field in eval_df:
- x = eval_df['epoch']
+ x = eval_df["epoch"]
y = eval_df[field]
- run_num = run_id.split('-')[-1]
- f.line(x, y, line_width=1.0, color=color,
- line_dash=dash_pattern, legend_label=str(run_num),
- name=f'{run_num}/{field}')
+ run_num = run_id.split("-")[-1]
+ f.line(
+ x,
+ y,
+ line_width=1.0,
+ color=color,
+ line_dash=dash_pattern,
+ legend_label=str(run_num),
+ name=f"{run_num}/{field}",
+ )
if title is not None:
f.title.text = title
if legend:
- f.legend.location = 'top_right'
- f.legend.click_policy = 'hide'
- f.legend.label_text_font_size = '6pt'
+ f.legend.location = "top_right"
+ f.legend.click_policy = "hide"
+ f.legend.label_text_font_size = "6pt"
else:
f.legend.visible = False
- tool = HoverTool(tooltips=[
- ('x,y', '@x, @y'),
- ('name', '$name')
- ], line_policy='nearest', point_policy='snap_to_data')
+ tool = HoverTool(
+ tooltips=[
+ ("x,y", "@x, @y"),
+ ("name", "$name"),
+ ],
+ line_policy="nearest",
+ point_policy="snap_to_data",
+ )
f.add_tools(tool)
self._add_figure(f, new_row=new_row)
return f
- def plot_eval_field(self,
- field,
- datasets='auto',
- new_row=False,
- semilogy=False,
- legend=False,
- title=None,
- y_range=None,
- dash_patterns=('solid', 'dashed', 'dotted')):
- y_axis_type = 'auto' if not semilogy else 'log'
- f = figure(y_axis_type=y_axis_type,
- background_fill_color='#EAEAF2',
- background_fill_alpha=0.6,
- y_range=y_range)
- assert datasets == 'auto' or isinstance(datasets, list)
- if datasets == 'auto':
+ def plot_eval_field(
+ self,
+ field,
+ datasets="auto",
+ new_row=False,
+ semilogy=False,
+ legend=False,
+ title=None,
+ y_range=None,
+ dash_patterns=("solid", "dashed", "dotted"),
+ ):
+ y_axis_type = "auto" if not semilogy else "log"
+ f = figure(
+ y_axis_type=y_axis_type,
+ background_fill_color="#EAEAF2",
+ background_fill_alpha=0.6,
+ y_range=y_range,
+ )
+ assert datasets == "auto" or isinstance(datasets, list)
+ if datasets == "auto":
datasets = []
for ds_eval in self.eval_dicts.values():
datasets += list(ds_eval.keys())
@@ -155,90 +177,129 @@ def plot_eval_field(self,
if dataset in eval_df:
df = eval_df[dataset]
if field in eval_df[dataset]:
- x = df['epoch'].values
+ x = df["epoch"].values
y = df[field].values
- run_num = run_id.split('-')[-1]
- name = f'{run_num}/{dataset}'
- name = '\n '.join(textwrap.wrap(name, width=20))
+ run_num = run_id.split("-")[-1]
+ name = f"{run_num}/{dataset}"
+ name = "\n ".join(textwrap.wrap(name, width=20))
if len(x) == 1:
- f.circle(x, y, color=color, line_dash=dash_pattern, name=name)
+ f.circle(
+ x,
+ y,
+ color=color,
+ line_dash=dash_pattern,
+ name=name,
+ )
x = np.concatenate(([0], x))
y = np.concatenate((y, y))
- f.line(x, y, line_width=1.0, color=color,
- line_dash=dash_pattern, legend_label=str(run_num),
- name=name)
+ f.line(
+ x,
+ y,
+ line_width=1.0,
+ color=color,
+ line_dash=dash_pattern,
+ legend_label=str(run_num),
+ name=name,
+ )
if title is not None:
f.title.text = title
if legend:
- f.legend.location = 'top_right'
- f.legend.click_policy = 'hide'
- f.legend.label_text_font_size = '6pt'
+ f.legend.location = "top_right"
+ f.legend.click_policy = "hide"
+ f.legend.label_text_font_size = "6pt"
else:
f.legend.visible = False
- tool = HoverTool(tooltips=[
- ('x,y', '@x, @y'),
- ('name', '$name')
- ], line_policy='nearest', point_policy='snap_to_data')
+ tool = HoverTool(
+ tooltips=[
+ ("x,y", "@x, @y"),
+ ("name", "$name"),
+ ],
+ line_policy="nearest",
+ point_policy="snap_to_data",
+ )
f.add_tools(tool)
self._add_figure(f, new_row=new_row)
return f
- def plot_train_fields(self,
- fields,
- new_row=False,
- semilogy=False,
- y_range=None,
- legend=False,
- title=None,
- dash_patterns=('solid', 'dashed', 'dotted', 'dotdash')):
- y_axis_type = 'auto' if not semilogy else 'log'
- f = figure(y_axis_type=y_axis_type,
- background_fill_color='#EAEAF2',
- background_fill_alpha=0.6,
- y_range=y_range)
+ def plot_train_fields(
+ self,
+ fields,
+ new_row=False,
+ semilogy=False,
+ y_range=None,
+ legend=False,
+ title=None,
+ dash_patterns=("solid", "dashed", "dotted", "dotdash"),
+ ):
+ y_axis_type = "auto" if not semilogy else "log"
+ f = figure(
+ y_axis_type=y_axis_type,
+ background_fill_color="#EAEAF2",
+ background_fill_alpha=0.6,
+ y_range=y_range,
+ )
for field, dash_pattern in zip(fields, dash_patterns):
for run_id in self.run_ids:
color = self.colors[run_id]
log_df = self.log_dicts[run_id]
if field in log_df:
- x = log_df['epoch']
+ x = log_df["epoch"]
y = log_df[field]
m = np.logical_not(np.isnan(y))
x, y = x[m], y[m]
- run_num = run_id.split('-')[-1]
- f.line(x, y, line_width=1.0, color=color,
- line_dash=dash_pattern, legend_label=str(run_num),
- name=f'{run_num}/{field}')
+ run_num = run_id.split("-")[-1]
+ f.line(
+ x,
+ y,
+ line_width=1.0,
+ color=color,
+ line_dash=dash_pattern,
+ legend_label=str(run_num),
+ name=f"{run_num}/{field}",
+ )
if title is not None:
f.title.text = title
if legend:
- f.legend.location = 'top_right'
- f.legend.click_policy = 'hide'
- f.legend.label_text_font_size = '6pt'
+ f.legend.location = "top_right"
+ f.legend.click_policy = "hide"
+ f.legend.label_text_font_size = "6pt"
else:
f.legend.visible = False
- tool = HoverTool(tooltips=[
- ('x,y', '@x, @y'),
- ('name', '$name')
- ], line_policy='nearest', point_policy='snap_to_data')
+ tool = HoverTool(
+ tooltips=[
+ ("x,y", "@x, @y"),
+ ("name", "$name"),
+ ],
+ line_policy="nearest",
+ point_policy="snap_to_data",
+ )
f.add_tools(tool)
self._add_figure(f, new_row=new_row)
return f
def show(self):
- layout = gridplot(self.figures, sizing_mode='scale_width')
+ layout = gridplot(self.figures, sizing_mode="scale_width")
show(layout)
def show_configs(self, ignore=None, diff=True):
if ignore is None:
- ignore = ('n_workers', 'save_dir', 'job_dir', 'seed',
- 'train_ds_names', 'val_ds_names', 'test_ds_names',
- 'run_id', 'label_to_category_id', 'categories')
+ ignore = (
+ "n_workers",
+ "save_dir",
+ "job_dir",
+ "seed",
+ "train_ds_names",
+ "val_ds_names",
+ "test_ds_names",
+ "run_id",
+ "label_to_category_id",
+ "categories",
+ )
ignore = list(ignore)
df = {k: vars(v) for k, v in self.configs.items()}
@@ -246,11 +307,15 @@ def show_configs(self, ignore=None, diff=True):
config_df = df.copy()
self.config_df = config_df
- name2color = {k: v for k, v in zip(self.run_ids, self.colors_uint8)}
+ name2color = dict(zip(self.run_ids, self.colors_uint8))
def f_row(data):
rgb = (np.array(name2color[data.name]) * 255).astype(np.uint8)
- return [f'background-color: rgba({rgb[0]},{rgb[1]},{rgb[2]},1.0)' for _ in range(len(data))]
+ return [
+ f"background-color: rgba({rgb[0]},{rgb[1]},{rgb[2]},1.0)"
+ for _ in range(len(data))
+ ]
+
if diff:
for ignore_n in ignore:
if ignore_n in df:
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/multiepoch_dataloader.py b/happypose/pose_estimators/cosypose/cosypose/utils/multiepoch_dataloader.py
index a658784e..e989ae25 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/multiepoch_dataloader.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/multiepoch_dataloader.py
@@ -21,7 +21,9 @@ def __iter__(self):
self.sampler_length += len(self.dataloader)
next_index_sampler = iter(self.dataloader_iter._index_sampler)
self.dataloader_iter._sampler_iter = chain(
- self.dataloader_iter._sampler_iter, next_index_sampler)
+ self.dataloader_iter._sampler_iter,
+ next_index_sampler,
+ )
self.epoch_id += 1
self.batch_id = 0
@@ -36,7 +38,9 @@ def __next__(self):
if self.batch_id == self.epoch_size:
raise StopIteration
- elif self.id_in_sampler == self.sampler_length - 2 * self.dataloader.num_workers:
+ elif (
+ self.id_in_sampler == self.sampler_length - 2 * self.dataloader.num_workers
+ ):
next_index_sampler = iter(self.dataloader_iter._index_sampler)
self.dataloader_iter._sampler_iter = next_index_sampler
self.id_in_sampler = 0
@@ -51,7 +55,7 @@ def __next__(self):
return batch
def get_infos(self):
- return dict()
+ return {}
def __del__(self):
del self.dataloader_iter
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/random.py b/happypose/pose_estimators/cosypose/cosypose/utils/random.py
index d1ff044b..8889fb67 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/random.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/random.py
@@ -1,6 +1,8 @@
import contextlib
+
import numpy as np
+
@contextlib.contextmanager
def temp_numpy_seed(seed):
state = np.random.get_state()
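
The hunk cuts off before the body of temp_numpy_seed. A context manager with this
name conventionally saves the global NumPy RNG state (the line shown above), seeds
it, and restores the state on exit; a hedged sketch of the likely remainder, not the
verified file contents:

    import contextlib

    import numpy as np

    @contextlib.contextmanager
    def temp_numpy_seed(seed):
        state = np.random.get_state()   # save the global RNG state
        np.random.seed(seed)
        try:
            yield
        finally:
            np.random.set_state(state)  # restore it even if the block raises

    # Draws inside the block are reproducible; draws after it are unaffected.
    with temp_numpy_seed(0):
        a = np.random.rand(3)
    with temp_numpy_seed(0):
        b = np.random.rand(3)
    assert np.allclose(a, b)
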
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/resources.py b/happypose/pose_estimators/cosypose/cosypose/utils/resources.py
index 0d004274..ee8bd9bd 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/resources.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/resources.py
@@ -1,18 +1,19 @@
import os
-import psutil
from shutil import which
+import psutil
+
def is_egl_available():
+    # NB: is_gpu_available must be called; the bare function object is always truthy.
+    return is_gpu_available() and "EGL_VISIBLE_DEVICES" in os.environ
+ return is_gpu_available and "EGL_VISIBLE_DEVICES" in os.environ
def is_gpu_available():
- return which('nvidia-smi') is not None
+ return which("nvidia-smi") is not None
def is_slurm_available():
- return which('sinfo') is not None
+ return which("sinfo") is not None
def get_total_memory():
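
The body of get_total_memory is not shown in this hunk. Given the psutil import
above, one plausible implementation (a guess for illustration, not the verified
body) sums the resident set size of the current process and its children:

    import os

    import psutil

    def get_total_memory_sketch():
        # Hypothetical stand-in for get_total_memory: RSS of this process
        # plus all of its children, in MB.
        process = psutil.Process(os.getpid())
        rss = process.memory_info().rss
        for child in process.children(recursive=True):
            rss += child.memory_info().rss
        return rss / 1024**2
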
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/tensor_collection.py b/happypose/pose_estimators/cosypose/cosypose/utils/tensor_collection.py
index 1a98074b..f8ff076e 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/tensor_collection.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/tensor_collection.py
@@ -1,7 +1,12 @@
-import torch
from pathlib import Path
+
import pandas as pd
-from happypose.pose_estimators.cosypose.cosypose.utils.distributed import get_rank, get_world_size
+import torch
+
+from happypose.pose_estimators.cosypose.cosypose.utils.distributed import (
+ get_rank,
+ get_world_size,
+)
def concatenate(datas):
@@ -9,11 +14,13 @@ def concatenate(datas):
if len(datas) == 0:
return PandasTensorCollection(infos=pd.DataFrame())
classes = [data.__class__ for data in datas]
- assert all([class_n == classes[0] for class_n in classes])
+ assert all(class_n == classes[0] for class_n in classes)
- infos = pd.concat([data.infos for data in datas], axis=0, sort=False).reset_index(drop=True)
+ infos = pd.concat([data.infos for data in datas], axis=0, sort=False).reset_index(
+ drop=True,
+ )
tensor_keys = datas[0].tensors.keys()
- tensors = dict()
+ tensors = {}
for k in tensor_keys:
tensors[k] = torch.cat([getattr(data, k) for data in datas], dim=0)
return PandasTensorCollection(infos=infos, **tensors)
@@ -21,7 +28,7 @@ def concatenate(datas):
class TensorCollection:
def __init__(self, **kwargs):
- self.__dict__['_tensors'] = dict()
+ self.__dict__["_tensors"] = {}
for k, v in kwargs.items():
self.register_tensor(k, v)
@@ -32,15 +39,15 @@ def delete_tensor(self, name):
del self._tensors[name]
def __repr__(self):
- s = self.__class__.__name__ + '(' '\n'
+        s = self.__class__.__name__ + "(\n"
for k, t in self._tensors.items():
- s += f' {k}: {t.shape} {t.dtype} {t.device},\n'
- s += ')'
+ s += f" {k}: {t.shape} {t.dtype} {t.device},\n"
+ s += ")"
return s
def __getitem__(self, ids):
- tensors = dict()
- for k, v in self._tensors.items():
+ tensors = {}
+ for k, _v in self._tensors.items():
tensors[k] = getattr(self, k)[ids]
return TensorCollection(**tensors)
@@ -61,15 +68,16 @@ def device(self):
return list(self.tensors.values())[0].device
def __getstate__(self):
- return {'tensors': self.tensors}
+ return {"tensors": self.tensors}
def __setstate__(self, state):
- self.__init__(**state['tensors'])
+ self.__init__(**state["tensors"])
return
def __setattr__(self, name, value):
- if '_tensors' not in self.__dict__:
- raise ValueError('Please call __init__')
+ if "_tensors" not in self.__dict__:
+ msg = "Please call __init__"
+ raise ValueError(msg)
if name in self._tensors:
self._tensors[name] = value
else:
@@ -81,10 +89,10 @@ def to(self, torch_attr):
return self
def cuda(self):
- return self.to('cuda')
+ return self.to("cuda")
def cpu(self):
- return self.to('cpu')
+ return self.to("cpu")
def float(self):
return self.to(torch.float)
@@ -96,8 +104,8 @@ def half(self):
return self.to(torch.half)
def clone(self):
- tensors = dict()
- for k, v in self.tensors.items():
+ tensors = {}
+ for k, _v in self.tensors.items():
tensors[k] = getattr(self, k).clone()
return TensorCollection(**tensors)
@@ -106,14 +114,14 @@ class PandasTensorCollection(TensorCollection):
def __init__(self, infos, **tensors):
super().__init__(**tensors)
self.infos = infos.reset_index(drop=True)
- self.meta = dict()
+ self.meta = {}
def register_buffer(self, k, v):
assert len(v) == len(self)
super().register_buffer()
def merge_df(self, df, *args, **kwargs):
- infos = self.infos.merge(df, how='left', *args, **kwargs)
+ infos = self.infos.merge(df, how="left", *args, **kwargs)
assert len(infos) == len(self.infos)
assert (infos.index == self.infos.index).all()
return PandasTensorCollection(infos=infos, **self.tensors)
@@ -123,12 +131,12 @@ def clone(self):
return PandasTensorCollection(self.infos.copy(), **tensors)
def __repr__(self):
- s = self.__class__.__name__ + '(' '\n'
+ s = self.__class__.__name__ + "(" "\n"
for k, t in self._tensors.items():
- s += f' {k}: {t.shape} {t.dtype} {t.device},\n'
+ s += f" {k}: {t.shape} {t.dtype} {t.device},\n"
s += f"{'-'*40}\n"
- s += ' infos:\n' + self.infos.__repr__() + '\n'
- s += ')'
+ s += " infos:\n" + self.infos.__repr__() + "\n"
+ s += ")"
return s
def __getitem__(self, ids):
@@ -141,7 +149,7 @@ def __len__(self):
def gather_distributed(self, tmp_dir=None):
rank, world_size = get_rank(), get_world_size()
- tmp_file_template = (tmp_dir / 'rank={rank}.pth.tar').as_posix()
+ tmp_file_template = (tmp_dir / "rank={rank}.pth.tar").as_posix()
if rank > 0:
tmp_file = tmp_file_template.format(rank=rank)
@@ -164,11 +172,11 @@ def gather_distributed(self, tmp_dir=None):
def __getstate__(self):
state = super().__getstate__()
- state['infos'] = self.infos
- state['meta'] = self.meta
+ state["infos"] = self.infos
+ state["meta"] = self.meta
return state
def __setstate__(self, state):
- self.__init__(state['infos'], **state['tensors'])
- self.meta = state['meta']
+ self.__init__(state["infos"], **state["tensors"])
+ self.meta = state["meta"]
return
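For context, a minimal usage sketch of the API touched above (hypothetical labels and identity poses; assumes happypose is installed):

```
import pandas as pd
import torch

from happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection import (
    PandasTensorCollection,
    concatenate,
)

# Two single-detection batches, each carrying one 4x4 pose tensor.
a = PandasTensorCollection(
    infos=pd.DataFrame({"label": ["obj_000001"]}),
    poses=torch.eye(4).unsqueeze(0),
)
b = PandasTensorCollection(
    infos=pd.DataFrame({"label": ["obj_000002"]}),
    poses=torch.eye(4).unsqueeze(0),
)

batch = concatenate([a, b])
# infos are concatenated and re-indexed; tensors are torch.cat'ed along dim 0.
assert len(batch) == 2 and batch.poses.shape == (2, 4, 4)
```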
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/timer.py b/happypose/pose_estimators/cosypose/cosypose/utils/timer.py
index 20f9b9fb..95f2bb60 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/timer.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/timer.py
@@ -9,7 +9,7 @@ def __init__(self):
def reset(self):
self.start_time = None
- self.elapsed = 0.
+ self.elapsed = 0.0
self.is_running = False
def start(self):
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/tqdm.py b/happypose/pose_estimators/cosypose/cosypose/utils/tqdm.py
index b09be613..b3da79dd 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/tqdm.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/tqdm.py
@@ -1,9 +1,8 @@
-import sys
import functools
-from tqdm import tqdm
+import sys
def patch_tqdm():
- tqdm = sys.modules['tqdm'].tqdm
- sys.modules['tqdm'].tqdm = functools.partial(tqdm, file=sys.stdout)
+ tqdm = sys.modules["tqdm"].tqdm
+ sys.modules["tqdm"].tqdm = functools.partial(tqdm, file=sys.stdout)
return
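A minimal sketch of the intended effect (illustrative; the loop is throwaway):

```
from happypose.pose_estimators.cosypose.cosypose.utils.tqdm import patch_tqdm

patch_tqdm()

# Subsequent lookups of tqdm.tqdm now write to stdout instead of stderr,
# keeping progress bars in the same stream as regular log output.
from tqdm import tqdm

for _ in tqdm(range(3)):
    pass
```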
diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/xarray.py b/happypose/pose_estimators/cosypose/cosypose/utils/xarray.py
index bd964082..c28aca8d 100644
--- a/happypose/pose_estimators/cosypose/cosypose/utils/xarray.py
+++ b/happypose/pose_estimators/cosypose/cosypose/utils/xarray.py
@@ -1,8 +1,8 @@
import numpy as np
-def xr_merge(ds1, ds2, on, how='left', dim1='dim_0', dim2='dim_0', fill_value=np.nan):
- if how != 'left':
+def xr_merge(ds1, ds2, on, how="left", dim1="dim_0", dim2="dim_0", fill_value=np.nan):
+ if how != "left":
raise NotImplementedError
ds1 = ds1.copy()
@@ -13,14 +13,14 @@ def xr_merge(ds1, ds2, on, how='left', dim1='dim_0', dim2='dim_0', fill_value=np
df1 = ds1.reset_coords()[on].to_dataframe()
df2 = ds2.reset_coords()[on].to_dataframe()
- df1['idx1'] = np.arange(len(df1))
- df2['idx2'] = np.arange(len(df2))
+ df1["idx1"] = np.arange(len(df1))
+ df2["idx2"] = np.arange(len(df2))
merge = df1.merge(df2, on=on, how=how)
assert len(merge) == ds1.dims[dim1]
- idx1 = merge['idx1'].values
- idx2 = merge['idx2'].values
+ idx1 = merge["idx1"].values
+ idx2 = merge["idx2"].values
mask = np.isfinite(idx2)
# assert mask.sum() == ds2.dims[dim1]
idx1 = idx1[mask]
@@ -29,7 +29,7 @@ def xr_merge(ds1, ds2, on, how='left', dim1='dim_0', dim2='dim_0', fill_value=np
for k, data_var in ds2.data_vars.items():
array = data_var.values
if isinstance(fill_value, dict):
- fill = fill_value.get(k, float('nan'))
+ fill = fill_value.get(k, float("nan"))
else:
fill = fill_value
assert data_var.dims[0] == dim1
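A hedged sketch of the contract of `xr_merge` on toy datasets (the outcome stated in the comment is the expected left-join behaviour, not a tested result):

```
import xarray as xr

from happypose.pose_estimators.cosypose.cosypose.utils.xarray import xr_merge

ds1 = xr.Dataset(
    {"gt_valid": ("dim_0", [True, False])},
    coords={"scene_id": ("dim_0", [1, 1]), "view_id": ("dim_0", [0, 1])},
)
ds2 = xr.Dataset(
    {"score": ("dim_0", [0.9])},
    coords={"scene_id": ("dim_0", [1]), "view_id": ("dim_0", [0])},
)

merged = xr_merge(ds1, ds2, on=["scene_id", "view_id"])
# Expected: row (scene 1, view 0) receives score 0.9; row (1, 1) has no match
# in ds2 and is filled with the default fill_value (np.nan).
```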
diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/bokeh_utils.py b/happypose/pose_estimators/cosypose/cosypose/visualization/bokeh_utils.py
index 9a9ebd9d..f0fb41a2 100644
--- a/happypose/pose_estimators/cosypose/cosypose/visualization/bokeh_utils.py
+++ b/happypose/pose_estimators/cosypose/cosypose/visualization/bokeh_utils.py
@@ -1,21 +1,20 @@
import bokeh
-from bokeh.plotting import figure as bokeh_figure
-from bokeh.models import ColumnDataSource
-from bokeh.models.widgets import DataTable, TableColumn
-from bokeh.models.widgets import NumberFormatter
import bokeh.io
import numpy as np
+from bokeh.models import ColumnDataSource
+from bokeh.models.widgets import DataTable, NumberFormatter, TableColumn
+from bokeh.plotting import figure as bokeh_figure
from PIL import Image
def to_rgba(im):
im = Image.fromarray(im)
- im = np.asarray(im.convert('RGBA'))
+ im = np.asarray(im.convert("RGBA"))
im = np.flipud(im)
return im
-def plot_image(im, axes=True, tools='', im_size=None, figure=None):
+def plot_image(im, axes=True, tools="", im_size=None, figure=None):
if np.asarray(im).ndim == 2:
gray = True
else:
@@ -26,16 +25,29 @@ def plot_image(im, axes=True, tools='', im_size=None, figure=None):
h, w = im.shape[:2]
else:
h, w = im_size
- source = bokeh.models.sources.ColumnDataSource(dict(rgba=[im]))
- f = image_figure('rgba', source, im_size=(h, w), axes=axes, tools=tools, gray=gray, figure=figure)
+ source = bokeh.models.sources.ColumnDataSource({"rgba": [im]})
+ f = image_figure(
+ "rgba",
+ source,
+ im_size=(h, w),
+ axes=axes,
+ tools=tools,
+ gray=gray,
+ figure=figure,
+ )
return f, source
def make_image_figure(im_size=(240, 320), axes=True):
w, h = im_size
- f = bokeh_figure(x_range=(0, w), y_range=(0, h),
- plot_width=w, plot_height=h, tools='',
- tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")])
+ f = bokeh_figure(
+ x_range=(0, w),
+ y_range=(0, h),
+ plot_width=w,
+ plot_height=h,
+ tools="",
+ tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
+ )
f.toolbar.logo = None
if not axes:
f.xaxis[0].visible = False
@@ -43,13 +55,25 @@ def make_image_figure(im_size=(240, 320), axes=True):
return f
-def image_figure(key, source, im_size=(240, 320), axes=True, tools='',
- gray=False, figure=None):
+def image_figure(
+ key,
+ source,
+ im_size=(240, 320),
+ axes=True,
+ tools="",
+ gray=False,
+ figure=None,
+):
h, w = im_size
if figure is None:
- f = bokeh_figure(x_range=(0, w), y_range=(0, h),
- plot_width=w, plot_height=h, tools=tools,
- tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")])
+ f = bokeh_figure(
+ x_range=(0, w),
+ y_range=(0, h),
+ plot_width=w,
+ plot_height=h,
+ tools=tools,
+ tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
+ )
else:
f = figure
@@ -68,8 +92,8 @@ def image_figure(key, source, im_size=(240, 320), axes=True, tools='',
def convert_df(df):
columns = []
for column in df.columns:
- if df.dtypes[column].kind == 'f':
- formatter = NumberFormatter(format='0.000')
+ if df.dtypes[column].kind == "f":
+ formatter = NumberFormatter(format="0.000")
else:
formatter = None
table_col = TableColumn(field=column, title=column, formatter=formatter)
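For orientation, a minimal sketch of `plot_image` (assumes a bokeh 2.x environment, which the `plot_width`/`plot_height` keywords above require):

```
import numpy as np
from bokeh.io import show

from happypose.pose_estimators.cosypose.cosypose.visualization.bokeh_utils import (
    plot_image,
)

im = (np.random.rand(240, 320, 3) * 255).astype(np.uint8)  # random RGB image
f, source = plot_image(im)  # figure sized to the image, toolbar logo removed
show(f)
```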
diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py b/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py
index 37a61875..404ff313 100644
--- a/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py
+++ b/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py
@@ -1,35 +1,39 @@
-import numpy as np
import time
-import transforms3d
-import torch
-from copy import deepcopy
from collections import defaultdict
+from copy import deepcopy
+
+import numpy as np
import seaborn as sns
+import torch
+import transforms3d
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import BulletSceneRenderer
+from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import euler2quat
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform import Transform
from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T
-from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import euler2quat
+from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import ( # noqa: E501
+ BulletSceneRenderer,
+)
+
from .plotter import Plotter
def get_group_infos(group_id, mv_scene_ds):
- mask = mv_scene_ds.frame_index['group_id'] == group_id
+ mask = mv_scene_ds.frame_index["group_id"] == group_id
row = mv_scene_ds.frame_index.loc[mask]
scene_id, view_ids = row.scene_id.item(), row.view_ids.item()
return scene_id, view_ids
def filter_predictions(preds, group_id):
- m = preds.infos['group_id'] == group_id
+ m = preds.infos["group_id"] == group_id
return preds[np.where(m)[0]]
-def nms3d(preds, th=0.04, poses_attr='poses'):
+def nms3d(preds, th=0.04, poses_attr="poses"):
TCO, TCO_infos = getattr(preds, poses_attr).cpu(), preds.infos
is_tested = set()
TCO = np.array(TCO)
- scores = TCO_infos['score'].values
+ scores = TCO_infos["score"].values
all_t = TCO[:, :3, -1]
argsort = np.argsort(-scores)
keep = []
@@ -52,30 +56,44 @@ def nms3d(preds, th=0.04, poses_attr='poses'):
return new_preds
-def make_scene_renderings(objects, cameras, urdf_ds_name, distance=1.5, theta=np.pi/4, angles=[0],
- object_scale=1.0, camera_scale=1.5, background_color=(242, 231, 191),
- show_cameras=False,
- resolution=(640, 480), colormap_rgb=None, object_id_ref=0,
- gui=False,
- use_nms3d=True,
- camera_color=(0.2, 0.2, 0.2, 1.0)):
-
- renderer = BulletSceneRenderer([urdf_ds_name, 'camera'], background_color=background_color, gui=gui)
+def make_scene_renderings(
+ objects,
+ cameras,
+ urdf_ds_name,
+ distance=1.5,
+ theta=np.pi / 4,
+ angles=[0],
+ object_scale=1.0,
+ camera_scale=1.5,
+ background_color=(242, 231, 191),
+ show_cameras=False,
+ resolution=(640, 480),
+ colormap_rgb=None,
+ object_id_ref=0,
+ gui=False,
+ use_nms3d=True,
+ camera_color=(0.2, 0.2, 0.2, 1.0),
+):
+ renderer = BulletSceneRenderer(
+ [urdf_ds_name, "camera"],
+ background_color=background_color,
+ gui=gui,
+ )
urdf_ds = renderer.body_cache.urdf_ds
# Patch the scales for visualization
- is_camera = np.array(['camera' in label for label in urdf_ds.index['label']])
- urdf_ds.index.loc[~is_camera, 'scale'] = object_scale * 0.001
- urdf_ds.index.loc[is_camera, 'scale'] = camera_scale
+ is_camera = np.array(["camera" in label for label in urdf_ds.index["label"]])
+ urdf_ds.index.loc[~is_camera, "scale"] = object_scale * 0.001
+ urdf_ds.index.loc[is_camera, "scale"] = camera_scale
if use_nms3d:
- objects = nms3d(objects, poses_attr='TWO', th=0.04)
+ objects = nms3d(objects, poses_attr="TWO", th=0.04)
objects = objects.cpu()
objects.TWO = objects.poses
if colormap_rgb is None:
- colormap_rgb, _ = make_colormaps(objects.infos['label'])
- objects.infos['color'] = objects.infos['label'].apply(lambda k: colormap_rgb[k])
+ colormap_rgb, _ = make_colormaps(objects.infos["label"])
+ objects.infos["color"] = objects.infos["label"].apply(lambda k: colormap_rgb[k])
cameras = cameras.cpu()
TWWB = objects.poses[object_id_ref]
@@ -93,46 +111,48 @@ def make_scene_renderings(objects, cameras, urdf_ds_name, distance=1.5, theta=np
for obj_id in range(len(objects)):
TWO = np.linalg.inv(TWWB) @ objects.TWO[obj_id].numpy()
TWO[:3, -1] *= object_scale
- obj = dict(
- name=objects.infos.loc[obj_id, 'label'],
- color=objects.infos.loc[obj_id, 'color'],
- TWO=TWO,
- )
+ obj = {
+ "name": objects.infos.loc[obj_id, "label"],
+ "color": objects.infos.loc[obj_id, "color"],
+ "TWO": TWO,
+ }
list_objects.append(obj)
- target = np.mean(np.stack([obj['TWO'][:3, -1] for obj in list_objects]), axis=0)
+ target = np.mean(np.stack([obj["TWO"][:3, -1] for obj in list_objects]), axis=0)
if show_cameras:
for cam_id in range(len(cameras)):
- obj = dict(
- name='camera',
- color=camera_color,
- TWO=np.linalg.inv(TWWB) @ cameras.TWC[cam_id].numpy()
- )
+ obj = {
+ "name": "camera",
+ "color": camera_color,
+ "TWO": np.linalg.inv(TWWB) @ cameras.TWC[cam_id].numpy(),
+ }
list_objects.append(obj)
fx, fy = 515, 515
w, h = resolution
- K = np.array([
- [fx, 0, w/2],
- [0, fy, h/2],
- [0, 0, 1]
- ])
+ K = np.array(
+ [
+ [fx, 0, w / 2],
+ [0, fy, h / 2],
+ [0, 0, 1],
+ ],
+ )
list_cameras = []
for phi in angles:
x = distance * np.sin(theta) * np.cos(phi)
y = distance * np.sin(theta) * np.sin(phi)
z = distance * np.cos(theta)
t = np.array([x, y, z])
- R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes='sxyz')
- R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi/2, axes='sxyz')
+ R = transforms3d.euler.euler2mat(np.pi, theta, phi, axes="sxyz")
+ R = R @ transforms3d.euler.euler2mat(0, 0, -np.pi / 2, axes="sxyz")
t += np.array(target)
TWC = Transform(R, t).toHomogeneousMatrix()
TWBC = TWWB @ TWC
list_cameras.append(
- dict(K=K, TWC=TWC, resolution=(w, h))
+ {"K": K, "TWC": TWC, "resolution": (w, h)},
)
renders = renderer.render_scene(list_objects, list_cameras)
- images = np.stack([render['rgb'] for render in renders])
+ images = np.stack([render["rgb"] for render in renders])
if gui:
time.sleep(100)
renderer.disconnect()
@@ -141,24 +161,33 @@ def make_scene_renderings(objects, cameras, urdf_ds_name, distance=1.5, theta=np
def make_colormaps(labels):
colors_hex = sns.color_palette(n_colors=len(labels)).as_hex()
- colormap_hex = {label: color for label, color in zip(labels, colors_hex)}
- colormap_rgb = {k: [int(h[1:][i:i+2], 16) / 255. for i in (0, 2, 4)] + [1.0] for k, h in colormap_hex.items()}
+ colormap_hex = dict(zip(labels, colors_hex))
+ colormap_rgb = {
+ k: [int(h[1:][i : i + 2], 16) / 255.0 for i in (0, 2, 4)] + [1.0]
+ for k, h in colormap_hex.items()
+ }
return colormap_rgb, colormap_hex
def mark_inliers(cand_inputs, cand_matched):
- inliers_infos = cand_matched.infos[['scene_id', 'view_id', 'label', 'cand_id']].copy()
- inliers_infos['is_inlier'] = True
- infos = cand_inputs.infos.merge(inliers_infos, on=['scene_id', 'view_id', 'label', 'cand_id'], how='left')
- infos['is_inlier'] = infos['is_inlier'].astype(np.float)
- infos.loc[~np.isfinite(infos.loc[:, 'is_inlier'].astype(np.float)), 'is_inlier'] = 0
- infos['is_inlier'] = infos['is_inlier'].astype(np.bool)
+ inliers_infos = cand_matched.infos[
+ ["scene_id", "view_id", "label", "cand_id"]
+ ].copy()
+ inliers_infos["is_inlier"] = True
+ infos = cand_inputs.infos.merge(
+ inliers_infos,
+ on=["scene_id", "view_id", "label", "cand_id"],
+ how="left",
+ )
+ infos["is_inlier"] = infos["is_inlier"].astype(np.float)
+ infos.loc[~np.isfinite(infos.loc[:, "is_inlier"].astype(np.float)), "is_inlier"] = 0
+ infos["is_inlier"] = infos["is_inlier"].astype(np.bool)
cand_inputs.infos = infos
return cand_inputs
def render_predictions_wrt_camera(renderer, preds_with_colors, camera):
- for k in ('K', 'resolution'):
+ for k in ("K", "resolution"):
assert k in camera
camera = deepcopy(camera)
@@ -166,159 +195,214 @@ def render_predictions_wrt_camera(renderer, preds_with_colors, camera):
list_objects = []
for n in range(len(preds_with_colors)):
row = preds_with_colors.infos.iloc[n]
- obj = dict(
- name=row.label,
- color=row.color,
- TWO=preds_with_colors.poses[n].cpu().numpy(),
- )
+ obj = {
+ "name": row.label,
+ "color": row.color,
+ "TWO": preds_with_colors.poses[n].cpu().numpy(),
+ }
list_objects.append(obj)
- rgb_rendered = renderer.render_scene(list_objects, [camera])[0]['rgb']
+ rgb_rendered = renderer.render_scene(list_objects, [camera])[0]["rgb"]
return rgb_rendered
def render_gt(renderer, objects, camera, colormap_rgb):
camera = deepcopy(camera)
- TWC = camera['TWC']
+ TWC = camera["TWC"]
for obj in objects:
- obj['color'] = colormap_rgb[obj['label']]
- obj['TWO'] = np.linalg.inv(TWC) @ obj['TWO']
- camera['TWC'] = np.eye(4)
- rgb_rendered = renderer.render_scene(objects, [camera])[0]['rgb']
+ obj["color"] = colormap_rgb[obj["label"]]
+ obj["TWO"] = np.linalg.inv(TWC) @ obj["TWO"]
+ camera["TWC"] = np.eye(4)
+ rgb_rendered = renderer.render_scene(objects, [camera])[0]["rgb"]
return rgb_rendered
def add_colors_to_predictions(predictions, colormap):
- predictions.infos['color'] = predictions.infos['label'].map(colormap)
+ predictions.infos["color"] = predictions.infos["label"].map(colormap)
return predictions
-def make_cosypose_plots(scene_ds, scene_id, view_ids,
- dict_predictions, renderer,
- use_class_colors_for_3d=True,
- use_nms3d=True,
- inlier_color=(0, 1, 0, 1.0),
- outlier_color=(1.0, 0, 0, 0.3)):
+def make_cosypose_plots(
+ scene_ds,
+ scene_id,
+ view_ids,
+ dict_predictions,
+ renderer,
+ use_class_colors_for_3d=True,
+ use_nms3d=True,
+ inlier_color=(0, 1, 0, 1.0),
+ outlier_color=(1.0, 0, 0, 0.3),
+):
plotter = Plotter()
scene_ds_index = scene_ds.frame_index
- scene_ds_index['ds_idx'] = np.arange(len(scene_ds_index))
- scene_ds_index = scene_ds_index.set_index(['scene_id', 'view_id'])
+ scene_ds_index["ds_idx"] = np.arange(len(scene_ds_index))
+ scene_ds_index = scene_ds_index.set_index(["scene_id", "view_id"])
- _, _, gt_state = scene_ds[scene_ds_index.loc[(scene_id, view_ids[0]), 'ds_idx']]
- scene_labels = set([obj['label'] for obj in gt_state['objects']])
+ _, _, gt_state = scene_ds[scene_ds_index.loc[(scene_id, view_ids[0]), "ds_idx"]]
+ scene_labels = {obj["label"] for obj in gt_state["objects"]}
- preds_by_view = dict()
+ preds_by_view = {}
for view_id in view_ids:
- this_view_dict_preds = dict()
- for k in ('cand_inputs', 'cand_matched', 'ba_output'):
+ this_view_dict_preds = {}
+ for k in ("cand_inputs", "cand_matched", "ba_output"):
assert k in dict_predictions
- scene_labels = scene_labels.union(set(dict_predictions[k].infos['label'].values.tolist()))
+ scene_labels = scene_labels.union(
+ set(dict_predictions[k].infos["label"].values.tolist()),
+ )
pred_infos = dict_predictions[k].infos
- keep = np.logical_and(pred_infos['scene_id'] == scene_id,
- np.isin(pred_infos['view_id'], view_id))
+ keep = np.logical_and(
+ pred_infos["scene_id"] == scene_id,
+ np.isin(pred_infos["view_id"], view_id),
+ )
this_view_dict_preds[k] = dict_predictions[k][np.where(keep)[0]]
preds_by_view[view_id] = this_view_dict_preds
colormap_rgb, colormap_hex = make_colormaps(scene_labels)
- colormap_rgb_3d = colormap_rgb if use_class_colors_for_3d else defaultdict(lambda: (1, 1, 1, 1))
+ colormap_rgb_3d = (
+ colormap_rgb if use_class_colors_for_3d else defaultdict(lambda: (1, 1, 1, 1))
+ )
fig_array = []
for view_id in view_ids:
this_view_dict_preds = preds_by_view[view_id]
- input_rgb, _, gt_state = scene_ds[scene_ds_index.loc[(scene_id, view_id), 'ds_idx']]
+ input_rgb, _, gt_state = scene_ds[
+ scene_ds_index.loc[(scene_id, view_id), "ds_idx"]
+ ]
fig_input_im = plotter.plot_image(input_rgb)
# Detections
- detections = this_view_dict_preds['cand_inputs']
+ detections = this_view_dict_preds["cand_inputs"]
bboxes = detections.initial_bboxes
- bboxes = bboxes + torch.as_tensor(np.random.randint(30, size=((len(bboxes), 4)))).to(bboxes.dtype).to(bboxes.device)
+ bboxes = bboxes + torch.as_tensor(
+            np.random.randint(30, size=(len(bboxes), 4)),
+ ).to(bboxes.dtype).to(bboxes.device)
detections.bboxes = bboxes
detections = add_colors_to_predictions(detections, colormap_hex)
fig_detections = plotter.plot_image(input_rgb)
- fig_detections = plotter.plot_maskrcnn_bboxes(fig_detections, detections, colors=detections.infos['color'].tolist())
+ fig_detections = plotter.plot_maskrcnn_bboxes(
+ fig_detections,
+ detections,
+ colors=detections.infos["color"].tolist(),
+ )
# fig_array.append([fig_input_im, fig_detections])
# Candidates
- cand_inputs = this_view_dict_preds['cand_inputs']
- cand_matched = this_view_dict_preds['cand_matched']
+ cand_inputs = this_view_dict_preds["cand_inputs"]
+ cand_matched = this_view_dict_preds["cand_matched"]
cand_inputs = mark_inliers(cand_inputs, cand_matched)
- colors = np.array([inlier_color if is_inlier else outlier_color for is_inlier in cand_inputs.infos['is_inlier']])
- cand_inputs.infos['color'] = colors.tolist()
+ colors = np.array(
+ [
+ inlier_color if is_inlier else outlier_color
+ for is_inlier in cand_inputs.infos["is_inlier"]
+ ],
+ )
+ cand_inputs.infos["color"] = colors.tolist()
- cand_rgb_rendered = render_predictions_wrt_camera(renderer, cand_inputs, gt_state['camera'])
+ cand_rgb_rendered = render_predictions_wrt_camera(
+ renderer,
+ cand_inputs,
+ gt_state["camera"],
+ )
fig_cand = plotter.plot_overlay(input_rgb, cand_rgb_rendered)
# Scene reconstruction
- ba_outputs = this_view_dict_preds['ba_output']
+ ba_outputs = this_view_dict_preds["ba_output"]
if use_nms3d:
ba_outputs = nms3d(ba_outputs)
ba_outputs = add_colors_to_predictions(ba_outputs, colormap_rgb_3d)
- outputs_rgb_rendered = render_predictions_wrt_camera(renderer, ba_outputs, gt_state['camera'])
+ outputs_rgb_rendered = render_predictions_wrt_camera(
+ renderer,
+ ba_outputs,
+ gt_state["camera"],
+ )
fig_outputs = plotter.plot_overlay(input_rgb, outputs_rgb_rendered)
- gt_rgb_rendered = render_gt(renderer, gt_state['objects'], gt_state['camera'], colormap_rgb_3d)
+ gt_rgb_rendered = render_gt(
+ renderer,
+ gt_state["objects"],
+ gt_state["camera"],
+ colormap_rgb_3d,
+ )
fig_gt = plotter.plot_overlay(input_rgb, gt_rgb_rendered)
fig_array.append([fig_input_im, fig_detections, fig_cand, fig_outputs, fig_gt])
return fig_array
-def render_candidates(scene_ds, scene_id, view_ids,
- colormap_rgb, dict_predictions, renderer):
-
+def render_candidates(
+ scene_ds,
+ scene_id,
+ view_ids,
+ colormap_rgb,
+ dict_predictions,
+ renderer,
+):
plotter = Plotter()
scene_ds_index = scene_ds.frame_index
- scene_ds_index['ds_idx'] = np.arange(len(scene_ds_index))
- scene_ds_index = scene_ds_index.set_index(['scene_id', 'view_id'])
+ scene_ds_index["ds_idx"] = np.arange(len(scene_ds_index))
+ scene_ds_index = scene_ds_index.set_index(["scene_id", "view_id"])
- preds_by_view = dict()
+ preds_by_view = {}
for view_id in view_ids:
- this_view_dict_preds = dict()
- for k in ('cand_inputs', 'cand_matched', 'ba_output'):
+ this_view_dict_preds = {}
+ for k in ("cand_inputs", "cand_matched", "ba_output"):
assert k in dict_predictions
pred_infos = dict_predictions[k].infos
- keep = np.logical_and(pred_infos['scene_id'] == scene_id,
- np.isin(pred_infos['view_id'], view_id))
+ keep = np.logical_and(
+ pred_infos["scene_id"] == scene_id,
+ np.isin(pred_infos["view_id"], view_id),
+ )
this_view_dict_preds[k] = dict_predictions[k][np.where(keep)[0]]
preds_by_view[view_id] = this_view_dict_preds
figures_by_view = []
for view_id in view_ids:
- this_view_figures = dict()
+ this_view_figures = {}
figures_by_view.append(this_view_figures)
this_view_dict_preds = preds_by_view[view_id]
- input_rgb, _, gt_state = scene_ds[scene_ds_index.loc[(scene_id, view_id), 'ds_idx']]
+ input_rgb, _, gt_state = scene_ds[
+ scene_ds_index.loc[(scene_id, view_id), "ds_idx"]
+ ]
fig_input_im = plotter.plot_image(input_rgb)
- this_view_figures['input_im'] = fig_input_im
+ this_view_figures["input_im"] = fig_input_im
# Detections
- detections = this_view_dict_preds['cand_inputs']
+ detections = this_view_dict_preds["cand_inputs"]
bboxes = detections.initial_bboxes
- bboxes = bboxes + torch.as_tensor(np.random.randint(10, size=((len(bboxes), 4)))).to(bboxes.dtype).to(bboxes.device)
+ bboxes = bboxes + torch.as_tensor(
+            np.random.randint(10, size=(len(bboxes), 4)),
+ ).to(bboxes.dtype).to(bboxes.device)
detections.bboxes = bboxes
- this_view_figures['input_im'] = fig_input_im
+ this_view_figures["input_im"] = fig_input_im
- detections = add_colors_to_predictions(detections, lambda k: '#FFFF00')
+ detections = add_colors_to_predictions(detections, lambda k: "#FFFF00")
fig_detections = plotter.plot_image(input_rgb)
- fig_detections = plotter.plot_maskrcnn_bboxes(fig_detections, detections,
- text_auto=False,
- colors=detections.infos['color'].tolist())
- this_view_figures['detections'] = fig_detections
+ fig_detections = plotter.plot_maskrcnn_bboxes(
+ fig_detections,
+ detections,
+ text_auto=False,
+ colors=detections.infos["color"].tolist(),
+ )
+ this_view_figures["detections"] = fig_detections
detections = add_colors_to_predictions(detections, colormap_rgb)
fig_candidates = []
fig_candidates_black = []
for cand_id in range(len(detections)):
cand_pred = detections[[cand_id]]
- rgb_rendered = render_predictions_wrt_camera(renderer, cand_pred, gt_state['camera'])
+ rgb_rendered = render_predictions_wrt_camera(
+ renderer,
+ cand_pred,
+ gt_state["camera"],
+ )
fig = plotter.plot_overlay(input_rgb, rgb_rendered)
fig_candidates.append(fig)
fig = plotter.plot_image(rgb_rendered)
fig_candidates_black.append(fig)
- this_view_figures['candidates'] = fig_candidates
- this_view_figures['candidates_black'] = fig_candidates_black
+ this_view_figures["candidates"] = fig_candidates
+ this_view_figures["candidates_black"] = fig_candidates_black
return figures_by_view, preds_by_view
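A hedged sketch of `nms3d` on a toy input (identity poses, hypothetical label; the suppression outcome is the expected behaviour, not a tested result):

```
import pandas as pd
import torch

from happypose.pose_estimators.cosypose.cosypose.utils import tensor_collection as tc
from happypose.pose_estimators.cosypose.cosypose.visualization.multiview import nms3d

poses = torch.eye(4).repeat(2, 1, 1)
poses[1, 0, 3] = 0.01  # second pose sits 1 cm from the first, below th=0.04

preds = tc.PandasTensorCollection(
    infos=pd.DataFrame({"label": ["obj", "obj"], "score": [0.9, 0.5]}),
    poses=poses,
)
kept = nms3d(preds, th=0.04)
# Expected: the lower-scoring duplicate within 4 cm is suppressed.
```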
diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/plotter.py b/happypose/pose_estimators/cosypose/cosypose/visualization/plotter.py
index d10efb3c..2f3129a5 100644
--- a/happypose/pose_estimators/cosypose/cosypose/visualization/plotter.py
+++ b/happypose/pose_estimators/cosypose/cosypose/visualization/plotter.py
@@ -1,16 +1,16 @@
-import bokeh
-import numpy as np
-from PIL import Image
from itertools import cycle
-import torch
-import seaborn as sns
-from .bokeh_utils import plot_image, to_rgba, make_image_figure, image_figure
+import numpy as np
+import seaborn as sns
+import torch
from bokeh.models import ColumnDataSource, LabelSet
+from PIL import Image
+
+from .bokeh_utils import make_image_figure, to_rgba
class Plotter:
- source_map = dict()
+ source_map = {}
@property
def hex_colors(self):
@@ -30,13 +30,24 @@ def plot_overlay(self, rgb_input, rgb_rendered):
overlay[~mask] = rgb_input[~mask] * 0.6 + 255 * 0.4
overlay[mask] = rgb_rendered[mask] * 0.8 + 255 * 0.2
# overlay[mask] = rgb_rendered[mask] * 0.3 + rgb_input[mask] * 0.7
- f = self.plot_image(overlay, name='image')
+ f = self.plot_image(overlay, name="image")
return f
- def plot_maskrcnn_bboxes(self, f, detections, colors='red', text=None, text_auto=True, line_width=2, source_id=''):
+ def plot_maskrcnn_bboxes(
+ self,
+ f,
+ detections,
+ colors="red",
+ text=None,
+ text_auto=True,
+ line_width=2,
+ source_id="",
+ ):
boxes = detections.bboxes
if text_auto:
- text = [f'{row.label} {row.score:.2f}' for _, row in detections.infos.iterrows()]
+ text = [
+ f"{row.label} {row.score:.2f}" for _, row in detections.infos.iterrows()
+ ]
boxes = self.numpy(boxes)
xs = []
@@ -62,20 +73,32 @@ def plot_maskrcnn_bboxes(self, f, detections, colors='red', text=None, text_auto
if text is not None:
text_x.append(x1)
text_y.append(y1)
- source, new = self.get_source(f'{f.id}/{source_id}/bboxes')
+ source, new = self.get_source(f"{f.id}/{source_id}/bboxes")
if new:
- f.patches(xs='xs', ys='ys', source=source,
- line_width=line_width, color='colors', fill_alpha=0.0)
+ f.patches(
+ xs="xs",
+ ys="ys",
+ source=source,
+ line_width=line_width,
+ color="colors",
+ fill_alpha=0.0,
+ )
if text is not None:
- labelset = LabelSet(x='text_x', y='text_y', text='text',
- text_align='left', text_baseline='bottom',
- text_color='white',
- source=source, background_fill_color='colors',
- text_font_size="5pt")
+ labelset = LabelSet(
+ x="text_x",
+ y="text_y",
+ text="text",
+ text_align="left",
+ text_baseline="bottom",
+ text_color="white",
+ source=source,
+ background_fill_color="colors",
+ text_font_size="5pt",
+ )
f.add_layout(labelset)
- data = dict(xs=xs, ys=ys, colors=patch_colors)
+ data = {"xs": xs, "ys": ys, "colors": patch_colors}
if text is not None:
data.update(text_x=text_x, text_y=text_y, text=text)
source.data = data
@@ -96,25 +119,29 @@ def plot_mask_overlay(self, im, mask, th=0.9, alpha=0.8, figure=None):
new_fig = figure is None
mask = self.numpy(mask)
h, w = mask.shape
- mask_rgba = self._make_rgba_instance_segm(mask > 0.9, colors=self.colors, alpha=alpha)
+ mask_rgba = self._make_rgba_instance_segm(
+ mask > 0.9,
+ colors=self.colors,
+ alpha=alpha,
+ )
if new_fig:
figure = make_image_figure(im_size=(w, h), axes=False)
- source, new = self.get_source(f'{figure.id}/mask')
+ source, new = self.get_source(f"{figure.id}/mask")
if new:
- figure.image_rgba('rgb', x=0, y=0, dw=w, dh=h, source=source)
- figure.image_rgba('mask', x=0, y=0, dw=w, dh=h, source=source)
+ figure.image_rgba("rgb", x=0, y=0, dw=w, dh=h, source=source)
+ figure.image_rgba("mask", x=0, y=0, dw=w, dh=h, source=source)
- source.data = dict(rgb=[to_rgba(im)], mask=[to_rgba(mask_rgba)])
+ source.data = {"rgb": [to_rgba(im)], "mask": [to_rgba(mask_rgba)]}
return figure
def masks_to_instance_segm(self, masks, thresh=0.9):
masks = torch.as_tensor(masks).cpu().float()
segm = torch.zeros(masks.shape[-2:], dtype=torch.uint8)
for n, mask_n in enumerate(masks):
- m = torch.as_tensor(mask_n > thresh)
segm[mask_n > thresh] = n + 1
return segm
@@ -126,10 +153,10 @@ def plot_predictions_overlay(self, figure, im, alpha=0.5):
rgba[..., -1] = alpha * 255
rgba[..., :3] = im
rgba[im.sum(axis=-1) == 255 * 3, -1] = 120
- source, new = self.get_source(f'{figure.id}/segm')
+ source, new = self.get_source(f"{figure.id}/segm")
if new:
- figure.image_rgba('image', x=0, y=0, dw=w, dh=h, source=source)
- source.data = dict(image=[to_rgba(rgba)])
+ figure.image_rgba("image", x=0, y=0, dw=w, dh=h, source=source)
+ source.data = {"image": [to_rgba(rgba)]}
return figure
def plot_segm_overlay(self, im, segm, alpha=0.8, figure=None):
@@ -149,16 +176,16 @@ def plot_segm_overlay(self, im, segm, alpha=0.8, figure=None):
if new_fig:
figure = make_image_figure(im_size=(w, h), axes=False)
- source, new = self.get_source(f'{figure.id}/segm')
+ source, new = self.get_source(f"{figure.id}/segm")
if new:
- figure.image_rgba('rgb', x=0, y=0, dw=w, dh=h, source=source)
- figure.image_rgba('segm', x=0, y=0, dw=w, dh=h, source=source)
+ figure.image_rgba("rgb", x=0, y=0, dw=w, dh=h, source=source)
+ figure.image_rgba("segm", x=0, y=0, dw=w, dh=h, source=source)
- source.data = dict(rgb=[to_rgba(im)], segm=[to_rgba(segm_rgba)])
+ source.data = {"rgb": [to_rgba(im)], "segm": [to_rgba(segm_rgba)]}
return figure
- def plot_image(self, im, figure=None, name='image'):
+ def plot_image(self, im, figure=None, name="image"):
im = self.numpy(im)
if im.shape[0] == 3:
im = im.transpose((1, 2, 0))
@@ -171,12 +198,12 @@ def plot_image(self, im, figure=None, name='image'):
if new_fig:
figure = make_image_figure(im_size=(w, h), axes=False)
- source, new = self.get_source(f'{figure.id}/{name}')
+ source, new = self.get_source(f"{figure.id}/{name}")
if new:
- figure.image_rgba('image', x=0, y=0, dw=w, dh=h, source=source)
+ figure.image_rgba("image", x=0, y=0, dw=w, dh=h, source=source)
- source.data = dict(image=[to_rgba(im)])
+ source.data = {"image": [to_rgba(im)]}
return figure
def numpy(self, x):
diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py b/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py
index acb72f1e..e651489b 100644
--- a/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py
+++ b/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py
@@ -1,17 +1,21 @@
import numpy as np
-from .plotter import Plotter
+from happypose.pose_estimators.cosypose.cosypose.datasets.augmentations import (
+ CropResizeToAspectAugmentation,
+)
+from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.augmentation_wrapper import ( # noqa: E501
+ AugmentationWrapper,
+)
-from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.augmentation_wrapper import AugmentationWrapper
-from happypose.pose_estimators.cosypose.cosypose.datasets.augmentations import CropResizeToAspectAugmentation
+from .plotter import Plotter
def filter_predictions(preds, scene_id, view_id=None, th=None):
- mask = preds.infos['scene_id'] == scene_id
+ mask = preds.infos["scene_id"] == scene_id
if view_id is not None:
- mask = np.logical_and(mask, preds.infos['view_id'] == view_id)
+ mask = np.logical_and(mask, preds.infos["view_id"] == view_id)
if th is not None:
- mask = np.logical_and(mask, preds.infos['score'] >= th)
+ mask = np.logical_and(mask, preds.infos["score"] >= th)
keep_ids = np.where(mask)[0]
preds = preds[keep_ids]
return preds
@@ -24,40 +28,53 @@ def render_prediction_wrt_camera(renderer, pred, camera=None, resolution=(640, 4
list_objects = []
for n in range(len(pred)):
row = pred.infos.iloc[n]
- obj = dict(
- name=row.label,
- color=(1, 1, 1, 1),
- TWO=pred.poses[n].numpy(),
- )
+ obj = {
+ "name": row.label,
+ "color": (1, 1, 1, 1),
+ "TWO": pred.poses[n].numpy(),
+ }
list_objects.append(obj)
- rgb_rendered = renderer.render_scene(list_objects, [camera])[0]['rgb']
+ rgb_rendered = renderer.render_scene(list_objects, [camera])[0]["rgb"]
return rgb_rendered
-def make_singleview_prediction_plots(scene_ds, renderer, predictions, detections=None, resolution=(640, 480)):
+def make_singleview_prediction_plots(
+ scene_ds,
+ renderer,
+ predictions,
+ detections=None,
+ resolution=(640, 480),
+):
plotter = Plotter()
- scene_id, view_id = np.unique(predictions.infos['scene_id']).item(), np.unique(predictions.infos['view_id']).item()
+ scene_id, view_id = (
+ np.unique(predictions.infos["scene_id"]).item(),
+ np.unique(predictions.infos["view_id"]).item(),
+ )
scene_ds_index = scene_ds.frame_index
- scene_ds_index['ds_idx'] = np.arange(len(scene_ds_index))
- scene_ds_index = scene_ds_index.set_index(['scene_id', 'view_id'])
- idx = scene_ds_index.loc[(scene_id, view_id), 'ds_idx']
+ scene_ds_index["ds_idx"] = np.arange(len(scene_ds_index))
+ scene_ds_index = scene_ds_index.set_index(["scene_id", "view_id"])
+ idx = scene_ds_index.loc[(scene_id, view_id), "ds_idx"]
augmentation = CropResizeToAspectAugmentation(resize=resolution)
scene_ds = AugmentationWrapper(scene_ds, augmentation)
rgb_input, mask, state = scene_ds[idx]
- figures = dict()
+ figures = {}
- figures['input_im'] = plotter.plot_image(rgb_input)
+ figures["input_im"] = plotter.plot_image(rgb_input)
if detections is not None:
fig_dets = plotter.plot_image(rgb_input)
fig_dets = plotter.plot_maskrcnn_bboxes(fig_dets, detections)
- figures['detections'] = fig_dets
-
- pred_rendered = render_prediction_wrt_camera(renderer, predictions, camera=state['camera'])
- figures['pred_rendered'] = plotter.plot_image(pred_rendered)
- figures['pred_overlay'] = plotter.plot_overlay(rgb_input, pred_rendered)
+ figures["detections"] = fig_dets
+
+ pred_rendered = render_prediction_wrt_camera(
+ renderer,
+ predictions,
+ camera=state["camera"],
+ )
+ figures["pred_rendered"] = plotter.plot_image(pred_rendered)
+ figures["pred_overlay"] = plotter.plot_overlay(rgb_input, pred_rendered)
return figures
diff --git a/happypose/pose_estimators/cosypose/cosypose_demos/.gitignore b/happypose/pose_estimators/cosypose/cosypose_demos/.gitignore
index ba0430d2..c18dd8d8 100644
--- a/happypose/pose_estimators/cosypose/cosypose_demos/.gitignore
+++ b/happypose/pose_estimators/cosypose/cosypose_demos/.gitignore
@@ -1 +1 @@
-__pycache__/
\ No newline at end of file
+__pycache__/
diff --git a/happypose/pose_estimators/cosypose/data/assets/camera/camera_mesh.obj b/happypose/pose_estimators/cosypose/data/assets/camera/camera_mesh.obj
index 0103de01..a1f48ea6 100644
--- a/happypose/pose_estimators/cosypose/data/assets/camera/camera_mesh.obj
+++ b/happypose/pose_estimators/cosypose/data/assets/camera/camera_mesh.obj
@@ -41,4 +41,4 @@ f 6//6 2//2 8//8
f 8//8 2//2 4//4
# 12 faces, 0 coords texture
-# End of File
\ No newline at end of file
+# End of File
diff --git a/happypose/pose_estimators/cosypose/data/assets/camera/cube.obj b/happypose/pose_estimators/cosypose/data/assets/camera/cube.obj
index c4d834c6..ef06c983 100644
--- a/happypose/pose_estimators/cosypose/data/assets/camera/cube.obj
+++ b/happypose/pose_estimators/cosypose/data/assets/camera/cube.obj
@@ -1,8 +1,8 @@
# cube.obj
#
-
+
g cube
-
+
v 0.0 0.0 0.0
v 0.0 0.0 1.0
v 0.0 1.0 0.0
@@ -18,16 +18,16 @@ vn 0.0 1.0 0.0
vn 0.0 -1.0 0.0
vn 1.0 0.0 0.0
vn -1.0 0.0 0.0
-
+
f 1//2 7//2 5//2
-f 1//2 3//2 7//2
-f 1//6 4//6 3//6
-f 1//6 2//6 4//6
-f 3//3 8//3 7//3
-f 3//3 4//3 8//3
-f 5//5 7//5 8//5
-f 5//5 8//5 6//5
-f 1//4 5//4 6//4
-f 1//4 6//4 2//4
-f 2//1 6//1 8//1
-f 2//1 8//1 4//1
+f 1//2 3//2 7//2
+f 1//6 4//6 3//6
+f 1//6 2//6 4//6
+f 3//3 8//3 7//3
+f 3//3 4//3 8//3
+f 5//5 7//5 8//5
+f 5//5 8//5 6//5
+f 1//4 5//4 6//4
+f 1//4 6//4 2//4
+f 2//1 6//1 8//1
+f 2//1 8//1 4//1
diff --git a/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf b/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf
index f1538efc..d7a25b0b 100644
--- a/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf
+++ b/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf
@@ -29,4 +29,3 @@
-
diff --git a/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf.tmp b/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf.tmp
deleted file mode 100644
index 24ea051d..00000000
--- a/happypose/pose_estimators/cosypose/data/assets/camera/model.urdf.tmp
+++ /dev/null
@@ -1,20 +0,0 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/happypose/pose_estimators/cosypose/data/assets/cube/model.urdf b/happypose/pose_estimators/cosypose/data/assets/cube/model.urdf
old mode 100755
new mode 100644
index f2333cf5..cf033e52
--- a/happypose/pose_estimators/cosypose/data/assets/cube/model.urdf
+++ b/happypose/pose_estimators/cosypose/data/assets/cube/model.urdf
@@ -17,6 +17,5 @@
-
+
-
diff --git a/happypose/pose_estimators/cosypose/data/assets/cube/model_vhacd.obj b/happypose/pose_estimators/cosypose/data/assets/cube/model_vhacd.obj
old mode 100755
new mode 100644
index 59069742..59b9aa4c
--- a/happypose/pose_estimators/cosypose/data/assets/cube/model_vhacd.obj
+++ b/happypose/pose_estimators/cosypose/data/assets/cube/model_vhacd.obj
@@ -7,15 +7,15 @@ v -0.500000 0.500000 0.500000
v 0.500000 -0.500000 -0.500000
v -0.500000 -0.500000 0.500000
v -0.500000 0.500000 -0.500000
-f 5 4 8
-f 2 3 4
-f 3 2 5
-f 2 4 5
-f 3 1 6
-f 4 3 6
-f 1 4 6
-f 1 3 7
-f 5 1 7
-f 3 5 7
-f 4 1 8
-f 1 5 8
+f 5 4 8
+f 2 3 4
+f 3 2 5
+f 2 4 5
+f 3 1 6
+f 4 3 6
+f 1 4 6
+f 1 3 7
+f 5 1 7
+f 3 5 7
+f 4 1 8
+f 1 5 8
diff --git a/happypose/pose_estimators/cosypose/data/assets/cube/part0.obj b/happypose/pose_estimators/cosypose/data/assets/cube/part0.obj
old mode 100755
new mode 100644
diff --git a/happypose/pose_estimators/cosypose/data/assets/plane/plane.urdf b/happypose/pose_estimators/cosypose/data/assets/plane/plane.urdf
index 525b5ac0..0697e324 100644
--- a/happypose/pose_estimators/cosypose/data/assets/plane/plane.urdf
+++ b/happypose/pose_estimators/cosypose/data/assets/plane/plane.urdf
@@ -26,4 +26,3 @@
-
diff --git a/happypose/pose_estimators/cosypose/data/assets/sphere/sphere2red.urdf b/happypose/pose_estimators/cosypose/data/assets/sphere/sphere2red.urdf
index e7f74fe3..86e0b135 100644
--- a/happypose/pose_estimators/cosypose/data/assets/sphere/sphere2red.urdf
+++ b/happypose/pose_estimators/cosypose/data/assets/sphere/sphere2red.urdf
@@ -14,7 +14,7 @@
-
+
@@ -29,4 +29,3 @@
-
diff --git a/happypose/pose_estimators/cosypose/deps/bop_toolkit_challenge b/happypose/pose_estimators/cosypose/deps/bop_toolkit_challenge
deleted file mode 160000
index f114991b..00000000
--- a/happypose/pose_estimators/cosypose/deps/bop_toolkit_challenge
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f114991b3eea186184127cddfe6a10e2d640e656
diff --git a/happypose/pose_estimators/cosypose/deps/bop_toolkit_cosypose b/happypose/pose_estimators/cosypose/deps/bop_toolkit_cosypose
deleted file mode 160000
index b1308103..00000000
--- a/happypose/pose_estimators/cosypose/deps/bop_toolkit_cosypose
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit b1308103c1e53733d8bbf9ceba2e68fa0b735042
diff --git a/happypose/pose_estimators/cosypose/deps/job-runner b/happypose/pose_estimators/cosypose/deps/job-runner
deleted file mode 160000
index eda91f46..00000000
--- a/happypose/pose_estimators/cosypose/deps/job-runner
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit eda91f467d27096ef008e992245b8a916ffe5e3d
diff --git a/happypose/pose_estimators/cosypose/pyproject.toml b/happypose/pose_estimators/cosypose/pyproject.toml
new file mode 100644
index 00000000..abf12cfc
--- /dev/null
+++ b/happypose/pose_estimators/cosypose/pyproject.toml
@@ -0,0 +1,6 @@
+[build-system]
+build-backend = "setuptools.build_meta"
+requires = [
+ "pybind11>=2.10.0",
+ "setuptools>=42"
+]
diff --git a/happypose/pose_estimators/cosypose/rclone.conf b/happypose/pose_estimators/cosypose/rclone.conf
index 790b8fc1..4f4eee36 100644
--- a/happypose/pose_estimators/cosypose/rclone.conf
+++ b/happypose/pose_estimators/cosypose/rclone.conf
@@ -3,4 +3,3 @@ type = drive
scope = drive.readonly
root_folder_id = 1JmOYbu1oqN81Dlj2lh6NCAMrC8pEdAtD
token = {"access_token":"ya29.a0AVvZVsqTr7tvlzA33tEBMQ4Egcz8hQTjwgbSjTKjh5I305YtzfsVKCYIoLOvYnAyveWfqs6GUft706C3AziXqH7vrvZs0AqQlX_AUOXMQjSDcbOHguKTo8qXge4SFrbP3sD0xuKwMSoP-1PLrfvBE5ZVYsGnnN6CLgaCgYKAYYSAQASFQGbdwaI_8qNV-1ETonlDq4Pgg7ABw0169","token_type":"Bearer","refresh_token":"1//03Y__9oDQaadTCgYIARAAGAMSNwF-L9Ir1Ekfa8SGW8WupnLR2QsdRU0Su-QXIWeqAST-mbmtg2OGWB01vblv_GNyQXLjzD9b35A","expiry":"2023-01-26T16:03:53.549850469+01:00"}
-
diff --git a/happypose/pose_estimators/cosypose/setup.py b/happypose/pose_estimators/cosypose/setup.py
index 44f2499f..d424a388 100644
--- a/happypose/pose_estimators/cosypose/setup.py
+++ b/happypose/pose_estimators/cosypose/setup.py
@@ -1,29 +1,16 @@
-import os
-from setuptools import setup, find_packages
-from torch.utils.cpp_extension import BuildExtension, CppExtension
-from os import path
+from pybind11.setup_helpers import Pybind11Extension, build_ext
+from setuptools import setup
-here = path.abspath(path.dirname(__file__))
-
-# Use correct conda compiler used to build pytorch
-os.environ['CXX'] = os.environ.get('GXX', '')
+ext_modules = [
+ Pybind11Extension("cosypose_cext", ["cosypose/csrc/cosypose_cext.cpp"]),
+]
setup(
- name='cosypose',
- version='1.0.0',
- description='CosyPose',
- packages=find_packages(),
- ext_modules=[
- CppExtension(
- name='cosypose_cext',
- sources=[
- 'cosypose/csrc/cosypose_cext.cpp'
- ],
- extra_compile_args=['-O3'],
- verbose=True
- )
- ],
- cmdclass={
- 'build_ext': BuildExtension
- }
+ name="cosypose",
+ version="1.0.0",
+ description="CosyPose",
+ ext_modules=ext_modules,
+ cmdclass={"build_ext": build_ext},
+ zip_safe=False,
+ python_requires=">=3.9",
)
diff --git a/happypose/pose_estimators/cosypose/tox.ini b/happypose/pose_estimators/cosypose/tox.ini
index 7bec2034..b00851f9 100644
--- a/happypose/pose_estimators/cosypose/tox.ini
+++ b/happypose/pose_estimators/cosypose/tox.ini
@@ -4,4 +4,4 @@ markers =
[flake8]
ignore = E231, E226, E731
-max-line-length = 120
\ No newline at end of file
+max-line-length = 120
diff --git a/happypose/pose_estimators/megapose/CLA b/happypose/pose_estimators/megapose/CLA
index c93b751c..f47f66b9 100644
--- a/happypose/pose_estimators/megapose/CLA
+++ b/happypose/pose_estimators/megapose/CLA
@@ -21,4 +21,4 @@ Disclaimer.
To the fullest extent permitted under applicable law, your Contributions are provided on an "as is" basis, without any warranties or conditions, express or implied, including, without limitation, any implied warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not required to provide support for your Contributions, except to the extent you desire to provide support.
No Obligation.
-You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions into the project. The decision to use or incorporate your contributions into the project will be made at the sole discretion of the maintainers or their authorized delegates.
\ No newline at end of file
+You acknowledge that the maintainers of this project are under no obligation to use or incorporate your contributions into the project. The decision to use or incorporate your contributions into the project will be made at the sole discretion of the maintainers or their authorized delegates.
diff --git a/happypose/pose_estimators/megapose/LICENSE b/happypose/pose_estimators/megapose/LICENSE
index ed3b434f..6a3914c9 100644
--- a/happypose/pose_estimators/megapose/LICENSE
+++ b/happypose/pose_estimators/megapose/LICENSE
@@ -10,4 +10,4 @@ Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
-limitations under the License.
\ No newline at end of file
+limitations under the License.
diff --git a/happypose/pose_estimators/megapose/README.md b/happypose/pose_estimators/megapose/README.md
index d8b02e1e..0bbf6f2f 100644
--- a/happypose/pose_estimators/megapose/README.md
+++ b/happypose/pose_estimators/megapose/README.md
@@ -1,5 +1,5 @@
# MegaPose
-This repository contains code, models and dataset for our MegaPose paper.
+This repository contains code, models, and the dataset for our MegaPose paper.
Yann Labbé, Lucas Manuelli, Arsalan Mousavian, Stephen Tyree, Stan Birchfield, Jonathan Tremblay, Justin Carpentier, Mathieu Aubry, Dieter Fox, Josef Sivic. “MegaPose: 6D Pose Estimation of Novel Objects via Render & Compare.” In: CoRL 2022.
@@ -33,15 +33,15 @@ This repository contains pre-trained models for pose estimation of novel objects
## Pose estimation of novel objects
-We provide pre-trained models for 6D pose estimation of novel objects.
+We provide pre-trained models for 6D pose estimation of novel objects.
-Given as inputs:
+Given as inputs:
- an RGB image (depth can also be used but is optional),
- the intrinsic parameters of the camera,
- a mesh of the object,
- a bounding box of that object in the image,
-our approach estimates the 6D pose of the object (3D rotation + 3D translation) with respect to the camera.
+our approach estimates the 6D pose of the object (3D rotation + 3D translation) with respect to the camera.
We provide a script and an example for inference on novel objects. After installation, please see the [Inference tutorial](#inference-tutorial).
@@ -52,7 +52,7 @@ We provide the synthetic dataset we used to train MegaPose. The dataset contains
# Installation
-Once you are done with the installation, we recommend you head to the [Inference tutorial](#inference-tutorial).
+Once you are done with the installation, we recommend you head to the [Inference tutorial](#inference-tutorial).
The first step is to clone the repo and submodules:
```
@@ -84,7 +84,7 @@ pip install -e .
Click for details...
### Create a conda environment
-Creat a conda environment with `python==3.9`. We will use this conda environment to manage a small number of dependencies needed for
+Create a conda environment with `python==3.9`. We will use this conda environment to manage a small number of dependencies needed for
```
conda env create -f conda/environment.yaml
@@ -257,7 +257,7 @@ python -m megapose.scripts.run_inference_on_example barbecue-sauce --vis-detecti
## 3. Run pose estimation and visualize results
Run inference with the following command:
```
-python -m megapose.scripts.run_inference_on_example barbecue-sauce --run-inference
+python -m megapose.scripts.run_inference_on_example barbecue-sauce --run-inference
```
 By default, the model only uses the RGB input. You can use one of our RGB-D megapose models using the `--model` argument. Please see our [Model Zoo](#model-zoo) for all models available.
@@ -272,7 +272,7 @@ This file contains a list of objects with their estimated poses . For each objec
[{"label": "barbecue-sauce", "TWO": [[0.5453961536730983, 0.6226545207599095, -0.43295293693197473, 0.35692612413663855], [0.10723329335451126, 0.07313819974660873, 0.45735278725624084]]}]
-Finally, you can visualize the results using:
+Finally, you can visualize the results using:
```
python -m megapose.scripts.run_inference_on_example barbecue-sauce --vis-outputs
@@ -312,7 +312,7 @@ For optimal performance, we recommend using `megapose-1.0-RGB-multi-hypothesis`
## Dataset information
The dataset is available at this [url](https://drive.google.com/drive/folders/1CXc_GG11jNVMeGr-Mb4o4iiNjYeKDkKd?usp=sharing). It is split into two datasets: `gso_1M` (Google Scanned Objects) and `shapenet_1M` (ShapeNet objects). Each dataset has 1 million images which were generated using [BlenderProc](https://github.com/DLR-RM/BlenderProc).
-Datasets are released in the [webdataset](https://github.com/webdataset/webdataset) format for high reading performance. Each dataset is split into chunks of size ~600MB containing 1000 images each.
+Datasets are released in the [webdataset](https://github.com/webdataset/webdataset) format for high reading performance. Each dataset is split into chunks of size ~600MB containing 1000 images each.
We provide the pre-processed meshes ready to be used for rendering and training in this [directory](https://drive.google.com/drive/folders/1AYxkv7jpDniOnTcMAxiWbdhPo8WBJaZG):
- `google_scanned_objects.zip`
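As a hedged sketch of reading one of these chunks (the shard filename is hypothetical; substitute a downloaded one):

```
import webdataset as wds

dataset = wds.WebDataset("gso_1M/shard-000000.tar")
for sample in dataset:
    print(sorted(sample.keys()))  # keys mirror the files of each record
    break
```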
diff --git a/happypose/pose_estimators/megapose/__init__.py b/happypose/pose_estimators/megapose/__init__.py
index a5524b94..64ee07fa 100644
--- a/happypose/pose_estimators/megapose/__init__.py
+++ b/happypose/pose_estimators/megapose/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +14,8 @@
"""
-
# Standard Library
import os
-import cv2
def assign_gpu() -> None:
@@ -40,10 +37,13 @@ def assign_gpu() -> None:
os.environ["OMP_NUM_THREADS"] = "1"
if "EGL_VISIBLE_DEVICES" not in os.environ:
- os.environ['EGL_VISIBLE_DEVICES'] = '0'
+ os.environ["EGL_VISIBLE_DEVICES"] = "0"
for k in (
- "MKL_NUM_THREADS", "OMP_NUM_THREADS",
- "CUDA_VISIBLE_DEVICES", "EGL_VISIBLE_DEVICES"):
+ "MKL_NUM_THREADS",
+ "OMP_NUM_THREADS",
+ "CUDA_VISIBLE_DEVICES",
+ "EGL_VISIBLE_DEVICES",
+):
if k in os.environ:
print(f"{k}: {os.environ[k]}")
diff --git a/happypose/pose_estimators/megapose/bop_config.py b/happypose/pose_estimators/megapose/bop_config.py
index 6c6e752a..fb0d21fa 100644
--- a/happypose/pose_estimators/megapose/bop_config.py
+++ b/happypose/pose_estimators/megapose/bop_config.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,183 +16,187 @@
# Cosypose models
-BOP_CONFIG = dict()
-BOP_CONFIG["hb"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="hb",
- obj_ds_name="hb",
- train_pbr_ds_name=["hb.pbr"],
- inference_ds_name=["hb.bop19"],
- test_ds_name=[],
-)
-
-BOP_CONFIG["icbin"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="icbin",
- obj_ds_name="icbin",
- train_pbr_ds_name=["icbin.pbr"],
- inference_ds_name=["icbin.bop19"],
- test_ds_name=["icbin.bop19"],
-)
-
-
-BOP_CONFIG["itodd"] = dict(
- input_resize=(1280, 960),
- urdf_ds_name="itodd",
- obj_ds_name="itodd",
- train_pbr_ds_name=["itodd.pbr"],
- inference_ds_name=["itodd.bop19"],
- test_ds_name=[],
- val_ds_name=["itodd.val"],
-)
-
-
-BOP_CONFIG["lmo"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="lm",
- obj_ds_name="lm",
- train_pbr_ds_name=["lm.pbr"],
- train_synt_real_ds_names=[
+BOP_CONFIG = {}
+BOP_CONFIG["hb"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "hb",
+ "obj_ds_name": "hb",
+ "train_pbr_ds_name": ["hb.pbr"],
+ "inference_ds_name": ["hb.bop19"],
+ "test_ds_name": [],
+}
+
+BOP_CONFIG["icbin"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "icbin",
+ "obj_ds_name": "icbin",
+ "train_pbr_ds_name": ["icbin.pbr"],
+ "inference_ds_name": ["icbin.bop19"],
+ "test_ds_name": ["icbin.bop19"],
+}
+
+
+BOP_CONFIG["itodd"] = {
+ "input_resize": (1280, 960),
+ "urdf_ds_name": "itodd",
+ "obj_ds_name": "itodd",
+ "train_pbr_ds_name": ["itodd.pbr"],
+ "inference_ds_name": ["itodd.bop19"],
+ "test_ds_name": [],
+ "val_ds_name": ["itodd.val"],
+}
+
+
+BOP_CONFIG["lmo"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "lm",
+ "obj_ds_name": "lm",
+ "train_pbr_ds_name": ["lm.pbr"],
+ "train_synt_real_ds_names": [
("lm.pbr", 1),
],
- inference_ds_name=["lmo.bop19"],
- test_ds_name=["lmo.bop19"],
-)
-
-BOP_CONFIG["lm"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="lm",
- obj_ds_name="lm",
- train_pbr_ds_name=["lm.pbr"],
- train_synt_real_ds_names=[
+ "inference_ds_name": ["lmo.bop19"],
+ "test_ds_name": ["lmo.bop19"],
+}
+
+BOP_CONFIG["lm"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "lm",
+ "obj_ds_name": "lm",
+ "train_pbr_ds_name": ["lm.pbr"],
+ "train_synt_real_ds_names": [
("lm.pbr", 1),
],
-)
-
-
-BOP_CONFIG["tless"] = dict(
- input_resize=(720, 540),
- urdf_ds_name="tless.cad",
- obj_ds_name="tless.cad",
- train_pbr_ds_name=["tless.pbr"],
- inference_ds_name=["tless.bop19"],
- test_ds_name=["tless.bop19"],
- train_synt_real_ds_names=[("tless.pbr", 4), ("tless.primesense.train", 1)],
- train_opengl_ds_names=[("tless.opengl", 1)],
- train_mysynt_ds_names=[("synthetic.tless-1M.train", 1)],
-)
-
-BOP_CONFIG["tudl"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="tudl",
- obj_ds_name="tudl",
- train_pbr_ds_name=["tudl.pbr"],
- inference_ds_name=["tudl.bop19"],
- test_ds_name=["tudl.bop19"],
- train_synt_real_ds_names=[("tudl.pbr", 10), ("tudl.train.real", 1)],
- train_opengl_ds_names=[("tudl.opengl", 1)],
- train_mysynt_ds_names=[("synthetic.tudl-1M.train", 1)],
-)
-
-
-BOP_CONFIG["ycbv"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="ycbv",
- obj_ds_name="ycbv",
- train_pbr_ds_name=["ycbv.pbr"],
- train_pbr_real_ds_names=[("ycbv.pbr", 1), ()],
- inference_ds_name=["ycbv.bop19"],
- test_ds_name=["ycbv.bop19"],
- train_synt_real_ds_names=[("ycbv.pbr", 20), ("ycbv.train.synt", 1), ("ycbv.train.real", 3)],
- train_opengl_ds_names=[("ycbv.opengl", 1)],
- train_mysynt_ds_names=[("synthetic.ycbv-1M.train", 1)],
-)
-
-BOP_CONFIG["ruapc"] = dict(
+}
+
+
+BOP_CONFIG["tless"] = {
+ "input_resize": (720, 540),
+ "urdf_ds_name": "tless.cad",
+ "obj_ds_name": "tless.cad",
+ "train_pbr_ds_name": ["tless.pbr"],
+ "inference_ds_name": ["tless.bop19"],
+ "test_ds_name": ["tless.bop19"],
+ "train_synt_real_ds_names": [("tless.pbr", 4), ("tless.primesense.train", 1)],
+ "train_opengl_ds_names": [("tless.opengl", 1)],
+ "train_mysynt_ds_names": [("synthetic.tless-1M.train", 1)],
+}
+
+BOP_CONFIG["tudl"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "tudl",
+ "obj_ds_name": "tudl",
+ "train_pbr_ds_name": ["tudl.pbr"],
+ "inference_ds_name": ["tudl.bop19"],
+ "test_ds_name": ["tudl.bop19"],
+ "train_synt_real_ds_names": [("tudl.pbr", 10), ("tudl.train.real", 1)],
+ "train_opengl_ds_names": [("tudl.opengl", 1)],
+ "train_mysynt_ds_names": [("synthetic.tudl-1M.train", 1)],
+}
+
+
+BOP_CONFIG["ycbv"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "ycbv",
+ "obj_ds_name": "ycbv",
+ "train_pbr_ds_name": ["ycbv.pbr"],
+ "train_pbr_real_ds_names": [("ycbv.pbr", 1), ()],
+ "inference_ds_name": ["ycbv.bop19"],
+ "test_ds_name": ["ycbv.bop19"],
+ "train_synt_real_ds_names": [
+ ("ycbv.pbr", 20),
+ ("ycbv.train.synt", 1),
+ ("ycbv.train.real", 3),
+ ],
+ "train_opengl_ds_names": [("ycbv.opengl", 1)],
+ "train_mysynt_ds_names": [("synthetic.ycbv-1M.train", 1)],
+}
+
+BOP_CONFIG["ruapc"] = {
# TODO: input resize
- input_resize=(640, 480),
- urdf_ds_name="ruapc",
- obj_ds_name="ruapc",
- train_pbr_ds_name=[],
- train_pbr_real_ds_names=[],
- inference_ds_name=["ruapc.bop19"],
- test_ds_name=["ruapc.bop19"],
-)
-
-BOP_CONFIG["tyol"] = dict(
+ "input_resize": (640, 480),
+ "urdf_ds_name": "ruapc",
+ "obj_ds_name": "ruapc",
+ "train_pbr_ds_name": [],
+ "train_pbr_real_ds_names": [],
+ "inference_ds_name": ["ruapc.bop19"],
+ "test_ds_name": ["ruapc.bop19"],
+}
+
+BOP_CONFIG["tyol"] = {
# TODO: input resize
- input_resize=(640, 480),
- urdf_ds_name="tyol",
- obj_ds_name="tyol",
- train_pbr_ds_name=[],
- train_pbr_real_ds_names=[],
- inference_ds_name=["tyol.bop19"],
- test_ds_name=["tyol.bop19"],
-)
-
-BOP_CONFIG["moped"] = dict(
- input_resize=(640, 480),
- urdf_ds_name="moped",
- obj_ds_name="moped",
- train_pbr_ds_name=[],
- train_pbr_real_ds_names=[],
- inference_ds_name=["moped"],
- test_ds_name=["moped"],
-)
-
-for k, v in BOP_CONFIG.items():
+ "input_resize": (640, 480),
+ "urdf_ds_name": "tyol",
+ "obj_ds_name": "tyol",
+ "train_pbr_ds_name": [],
+ "train_pbr_real_ds_names": [],
+ "inference_ds_name": ["tyol.bop19"],
+ "test_ds_name": ["tyol.bop19"],
+}
+
+BOP_CONFIG["moped"] = {
+ "input_resize": (640, 480),
+ "urdf_ds_name": "moped",
+ "obj_ds_name": "moped",
+ "train_pbr_ds_name": [],
+ "train_pbr_real_ds_names": [],
+ "inference_ds_name": ["moped"],
+ "test_ds_name": ["moped"],
+}
+
+for _k, v in BOP_CONFIG.items():
v["panda3d_obj_ds_name"] = v["obj_ds_name"] + ".panda3d"
-PBR_DETECTORS = dict(
- hb="detector-bop-hb-pbr--497808",
- icbin="detector-bop-icbin-pbr--947409",
- itodd="detector-bop-itodd-pbr--509908",
- lmo="detector-bop-lmo-pbr--517542",
- tless="detector-bop-tless-pbr--873074",
- tudl="detector-bop-tudl-pbr--728047",
- ycbv="detector-bop-ycbv-pbr--970850",
- hope="detector-bop-hope-pbr--15246",
-)
-
-PBR_COARSE = dict(
- hb="coarse-bop-hb-pbr--70752",
- icbin="coarse-bop-icbin-pbr--915044",
- itodd="coarse-bop-itodd-pbr--681884",
- lmo="coarse-bop-lmo-pbr--707448",
- tless="coarse-bop-tless-pbr--506801",
- tudl="coarse-bop-tudl-pbr--373484",
- ycbv="coarse-bop-ycbv-pbr--724183",
- hope="bop-hope-pbr-coarse-transnoise-zxyavg-225203",
-)
-
-PBR_REFINER = dict(
- hb="refiner-bop-hb-pbr--247731",
- icbin="refiner-bop-icbin-pbr--841882",
- itodd="refiner-bop-itodd-pbr--834427",
- lmo="refiner-bop-lmo-pbr--325214",
- tless="refiner-bop-tless-pbr--233420",
- tudl="refiner-bop-tudl-pbr--487212",
- ycbv="refiner-bop-ycbv-pbr--604090",
- hope="bop-hope-pbr-refiner--955392",
-)
-
-SYNT_REAL_DETECTORS = dict(
- tudl="detector-bop-tudl-synt+real--298779",
- tless="detector-bop-tless-synt+real--452847",
- ycbv="detector-bop-ycbv-synt+real--292971",
-)
-
-SYNT_REAL_COARSE = dict(
- tudl="coarse-bop-tudl-synt+real--610074",
- tless="coarse-bop-tless-synt+real--160982",
- ycbv="coarse-bop-ycbv-synt+real--822463",
-)
-
-SYNT_REAL_REFINER = dict(
- tudl="refiner-bop-tudl-synt+real--423239",
- tless="refiner-bop-tless-synt+real--881314",
- ycbv="refiner-bop-ycbv-synt+real--631598",
-)
+PBR_DETECTORS = {
+ "hb": "detector-bop-hb-pbr--497808",
+ "icbin": "detector-bop-icbin-pbr--947409",
+ "itodd": "detector-bop-itodd-pbr--509908",
+ "lmo": "detector-bop-lmo-pbr--517542",
+ "tless": "detector-bop-tless-pbr--873074",
+ "tudl": "detector-bop-tudl-pbr--728047",
+ "ycbv": "detector-bop-ycbv-pbr--970850",
+ "hope": "detector-bop-hope-pbr--15246",
+}
+
+PBR_COARSE = {
+ "hb": "coarse-bop-hb-pbr--70752",
+ "icbin": "coarse-bop-icbin-pbr--915044",
+ "itodd": "coarse-bop-itodd-pbr--681884",
+ "lmo": "coarse-bop-lmo-pbr--707448",
+ "tless": "coarse-bop-tless-pbr--506801",
+ "tudl": "coarse-bop-tudl-pbr--373484",
+ "ycbv": "coarse-bop-ycbv-pbr--724183",
+ "hope": "bop-hope-pbr-coarse-transnoise-zxyavg-225203",
+}
+
+PBR_REFINER = {
+ "hb": "refiner-bop-hb-pbr--247731",
+ "icbin": "refiner-bop-icbin-pbr--841882",
+ "itodd": "refiner-bop-itodd-pbr--834427",
+ "lmo": "refiner-bop-lmo-pbr--325214",
+ "tless": "refiner-bop-tless-pbr--233420",
+ "tudl": "refiner-bop-tudl-pbr--487212",
+ "ycbv": "refiner-bop-ycbv-pbr--604090",
+ "hope": "bop-hope-pbr-refiner--955392",
+}
+
+SYNT_REAL_DETECTORS = {
+ "tudl": "detector-bop-tudl-synt+real--298779",
+ "tless": "detector-bop-tless-synt+real--452847",
+ "ycbv": "detector-bop-ycbv-synt+real--292971",
+}
+
+SYNT_REAL_COARSE = {
+ "tudl": "coarse-bop-tudl-synt+real--610074",
+ "tless": "coarse-bop-tless-synt+real--160982",
+ "ycbv": "coarse-bop-ycbv-synt+real--822463",
+}
+
+SYNT_REAL_REFINER = {
+ "tudl": "refiner-bop-tudl-synt+real--423239",
+ "tless": "refiner-bop-tless-synt+real--881314",
+ "ycbv": "refiner-bop-ycbv-synt+real--631598",
+}
for k, v in PBR_COARSE.items():
if k not in SYNT_REAL_COARSE:
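Aside: a minimal sketch of how the registries above are typically combined to select a model triplet for one dataset. The helper name pbr_run_ids is hypothetical, not part of the module:

    def pbr_run_ids(ds_name: str) -> dict:
        # Look up the PBR-trained detector / coarse / refiner run ids.
        return {
            "detector_run_id": PBR_DETECTORS[ds_name],
            "coarse_run_id": PBR_COARSE[ds_name],
            "refiner_run_id": PBR_REFINER[ds_name],
        }

    # pbr_run_ids("ycbv")["coarse_run_id"] -> "coarse-bop-ycbv-pbr--724183"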
diff --git a/happypose/pose_estimators/megapose/config.py b/happypose/pose_estimators/megapose/config.py
index de80ac98..03244045 100644
--- a/happypose/pose_estimators/megapose/config.py
+++ b/happypose/pose_estimators/megapose/config.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,6 +16,7 @@
# Standard Library
import os
+import sys
from pathlib import Path
# Third Party
@@ -28,7 +28,9 @@
PROJECT_ROOT = Path(happypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
-LOCAL_DATA_DIR = Path(os.environ.get("HAPPYPOSE_DATA_DIR", Path(PROJECT_DIR) / "local_data"))
+LOCAL_DATA_DIR = Path(
+ os.environ.get("HAPPYPOSE_DATA_DIR", Path(PROJECT_DIR) / "local_data"),
+)
BOP_DS_DIR = LOCAL_DATA_DIR / "bop_datasets"
NB_DATA_DIR = LOCAL_DATA_DIR / "notebook_data"
SHAPENET_DIR = LOCAL_DATA_DIR / "shapenetcorev2"
@@ -51,7 +53,11 @@
if not BLENDER_INSTALL_DIR.exists():
BLENDER_INSTALL_DIR = Path(os.environ["HOME"]) / BLENDER_VERSION
-PYTHON_BIN_PATH = Path(os.environ["CONDA_PREFIX"]) / "bin/python"
+PYTHON_BIN_PATH = (
+ Path(os.environ["CONDA_PREFIX"]) / "bin/python"
+ if "CONDA_PREFIX" in os.environ
+ else Path(sys.executable)
+)
BOP_PANDA3D_DS_DIR = LOCAL_DATA_DIR / "bop_datasets"
@@ -101,14 +107,12 @@
"tv_stand",
]
-SHAPENET_MODELNET_CATEGORIES = set(
- [
- "guitar",
- "bathtub,bathing tub,bath,tub",
- "bookshelf",
- "sofa,couch,lounge",
- ]
-)
+SHAPENET_MODELNET_CATEGORIES = {
+ "guitar",
+ "bathtub,bathing tub,bath,tub",
+ "bookshelf",
+ "sofa,couch,lounge",
+}
YCBV_OBJECT_NAMES = [
["obj_000001", "01_master_chef_can"],
diff --git a/happypose/pose_estimators/megapose/datasets/__init__.py b/happypose/pose_estimators/megapose/datasets/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/datasets/__init__.py
+++ b/happypose/pose_estimators/megapose/datasets/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/docker/Dockerfile.megapose b/happypose/pose_estimators/megapose/docker/Dockerfile.megapose
index c5236c60..f5d5d38e 100644
--- a/happypose/pose_estimators/megapose/docker/Dockerfile.megapose
+++ b/happypose/pose_estimators/megapose/docker/Dockerfile.megapose
@@ -10,7 +10,7 @@ SHELL ["/bin/bash", "-c"]
ENV DEBIAN_FRONTEND noninteractive
-# Needed to deal with this issue
+# Needed to deal with this issue
# https://forums.developer.nvidia.com/t/the-repository-https-developer-download-nvidia-com-compute-cuda-repos-ubuntu1804-x86-64-release-does-not-have-a-release-file/175263
RUN apt-get install -y --no-install-recommends ca-certificates \
&& rm -rf /var/lib/apt/lists/* \
@@ -26,7 +26,7 @@ RUN rm -f /etc/apt/sources.list.d/nvidia-ml.list
RUN apt update && apt upgrade -y && \
- apt install -y vim tar wget htop xorg openbox bzip2 \
+ apt install -y vim tar wget htop xorg openbox bzip2 \
tar apt-utils
# Install Anaconda
@@ -59,7 +59,7 @@ RUN source $CONDA_DIR/bin/activate && \
scipy pypng h5py seaborn kornia meshcat pyarrow dt_apriltags open3d structlog \
imageio
-# Blender
+# Blender
RUN cd $HOME && \
wget https://mirrors.dotsrc.org/blender/release/Blender2.93/blender-2.93.8-linux-x64.tar.xz && \
tar -xvf blender-2.93.8-linux-x64.tar.xz && rm blender-2.93.8-linux-x64.tar.xz
diff --git a/happypose/pose_estimators/megapose/evaluation/__init__.py b/happypose/pose_estimators/megapose/evaluation/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/evaluation/__init__.py
+++ b/happypose/pose_estimators/megapose/evaluation/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/evaluation/bop.py b/happypose/pose_estimators/megapose/evaluation/bop.py
index 0729e167..e1cfe076 100644
--- a/happypose/pose_estimators/megapose/evaluation/bop.py
+++ b/happypose/pose_estimators/megapose/evaluation/bop.py
@@ -15,7 +15,6 @@
"""
-
# Standard Library
import argparse
import importlib
@@ -23,21 +22,29 @@
import os
import subprocess
import sys
-import pandas as pd
from pathlib import Path
# Third Party
import numpy as np
+import pandas as pd
import torch
from tqdm import tqdm
# MegaPose
-from happypose.pose_estimators.megapose.config import BOP_TOOLKIT_DIR, LOCAL_DATA_DIR, PROJECT_DIR
+from happypose.pose_estimators.megapose.config import (
+ BOP_TOOLKIT_DIR,
+ LOCAL_DATA_DIR,
+ PROJECT_DIR,
+)
from happypose.pose_estimators.megapose.evaluation.eval_config import BOPEvalConfig
from happypose.toolbox.datasets.scene_dataset import ObjectData
from happypose.toolbox.inference.utils import make_detections_from_object_data
+from happypose.toolbox.utils.tensor_collection import (
+ PandasTensorCollection,
+ filter_top_pose_estimates,
+)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Note we are actually using the bop_toolkit_lib that is directly conda installed
# inside the docker image. This is just to access the scripts.
@@ -46,47 +53,7 @@
DUMMY_EVAL_SCRIPT_PATH = BOP_TOOLKIT_DIR / "scripts/eval_bop19_dummy.py"
-##################################
-##################################
-import os
-
-# Official Task 4 detections (CNOS fastSAM)
-EXTERNAL_DETECTIONS_FILES = {
- "ycbv": 'cnos-fastsam_ycbv-test_f4f2127c-6f59-447c-95b3-28e1e591f1a1.json',
- "lmo": 'cnos-fastsam_lmo-test_3cb298ea-e2eb-4713-ae9e-5a7134c5da0f.json',
- "tless": 'cnos-fastsam_tless-test_8ca61cb0-4472-4f11-bce7-1362a12d396f.json',
- "tudl": 'cnos-fastsam_tudl-test_c48a2a95-1b41-4a51-9920-a667cb3d7149.json',
- "icbin": 'cnos-fastsam_icbin-test_f21a9faf-7ef2-4325-885f-f4b6460f4432.json',
- "itodd": 'cnos-fastsam_itodd-test_df32d45b-301c-4fc9-8769-797904dd9325.json',
- "hb": 'cnos-fastsam_hb-test_db836947-020a-45bd-8ec5-c95560b68011.json',
-}
-
-
-# # Official Task 1 detections (gdrnppdet-pbrreal)
-# EXTERNAL_DETECTIONS_FILES = {
-# "ycbv": 'gdrnppdet-pbrreal_ycbv-test_abe6c5f1-cb26-4bbd-addc-bb76dd722a96.json',
-# "lmo": 'gdrnppdet-pbrreal_lmo-test_202a2f15-cbd0-49df-90de-650428c6d157.json',
-# "tless": 'gdrnppdet-pbrreal_tless-test_e112ecb4-7f56-4107-8a21-945bc7661267.json',
-# "tudl": 'gdrnppdet-pbrreal_tudl-test_66fd26f1-bebf-493b-a42a-d71e8d10c479.json',
-# "icbin": 'gdrnppdet-pbrreal_icbin-test_a46668ed-f76b-40ca-9954-708b198c2ab0.json',
-# "itodd": 'gdrnppdet-pbrreal_itodd-test_9559c160-9507-4d09-94a5-ef0d6e8f22ce.json',
-# "hb": 'gdrnppdet-pbrreal_hb-test_94485f5a-98ea-48f1-9472-06f4ceecad41.json',
-# }
-
-
-EXTERNAL_DETECTIONS_DIR = os.environ.get('EXTERNAL_DETECTIONS_DIR')
-assert(EXTERNAL_DETECTIONS_DIR is not None)
-EXTERNAL_DETECTIONS_DIR = Path(EXTERNAL_DETECTIONS_DIR)
-
-CNOS_SUBMISSION_PATHS = {ds_name: EXTERNAL_DETECTIONS_DIR / fname for ds_name, fname in EXTERNAL_DETECTIONS_FILES.items()}
-# Check if all paths exist
-assert( sum(p.exists() for p in CNOS_SUBMISSION_PATHS.values()) == len(EXTERNAL_DETECTIONS_FILES))
-##################################
-##################################
-
-
# Third Party
-import bop_toolkit_lib
from bop_toolkit_lib import inout # noqa
@@ -142,11 +109,13 @@ def convert_results_to_coco(results_path, out_json_path, detection_method):
def convert_results_to_bop(
- results_path: Path, out_csv_path: Path, method: str,
- use_pose_score: bool = True
+ results_path: Path, out_csv_path: Path, method: str, use_pose_score: bool = True
):
predictions = torch.load(results_path)["predictions"]
predictions = predictions[method]
+ if method == "coarse":
+ predictions = get_best_coarse_predictions(predictions)
+
print("Predictions from:", results_path)
print("Method:", method)
print("Number of predictions: ", len(predictions))
@@ -181,16 +150,32 @@ def convert_results_to_bop(
inout.save_bop_results(out_csv_path, preds)
return out_csv_path
+
+def get_best_coarse_predictions(coarse_preds: PandasTensorCollection):
+ group_cols = ["scene_id", "view_id", "label", "instance_id"]
+ coarse_preds = filter_top_pose_estimates(
+ coarse_preds,
+ top_K=1,
+ group_cols=group_cols,
+ filter_field="coarse_score",
+ ascending=False,
+ )
+ coarse_preds.infos = coarse_preds.infos.rename(
+ columns={"coarse_score": "pose_score"}
+ )
+ return coarse_preds
+
+
def _run_bop_evaluation(filename, eval_dir, eval_detection=False, dummy=False):
myenv = os.environ.copy()
myenv["PYTHONPATH"] = BOP_TOOLKIT_DIR.as_posix()
- ld_library_path = os.environ['LD_LIBRARY_PATH']
- conda_prefix = os.environ['CONDA_PREFIX']
- myenv["LD_LIBRARY_PATH"] = f'{conda_prefix}/lib:{ld_library_path}'
+ ld_library_path = os.environ["LD_LIBRARY_PATH"]
+ conda_prefix = os.environ["CONDA_PREFIX"]
+ myenv["LD_LIBRARY_PATH"] = f"{conda_prefix}/lib:{ld_library_path}"
myenv["BOP_DATASETS_PATH"] = str(LOCAL_DATA_DIR / "bop_datasets")
myenv["BOP_RESULTS_PATH"] = str(eval_dir)
myenv["BOP_EVAL_PATH"] = str(eval_dir)
- renderer_type = 'vispy' # other options: 'cpp', 'python'
+ renderer_type = "vispy" # other options: 'cpp', 'python'
if dummy:
cmd = [
"python",
@@ -241,11 +226,15 @@ def run_evaluation(cfg: BOPEvalConfig) -> None:
csv_path = eval_dir / f"{method}_{cfg.dataset.split('.')[0]}-{cfg.split}.csv"
# pose scores give better AR scores in general
- convert_results_to_bop(results_path, csv_path, cfg.method, use_pose_score=cfg.use_post_score)
+ convert_results_to_bop(
+ results_path, csv_path, cfg.method, use_pose_score=cfg.use_post_score
+ )
if not cfg.convert_only:
_run_bop_evaluation(csv_path, cfg.eval_dir, eval_detection=False)
- scores_pose_path = eval_dir / csv_path.with_suffix("").name / "scores_bop19.json"
+ scores_pose_path = (
+ eval_dir / csv_path.with_suffix("").name / "scores_bop19.json"
+ )
scores_detection_path = None
if cfg.detection_method is not None:
@@ -262,92 +251,112 @@ def run_evaluation(cfg: BOPEvalConfig) -> None:
return scores_pose_path, scores_detection_path
-def load_sam_predictions(ds_dir_name, scene_ds_dir):
- ds_name = ds_dir_name
- detections_path = CNOS_SUBMISSION_PATHS[ds_name]
+
+def load_external_detections(scene_ds_dir: Path):
"""
- # dets_lst: list of dictionary, each element = detection of one object in an image
- $ df_all_dets[0].keys()
- > ['scene_id', 'image_id', 'category_id', 'bbox', 'score', 'time', 'segmentation']
- - For the evaluation of Megapose, we only need the 'scene_id', 'image_id', 'category_id', 'score', 'time' and 'bbox'
- - We also need need to change the format of bounding boxes as explained below
+    Load detections saved by an external method (e.g. CNOS) for this dataset.
"""
+ ds_name = scene_ds_dir.name
+
+ bop_detections_paths = get_external_detections_paths()
+ detections_path = bop_detections_paths[ds_name]
+
dets_lst = []
for det in json.loads(detections_path.read_text()):
- # We don't need the segmentation mask (not always present in the submissions)
- if 'segmentation' in det:
- del det['segmentation']
- # Bounding box formats:
- # - BOP format: [xmin, ymin, width, height]
- # - Megapose expects: [xmin, ymin, xmax, ymax]
- x, y, w, h = det['bbox']
- det['bbox'] = [float(v) for v in [x, y, x+w, y+h]]
- det['bbox_modal'] = det['bbox']
-
- # HACK: object models are same in lm and lmo -> obj labels start with 'lm'
- if ds_name == 'lmo':
- ds_name = 'lm'
-
- det['label'] = '{}-obj_{}'.format(ds_name, str(det["category_id"]).zfill(6))
-
+ det = format_det_bop2megapose(det, ds_name)
dets_lst.append(det)
df_all_dets = pd.DataFrame.from_records(dets_lst)
-
df_targets = pd.read_json(scene_ds_dir / "test_targets_bop19.json")
-
return df_all_dets, df_targets
-def get_sam_detections(data, df_all_dets, df_targets, dt_det):
- # We assume a unique image ("view") associated with a unique scene_id is
- im_info = data['im_infos'][0]
- scene_id, view_id = im_info['scene_id'], im_info['view_id']
-
- df_dets_scene_img = df_all_dets.loc[(df_all_dets['scene_id'] == scene_id) & (df_all_dets['image_id'] == view_id)]
- df_targets_scene_img = df_targets[(df_targets['scene_id'] == scene_id) & (df_targets['im_id'] == view_id)]
-
- dt_det += df_dets_scene_img.time.iloc[0]
-
- #################
- # Filter detections based on 2 criteria
- # - 1) Localization 6D task: we can assume that we know which object category and how many instances
- # are present in the image
- obj_ids = df_targets_scene_img.obj_id.to_list()
- df_dets_scene_img_obj_filt = df_dets_scene_img[df_dets_scene_img['category_id'].isin(obj_ids)]
- # In case none of the detections category ids match the ones present in the scene,
- # keep only one detection to avoid downstream error
- if len(df_dets_scene_img_obj_filt) > 0:
- df_dets_scene_img = df_dets_scene_img_obj_filt
- else:
- df_dets_scene_img = df_dets_scene_img[:1]
- # TODO: retain only corresponding inst_count number for each detection category_id
+def get_external_detections_paths():
+ EXTERNAL_DETECTIONS_DIR = os.environ.get("EXTERNAL_DETECTIONS_DIR")
+ assert EXTERNAL_DETECTIONS_DIR is not None
+ EXTERNAL_DETECTIONS_DIR = Path(EXTERNAL_DETECTIONS_DIR)
+
+ files_name_path = EXTERNAL_DETECTIONS_DIR / "bop_detections_filenames.json"
+ try:
+ bop_detections_filenames = json.loads(files_name_path.read_text())
+ except json.decoder.JSONDecodeError as e:
+ print("Check json formatting {files_name_path.as_posix()}")
+        raise  # bare raise preserves the original traceback
+ bop_detections_paths = {
+ ds_name: EXTERNAL_DETECTIONS_DIR / fname
+ for ds_name, fname in bop_detections_filenames.items()
+ }
+
+ return bop_detections_paths
+
- # - 2) Retain detections with best cnos scores (kind of redundant with finalized 1) )
- # based on expected number of objects in the scene (from groundtruth)
- nb_gt_dets = df_targets_scene_img.inst_count.sum()
-
- # TODO: put that as a parameter somewhere?
- MARGIN = 1 # if 0, some images will have no detections
- K_MULT = 1
- nb_det = K_MULT*nb_gt_dets + MARGIN
- df_dets_scene_img = df_dets_scene_img.sort_values('score', ascending=False).head(nb_det)
- #################
+def format_det_bop2megapose(det, ds_name):
+ # Segmentation mask not needed
+ if "segmentation" in det:
+ del det["segmentation"]
+ # Bounding box formats:
+ # - BOP format: [xmin, ymin, width, height]
+ # - Megapose expects: [xmin, ymin, xmax, ymax]
+ x, y, w, h = det["bbox"]
+ x1, y1, x2, y2 = x, y, x + w, y + h
+ det["bbox"] = [float(v) for v in [x1, y1, x2, y2]]
+ det["bbox_modal"] = det["bbox"]
+
+    # HACK: object models are the same in lm and lmo
+ # -> lmo obj labels actually start with 'lm'
+ if ds_name == "lmo":
+ ds_name = "lm"
+
+ det["label"] = "{}-obj_{}".format(ds_name, str(det["category_id"]).zfill(6))
+
+ return det
+
+
+def filter_detections_scene_view(scene_id, view_id, df_all_dets, df_targets):
+ """
+    Retrieve detections for a scene/view pair, filtered using the BOP targets.
+ """
+ df_dets_scene_img = df_all_dets.loc[
+ (df_all_dets["scene_id"] == scene_id) & (df_all_dets["image_id"] == view_id)
+ ]
+ df_targets_scene_img = df_targets[
+ (df_targets["scene_id"] == scene_id) & (df_targets["im_id"] == view_id)
+ ]
- lst_dets_scene_img = df_dets_scene_img.to_dict('records')
+    # Keep only best detections for objects ("targets") given in bop target file
+    df_dets_scene_img = keep_best_detections(df_dets_scene_img, df_targets_scene_img)
-    if len(lst_dets_scene_img) == 0:
-        raise(ValueError('lst_dets_scene_img empty!: ', f'scene_id: {scene_id}, image_id/view_id: {view_id}'))
+ lst_dets_scene_img = df_dets_scene_img.to_dict("records")
- # Do not forget the scores that are not present in object data
- scores = []
- list_object_data = []
+    # Do not forget the scores, which are not stored in ObjectData
+ scores, list_object_data = [], []
for det in lst_dets_scene_img:
list_object_data.append(ObjectData.from_json(det))
- scores.append(det['score'])
- sam_detections = make_detections_from_object_data(list_object_data).to(device)
- sam_detections.infos['score'] = scores
- return sam_detections
+ scores.append(det["score"])
+ detections = make_detections_from_object_data(list_object_data).to(device)
+ detections.infos["score"] = scores
+ detections.infos["time"] = df_dets_scene_img.time.iloc[0]
+ return detections
+
+
+def keep_best_detections(df_dets_scene_img, df_targets_scene_img):
+ lst_df_target = []
+ nb_targets = len(df_targets_scene_img)
+ for it in range(nb_targets):
+ target = df_targets_scene_img.iloc[it]
+ n_best = target.inst_count
+ df_filt_target = df_dets_scene_img[
+ df_dets_scene_img["category_id"] == target.obj_id
+ ].sort_values("score", ascending=False)[:n_best]
+ if len(df_filt_target) > 0:
+ lst_df_target.append(df_filt_target)
+
+ # if missing dets, keep only one detection to avoid downstream error
+ df_dets_scene_img = (
+ pd.concat(lst_df_target) if len(lst_df_target) > 0 else df_dets_scene_img[:1]
+ )
+
+ return df_dets_scene_img
if __name__ == "__main__":
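To make the new external-detections contract concrete: EXTERNAL_DETECTIONS_DIR must now contain a bop_detections_filenames.json file mapping dataset names to detection files (replacing the hard-coded CNOS table deleted above), and format_det_bop2megapose converts one BOP record in place. A self-contained sketch with illustrative values:

    # bop_detections_filenames.json (hypothetical content):
    #   {"ycbv": "cnos-fastsam_ycbv-test.json", "lmo": "cnos-fastsam_lmo-test.json"}

    det = {"bbox": [10.0, 20.0, 30.0, 40.0], "category_id": 1, "score": 0.9}
    det = format_det_bop2megapose(det, "lmo")
    assert det["bbox"] == [10.0, 20.0, 40.0, 60.0]  # [xmin, ymin, xmax, ymax]
    assert det["label"] == "lm-obj_000001"          # lmo labels reuse the 'lm' prefix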
diff --git a/happypose/pose_estimators/megapose/evaluation/data_utils.py b/happypose/pose_estimators/megapose/evaluation/data_utils.py
index 08dbdbca..e1a149e9 100644
--- a/happypose/pose_estimators/megapose/evaluation/data_utils.py
+++ b/happypose/pose_estimators/megapose/evaluation/data_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
-from typing import List, Optional
+from typing import Optional
# Third Party
import numpy as np
@@ -31,16 +30,18 @@
def parse_obs_data(
obs: SceneObservation,
- object_labels: Optional[List[str]] = None,
+ object_labels: Optional[list[str]] = None,
) -> PandasTensorCollection:
"""Parses object data into PandasTensorCollection.
Args:
+ ----
obs: The scene observation.
object_labels: If specified will only parse information for these
object labels.
Returns:
+ -------
PandasTensorCollection
infos: pd.DataFrame with fields ['label',
'scene_id', 'view_id', 'visib_fract']
@@ -52,23 +53,23 @@ def parse_obs_data(
masks: (optional)
"""
-
- raise ValueError("This function is deprecated.")
+ msg = "This function is deprecated."
+ raise ValueError(msg)
infos = []
TWO = []
bboxes = []
masks = []
TWC = torch.as_tensor(obs.camera_data.TWC.matrix).float()
- for n, obj_data in enumerate(obs.object_datas):
+ for _n, obj_data in enumerate(obs.object_datas):
if object_labels is not None and obj_data.label not in object_labels:
continue
- info = dict(
- label=obj_data.label,
- scene_id=obs.infos.scene_id,
- view_id=obs.infos.view_id,
- visib_fract=getattr(obj_data, "visib_fract", 1),
- )
+ info = {
+ "label": obj_data.label,
+ "scene_id": obs.infos.scene_id,
+ "view_id": obs.infos.view_id,
+ "visib_fract": getattr(obj_data, "visib_fract", 1),
+ }
infos.append(info)
TWO.append(torch.tensor(obj_data.TWO.matrix).float())
bboxes.append(torch.tensor(obj_data.bbox_modal).float())
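The msg-then-raise rewrite above recurs throughout this PR; it matches ruff's EM rules (an assumption about the motivation), which prefer binding the message to a variable so the traceback shows the text rather than a long f-string expression:

    detection_type = "unknown"                       # illustrative value
    msg = f"Unknown detection_type={detection_type}"
    raise ValueError(msg)                            # traceback shows the plain message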
diff --git a/happypose/pose_estimators/megapose/evaluation/eval_config.py b/happypose/pose_estimators/megapose/evaluation/eval_config.py
index 18b628e5..9aba8b1e 100644
--- a/happypose/pose_estimators/megapose/evaluation/eval_config.py
+++ b/happypose/pose_estimators/megapose/evaluation/eval_config.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
from dataclasses import dataclass
-from typing import List, Optional
+from typing import Optional
# MegaPose
from happypose.pose_estimators.megapose.inference.types import InferenceConfig
@@ -41,14 +40,15 @@ class HardwareConfig:
@dataclass
class EvalConfig:
- """Eval Config
+ """Eval Config.
Two options for creating an eval configuration:
1. Create it manually, and set `run_id`.
2. If `run_id` is None, then use `config_id`, `run_comment`and
`run_postfix` to create a `run_id`
- In 2., the parameters of the config are set-up using the function `update_cfg_with_config_id`.
+ In 2., the parameters of the config are set-up using the function
+ `update_cfg_with_config_id`.
"""
# Network
@@ -84,17 +84,16 @@ class EvalConfig:
@dataclass
class FullEvalConfig(EvalConfig):
-
# Full eval
- detection_coarse_types: Optional[List] = None
- ds_names: Optional[List[str]] = None
+ detection_coarse_types: Optional[list] = None
+ ds_names: Optional[list[str]] = None
run_bop_eval: bool = True
- modelnet_categories: Optional[List[str]] = None
+ eval_coarse_also: bool = False
+ convert_only: bool = False
@dataclass
class BOPEvalConfig:
-
results_path: str
dataset: str
split: str
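The typing changes here (List[str] -> list[str], dropped typing.List import) rely on PEP 585 builtin generics, available from Python 3.9 onward, which matches the CI matrix. A minimal sketch of the style now used:

    from typing import Optional

    def select_datasets(ds_names: Optional[list[str]] = None) -> list[str]:
        # Builtin generics replace typing.List / typing.Dict (PEP 585, Python >= 3.9).
        return ds_names or []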
diff --git a/happypose/pose_estimators/megapose/evaluation/evaluation.py b/happypose/pose_estimators/megapose/evaluation/evaluation.py
index 1c010525..0d7cb155 100644
--- a/happypose/pose_estimators/megapose/evaluation/evaluation.py
+++ b/happypose/pose_estimators/megapose/evaluation/evaluation.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
from pathlib import Path
-from typing import Any, Dict, Optional
+from typing import Any, Optional
# Third Party
import torch
@@ -25,13 +24,12 @@
# MegaPose
import happypose
+
# import happypose.pose_estimators.megapose as megapose
import happypose.pose_estimators.megapose.evaluation.evaluation_runner
import happypose.toolbox.datasets.datasets_cfg
import happypose.toolbox.inference.utils
-from happypose.pose_estimators.megapose.evaluation.eval_config import (
- EvalConfig,
-)
+from happypose.pose_estimators.megapose.evaluation.eval_config import EvalConfig
from happypose.pose_estimators.megapose.evaluation.evaluation_runner import (
EvaluationRunner,
)
@@ -41,20 +39,11 @@
from happypose.pose_estimators.megapose.evaluation.prediction_runner import (
PredictionRunner,
)
-from happypose.pose_estimators.megapose.evaluation.runner_utils import (
- format_results,
-)
-from happypose.pose_estimators.megapose.inference.depth_refiner import (
- DepthRefiner,
-)
-from happypose.pose_estimators.megapose.inference.icp_refiner import (
- ICPRefiner,
-)
-from happypose.pose_estimators.megapose.inference.pose_estimator import (
- PoseEstimator,
-)
+from happypose.pose_estimators.megapose.evaluation.runner_utils import format_results
+from happypose.pose_estimators.megapose.inference.depth_refiner import DepthRefiner
+from happypose.pose_estimators.megapose.inference.icp_refiner import ICPRefiner
+from happypose.pose_estimators.megapose.inference.pose_estimator import PoseEstimator
from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
-
from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir
from happypose.toolbox.utils.logging import get_logger
@@ -69,15 +58,18 @@ def generate_save_key(detection_type: str, coarse_estimation_type: str) -> str:
def get_save_dir(cfg: EvalConfig) -> Path:
"""Returns a save dir.
- Example
-
+ Example:
+ -------
.../ycbv.bop19/gt+SO3_grid
You must remove the '.bop19' from the name in order for the
bop_toolkit_lib to process it correctly.
"""
- save_key = generate_save_key(cfg.inference.detection_type, cfg.inference.coarse_estimation_type)
+ save_key = generate_save_key(
+ cfg.inference.detection_type,
+ cfg.inference.coarse_estimation_type,
+ )
assert cfg.save_dir is not None
assert cfg.ds_name is not None
@@ -88,7 +80,7 @@ def get_save_dir(cfg: EvalConfig) -> Path:
def run_eval(
cfg: EvalConfig,
save_dir: Optional[Path] = None,
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
"""Run eval for a single setting on a single dataset.
A single setting is a (detection_type, coarse_estimation_type) such
@@ -98,12 +90,15 @@ def run_eval(
cfg.save_dir / ds_name / eval_key / results.pth.tar
- Returns:
+ Returns
+ -------
dict: If you are rank_0 process, otherwise returns None
"""
-
- save_key = generate_save_key(cfg.inference.detection_type, cfg.inference.coarse_estimation_type)
+ save_key = generate_save_key(
+ cfg.inference.detection_type,
+ cfg.inference.coarse_estimation_type,
+ )
if save_dir is None:
save_dir = get_save_dir(cfg)
@@ -112,33 +107,48 @@ def run_eval(
logger.info(f"Running eval on ds_name={cfg.ds_name} with setting={save_key}")
# Load the dataset
- ds_kwargs = dict(load_depth=True)
- scene_ds = happypose.toolbox.datasets.datasets_cfg.make_scene_dataset(cfg.ds_name, **ds_kwargs)
- urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(cfg.ds_name)
+ ds_kwargs = {"load_depth": True}
+ scene_ds = happypose.toolbox.datasets.datasets_cfg.make_scene_dataset(
+ cfg.ds_name,
+ **ds_kwargs,
+ )
+ urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(
+ cfg.ds_name,
+ )
# drop frames if this was specified
if cfg.n_frames is not None:
- scene_ds.frame_index = scene_ds.frame_index[: cfg.n_frames].reset_index(drop=True)
+ scene_ds.frame_index = scene_ds.frame_index[: cfg.n_frames].reset_index(
+ drop=True,
+ )
# Load detector model
if cfg.inference.detection_type == "detector":
assert cfg.detector_run_id is not None
- detector_model = happypose.toolbox.inference.utils.load_detector(cfg.detector_run_id)
+ detector_model = happypose.toolbox.inference.utils.load_detector(
+ cfg.detector_run_id,
+ )
elif cfg.inference.detection_type == "gt":
detector_model = None
- elif cfg.inference.detection_type == "sam":
+ elif cfg.inference.detection_type == "exte":
detector_model = None
else:
- raise ValueError(f"Unknown detection_type={cfg.inference.detection_type}")
+ msg = f"Unknown detection_type={cfg.inference.detection_type}"
+ raise ValueError(msg)
# Load the coarse and mrefiner models
# Needed to deal with the fact that str and Optional[str] are incompatible types.
# See https://stackoverflow.com/a/53287330
assert cfg.coarse_run_id is not None
assert cfg.refiner_run_id is not None
- # TODO (emaitre): This fuction seems to take the wrong parameters. Trying to fix this
+    # TODO (emaitre): This function seems to take the wrong parameters.
+ # Trying to fix this
"""
- coarse_model, refiner_model, mesh_db = happypose.toolbox.inference.utils.load_pose_models(
+ (
+ coarse_model,
+ refiner_model,
+ mesh_db,
+ ) = happypose.toolbox.inference.utils.load_pose_models(
coarse_run_id=cfg.coarse_run_id,
refiner_run_id=cfg.refiner_run_id,
n_workers=cfg.n_rendering_workers,
@@ -149,22 +159,27 @@ def run_eval(
"""
object_ds = make_object_dataset(obj_ds_name)
-
- coarse_model, refiner_model, mesh_db = happypose.toolbox.inference.utils.load_pose_models(
+ (
+ coarse_model,
+ refiner_model,
+ mesh_db,
+ ) = happypose.toolbox.inference.utils.load_pose_models(
coarse_run_id=cfg.coarse_run_id,
refiner_run_id=cfg.refiner_run_id,
object_dataset=object_ds,
force_panda3d_renderer=True,
)
-
renderer = refiner_model.renderer
if cfg.inference.run_depth_refiner:
if cfg.inference.depth_refiner == "icp":
depth_refiner: Optional[DepthRefiner] = ICPRefiner(mesh_db, renderer)
elif cfg.inference.depth_refiner == "teaserpp":
- from happypose.pose_estimators.megapose.inference.teaserpp_refiner import TeaserppRefiner
+ from happypose.pose_estimators.megapose.inference.teaserpp_refiner import (
+ TeaserppRefiner,
+ )
+
depth_refiner = TeaserppRefiner(mesh_db, renderer)
else:
depth_refiner = None
@@ -203,7 +218,7 @@ def run_eval(
# Compute eval metrics
# TODO (lmanuelli): Fix this up.
# TODO (ylabbe): Clean this.
- eval_metrics, eval_dfs = dict(), dict()
+ eval_metrics, eval_dfs = {}, {}
if not cfg.skip_evaluation:
assert "modelnet" in cfg.ds_name
object_ds = make_object_dataset(obj_ds_name)
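For orientation, the detector dispatch introduced above reduces to the following contract; load_detector_for is a hypothetical condensation, not a function in the codebase:

    import happypose.toolbox.inference.utils

    def load_detector_for(detection_type: str, detector_run_id=None):
        if detection_type == "detector":
            assert detector_run_id is not None
            return happypose.toolbox.inference.utils.load_detector(detector_run_id)
        elif detection_type in ("gt", "exte"):  # ground-truth or external detections
            return None
        msg = f"Unknown detection_type={detection_type}"
        raise ValueError(msg)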
diff --git a/happypose/pose_estimators/megapose/evaluation/evaluation_runner.py b/happypose/pose_estimators/megapose/evaluation/evaluation_runner.py
index 677d2dca..665284d7 100644
--- a/happypose/pose_estimators/megapose/evaluation/evaluation_runner.py
+++ b/happypose/pose_estimators/megapose/evaluation/evaluation_runner.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -27,17 +26,21 @@
# MegaPose
import happypose.toolbox.utils.tensor_collection as tc
-from happypose.pose_estimators.megapose.evaluation.data_utils import (
- parse_obs_data,
-)
from happypose.toolbox.datasets.samplers import DistributedSceneSampler
-from happypose.toolbox.datasets.scene_dataset import SceneDataset, SceneObservation
+from happypose.toolbox.datasets.scene_dataset import SceneObservation
from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir, get_world_size
class EvaluationRunner:
- def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4, sampler=None):
-
+ def __init__(
+ self,
+ scene_ds,
+ meters,
+ batch_size=64,
+ cache_data=True,
+ n_workers=4,
+ sampler=None,
+ ):
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
@@ -45,7 +48,10 @@ def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4
self.scene_ds = scene_ds
if sampler is None:
sampler = DistributedSceneSampler(
- scene_ds, num_replicas=self.world_size, rank=self.rank, shuffle=True
+ scene_ds,
+ num_replicas=self.world_size,
+ rank=self.rank,
+ shuffle=True,
)
dataloader = DataLoader(
scene_ds,
@@ -62,17 +68,17 @@ def __init__(self, scene_ds, meters, batch_size=64, cache_data=True, n_workers=4
self.meters = meters
self.meters = OrderedDict(
- {k: v for k, v in sorted(self.meters.items(), key=lambda item: item[0])}
+ dict(sorted(self.meters.items(), key=lambda item: item[0])),
)
@staticmethod
def make_empty_predictions():
- infos = dict(
- view_id=np.empty(0, dtype=np.int),
- scene_id=np.empty(0, dtype=np.int),
- label=np.empty(0, dtype=np.object),
- score=np.empty(0, dtype=np.float),
- )
+ infos = {
+ "view_id": np.empty(0, dtype=np.int),
+ "scene_id": np.empty(0, dtype=np.int),
+ "label": np.empty(0, dtype=np.object),
+ "score": np.empty(0, dtype=np.float),
+ }
poses = torch.empty(0, 4, 4, dtype=torch.float)
return tc.PandasTensorCollection(infos=pd.DataFrame(infos), poses=poses)
@@ -83,12 +89,12 @@ def evaluate(self, obj_predictions, device="cuda"):
meter.reset()
obj_predictions = obj_predictions.to(device)
for data in tqdm(self.dataloader):
- for k, meter in self.meters.items():
+ for _k, meter in self.meters.items():
meter.add(obj_predictions, data["gt_data"].to(device))
return self.summary()
def summary(self):
- summary, dfs = dict(), dict()
+ summary, dfs = {}, {}
for meter_k, meter in self.meters.items():
if len(meter.datas) > 0:
meter.gather_distributed(tmp_dir=self.tmp_dir)
diff --git a/happypose/pose_estimators/megapose/evaluation/meters/__init__.py b/happypose/pose_estimators/megapose/evaluation/meters/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/evaluation/meters/__init__.py
+++ b/happypose/pose_estimators/megapose/evaluation/meters/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/evaluation/meters/base.py b/happypose/pose_estimators/megapose/evaluation/meters/base.py
index d1998982..2e8f16df 100644
--- a/happypose/pose_estimators/megapose/evaluation/meters/base.py
+++ b/happypose/pose_estimators/megapose/evaluation/meters/base.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from collections import defaultdict
from pathlib import Path
diff --git a/happypose/pose_estimators/megapose/evaluation/meters/lf_utils.py b/happypose/pose_estimators/megapose/evaluation/meters/lf_utils.py
index c170da84..ee0e440c 100644
--- a/happypose/pose_estimators/megapose/evaluation/meters/lf_utils.py
+++ b/happypose/pose_estimators/megapose/evaluation/meters/lf_utils.py
@@ -1,27 +1,32 @@
import torch
from torch.nn import functional as F
+
def normalize(quaternion: torch.Tensor, eps: float = 1e-12) -> torch.Tensor:
r"""Normalizes a quaternion.
The quaternion should be in (x, y, z, w) format.
+
Args:
+ ----
quaternion (torch.Tensor): a tensor containing a quaternion to be
normalized. The tensor can be of shape :math:`(*, 4)`.
eps (Optional[bool]): small value to avoid division by zero.
Default: 1e-12.
+
Return:
+ ------
torch.Tensor: the normalized quaternion of shape :math:`(*, 4)`.
"""
if not isinstance(quaternion, torch.Tensor):
- raise TypeError("Input type is not a torch.Tensor. Got {}".format(
- type(quaternion)))
+ msg = f"Input type is not a torch.Tensor. Got {type(quaternion)}"
+ raise TypeError(msg)
if not quaternion.shape[-1] == 4:
- raise ValueError(
- "Input must be a tensor of shape (*, 4). Got {}".format(
- quaternion.shape))
+ msg = f"Input must be a tensor of shape (*, 4). Got {quaternion.shape}"
+ raise ValueError(msg)
return F.normalize(quaternion, p=2.0, dim=-1, eps=eps)
+
def angular_distance(q1, q2, eps: float = 1e-7):
q1 = normalize(q1)
q2 = normalize(q2)
@@ -32,4 +37,4 @@ def angular_distance(q1, q2, eps: float = 1e-7):
@torch.jit.script
def acos_safe(t, eps: float = 1e-7):
- return torch.acos(torch.clamp(t, min=-1.0 + eps, max=1.0 - eps))
\ No newline at end of file
+ return torch.acos(torch.clamp(t, min=-1.0 + eps, max=1.0 - eps))
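A quick sanity check of the helpers above; a sketch assuming angular_distance takes batched (x, y, z, w) quaternions and returns radians:

    import torch

    q = torch.tensor([[0.0, 0.0, 0.0, 2.0]])      # un-normalized identity rotation
    q_ref = torch.tensor([[0.0, 0.0, 0.0, 1.0]])

    assert torch.allclose(normalize(q), q_ref)    # rescaled to unit norm
    print(angular_distance(q, q_ref))             # expected ~0 rad (identical rotations)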
diff --git a/happypose/pose_estimators/megapose/evaluation/meters/modelnet_meters.py b/happypose/pose_estimators/megapose/evaluation/meters/modelnet_meters.py
index c0dedf29..36dab9cf 100644
--- a/happypose/pose_estimators/megapose/evaluation/meters/modelnet_meters.py
+++ b/happypose/pose_estimators/megapose/evaluation/meters/modelnet_meters.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -33,15 +32,16 @@
from happypose.toolbox.lib3d.camera_geometry import project_points
from happypose.toolbox.lib3d.distances import dists_add
from happypose.toolbox.lib3d.transform import Transform
-from happypose.toolbox.lib3d.transform_ops import transform_pts
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ModelNetErrorMeter(Meter):
def __init__(self, mesh_db, sample_n_points=None):
self.reset()
- self.mesh_db = mesh_db.batched(resample_n_points=sample_n_points).to(device).float()
+ self.mesh_db = (
+ mesh_db.batched(resample_n_points=sample_n_points).to(device).float()
+ )
def is_data_valid(self, data):
valid = False
@@ -52,9 +52,12 @@ def is_data_valid(self, data):
def add(self, pred_data, gt_data):
pred_data = pred_data.float()
gt_data = gt_data.float()
-
+
matches = one_to_one_matching(
- pred_data.infos, gt_data.infos, keys=("scene_id", "view_id"), allow_pred_missing=False
+ pred_data.infos,
+ gt_data.infos,
+ keys=("scene_id", "view_id"),
+ allow_pred_missing=False,
)
pred_data = pred_data[matches.pred_id]
@@ -84,7 +87,7 @@ def add(self, pred_data, gt_data):
uv_dists = torch.norm(uv_pred - uv_gt, dim=-1)
uv_avg = uv_dists.mean()
- df = xr.Dataset(matches).rename(dict(dim_0="match_id"))
+ df = xr.Dataset(matches).rename({"dim_0": "match_id"})
df["add"] = "match_id", np.array([dist_add.item()])
df["diameter"] = "match_id", np.array([diameter_1])
df["proj_error"] = "match_id", np.array([uv_avg.item()])
diff --git a/happypose/pose_estimators/megapose/evaluation/meters/utils.py b/happypose/pose_estimators/megapose/evaluation/meters/utils.py
index f586b97e..346281c1 100644
--- a/happypose/pose_estimators/megapose/evaluation/meters/utils.py
+++ b/happypose/pose_estimators/megapose/evaluation/meters/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,9 +14,7 @@
"""
-
# Standard Library
-from collections import OrderedDict
# Third Party
import numpy as np
@@ -25,7 +22,10 @@
def one_to_one_matching(
- pred_infos, gt_infos, keys=("scene_id", "view_id"), allow_pred_missing=False
+ pred_infos,
+ gt_infos,
+ keys=("scene_id", "view_id"),
+ allow_pred_missing=False,
):
keys = list(keys)
pred_infos["pred_id"] = np.arange(len(pred_infos))
@@ -36,30 +36,36 @@ def one_to_one_matching(
print("matches_gb =", matches_gb)
for v in matches_gb.values():
print("v matched = ", v)
- assert all([len(v) == 1 for v in matches_gb.values()])
+ assert all(len(v) == 1 for v in matches_gb.values())
if not allow_pred_missing:
assert len(matches) == len(gt_infos)
return matches
-def add_inst_num(infos, group_keys=["scene_id", "view_id", "label"], key="pred_inst_num"):
-
+def add_inst_num(
+ infos,
+ group_keys=["scene_id", "view_id", "label"],
+ key="pred_inst_num",
+):
inst_num = np.empty(len(infos), dtype=int)
- for group_name, group_ids in infos.groupby(group_keys).groups.items():
+ for _group_name, group_ids in infos.groupby(group_keys).groups.items():
inst_num[group_ids.values] = np.arange(len(group_ids))
infos[key] = inst_num
return infos
def get_top_n_ids(
- infos, group_keys=("scene_id", "view_id", "label"), top_key="score", n_top=-1, targets=None
+ infos,
+ group_keys=("scene_id", "view_id", "label"),
+ top_key="score",
+ n_top=-1,
+ targets=None,
):
-
infos["id_before_top_n"] = np.arange(len(infos))
group_keys = list(group_keys)
if targets is not None:
- targets_inst_count = dict()
+ targets_inst_count = {}
for k, ids in targets.groupby(group_keys).groups.items():
targets_inst_count[k] = targets.loc[ids[0], "inst_count"]
@@ -87,18 +93,24 @@ def get_top_n(group_k):
def add_valid_gt(
- gt_infos, group_keys=("scene_id", "view_id", "label"), visib_gt_min=-1, targets=None
+ gt_infos,
+ group_keys=("scene_id", "view_id", "label"),
+ visib_gt_min=-1,
+ targets=None,
):
-
if visib_gt_min > 0:
gt_infos["valid"] = gt_infos["visib_fract"] >= visib_gt_min
if targets is not None:
gt_infos["valid"] = np.logical_and(
- gt_infos["valid"], np.isin(gt_infos["label"], targets["label"])
+ gt_infos["valid"],
+ np.isin(gt_infos["label"], targets["label"]),
)
elif targets is not None:
valid_ids = get_top_n_ids(
- gt_infos, group_keys=group_keys, top_key="visib_fract", targets=targets
+ gt_infos,
+ group_keys=group_keys,
+ top_key="visib_fract",
+ targets=targets,
)
gt_infos["valid"] = False
gt_infos.loc[valid_ids, "valid"] = True
@@ -108,7 +120,10 @@ def add_valid_gt(
def get_candidate_matches(
- pred_infos, gt_infos, group_keys=["scene_id", "view_id", "label"], only_valids=True
+ pred_infos,
+ gt_infos,
+ group_keys=["scene_id", "view_id", "label"],
+ only_valids=True,
):
pred_infos["pred_id"] = np.arange(len(pred_infos))
gt_infos["gt_id"] = np.arange(len(gt_infos))
diff --git a/happypose/pose_estimators/megapose/evaluation/prediction_runner.py b/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
index 4c4e0ec3..7030db98 100644
--- a/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
+++ b/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
@@ -16,14 +16,10 @@
# Standard Library
-import time
from collections import defaultdict
from typing import Dict, Optional
-from pathlib import Path
-
# Third Party
-import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
@@ -31,38 +27,27 @@
# MegaPose
import happypose.pose_estimators.megapose
import happypose.toolbox.utils.tensor_collection as tc
-from happypose.pose_estimators.megapose.inference.pose_estimator import (
- PoseEstimator,
+from happypose.pose_estimators.megapose.evaluation.bop import (
+ filter_detections_scene_view,
+ load_external_detections,
)
+from happypose.pose_estimators.megapose.inference.pose_estimator import PoseEstimator
from happypose.pose_estimators.megapose.inference.types import (
DetectionsType,
InferenceConfig,
ObservationTensor,
PoseEstimatesType,
)
-from happypose.pose_estimators.megapose.config import (
- BOP_DS_DIR
-)
-from happypose.pose_estimators.megapose.evaluation.bop import (
- get_sam_detections,
- load_sam_predictions
-)
-
-from happypose.pose_estimators.megapose.training.utils import CudaTimer
from happypose.toolbox.datasets.samplers import DistributedSceneSampler
-from happypose.toolbox.datasets.scene_dataset import SceneDataset, SceneObservation, ObjectData
-from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir, get_world_size
-from happypose.toolbox.utils.logging import get_logger
-
+from happypose.toolbox.datasets.scene_dataset import SceneDataset, SceneObservation
# Temporary
-from happypose.toolbox.inference.utils import make_detections_from_object_data
-import pandas as pd
-import json
+from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir, get_world_size
+from happypose.toolbox.utils.logging import get_logger
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PredictionRunner:
@@ -73,13 +58,14 @@ def __init__(
batch_size: int = 1,
n_workers: int = 4,
) -> None:
-
self.inference_cfg = inference_cfg
self.rank = get_rank()
self.world_size = get_world_size()
self.tmp_dir = get_tmp_dir()
- sampler = DistributedSceneSampler(scene_ds, num_replicas=self.world_size, rank=self.rank)
+ sampler = DistributedSceneSampler(
+ scene_ds, num_replicas=self.world_size, rank=self.rank
+ )
self.sampler = sampler
self.scene_ds = scene_ds
dataloader = DataLoader(
@@ -99,14 +85,15 @@ def run_inference_pipeline(
pose_estimator: PoseEstimator,
obs_tensor: ObservationTensor,
gt_detections: DetectionsType,
- sam_detections: DetectionsType,
+ exte_detections: DetectionsType,
initial_estimates: Optional[PoseEstimatesType] = None,
) -> Dict[str, PoseEstimatesType]:
"""Runs inference pipeline, extracts the results.
Returns: A dict with keys
- 'final': final preds
- - 'refiner/final': preds at final refiner iteration (before depth refinement)
+ - 'refiner/final': preds at final refiner iteration (before depth
+ refinement).
- 'depth_refinement': preds after depth refinement.
@@ -116,26 +103,28 @@ def run_inference_pipeline(
if self.inference_cfg.detection_type == "gt":
detections = gt_detections
run_detector = False
- elif self.inference_cfg.detection_type == "sam":
- # print("sam_detections =", sam_detections.bboxes)
- detections = sam_detections
+ elif self.inference_cfg.detection_type == "exte":
+ # print("exte_detections =", exte_detections.bboxes)
+ detections = exte_detections
run_detector = False
elif self.inference_cfg.detection_type == "detector":
detections = None
run_detector = True
else:
- raise ValueError(f"Unknown detection type {self.inference_cfg.detection_type}")
+ msg = f"Unknown detection type {self.inference_cfg.detection_type}"
+ raise ValueError(msg)
coarse_estimates = None
if self.inference_cfg.coarse_estimation_type == "external":
# TODO (ylabbe): This is hacky, clean this for modelnet eval.
coarse_estimates = initial_estimates
- coarse_estimates = happypose.toolbox.inference.utils.add_instance_id(coarse_estimates)
+ coarse_estimates = happypose.toolbox.inference.utils.add_instance_id(
+ coarse_estimates
+ )
coarse_estimates.infos["instance_id"] = 0
run_detector = False
- t = time.time()
preds, extra_data = pose_estimator.run_inference_pipeline(
obs_tensor,
detections=detections,
@@ -147,36 +136,48 @@ def run_inference_pipeline(
bsz_images=self.inference_cfg.bsz_images,
bsz_objects=self.inference_cfg.bsz_objects,
)
- elapsed = time.time() - t
# TODO (lmanuelli): Process this into a dict with keys like
# - 'refiner/iteration=1`
# - 'refiner/iteration=5`
# - `depth_refiner`
# Note: Since we support multi-hypotheses we need to potentially
- # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5` things for the ones that were actually the highest scoring at the end.
+ # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5`
+ # things for the ones that were actually the highest scoring at the end.
- all_preds = dict()
+ ref_it_str = f"refiner/iteration={self.inference_cfg.n_refiner_iterations}"
data_TCO_refiner = extra_data["refiner"]["preds"]
all_preds = {
"final": preds,
- f"refiner/iteration={self.inference_cfg.n_refiner_iterations}": data_TCO_refiner,
+ ref_it_str: data_TCO_refiner,
"refiner/final": data_TCO_refiner,
"coarse": extra_data["coarse"]["preds"],
}
+ # Only keep necessary metadata
+ del extra_data["coarse"]["data"]["TCO"]
+ all_preds_data = {
+ "coarse": extra_data["coarse"]["data"],
+ "refiner": extra_data["refiner"]["data"],
+ "scoring": extra_data["scoring"],
+ }
+
+ # If depth refinement, add it to preds and metadata
if self.inference_cfg.run_depth_refiner:
- all_preds[f"depth_refiner"] = extra_data["depth_refiner"]["preds"]
+ all_preds["depth_refiner"] = extra_data["depth_refiner"]["preds"]
+ all_preds_data["depth_refiner"] = extra_data["depth_refiner"]["data"]
- for k, v in all_preds.items():
+ # remove masks tensors as they are never used by megapose
+ for v in all_preds.values():
if "mask" in v.tensors:
- breakpoint()
v.delete_tensor("mask")
- return all_preds
+ return all_preds, all_preds_data
- def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstimatesType]:
+ def get_predictions(
+ self, pose_estimator: PoseEstimator
+ ) -> Dict[str, PoseEstimatesType]:
"""Runs predictions
Returns: A dict with keys
@@ -185,8 +186,6 @@ def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstima
- 'depth_refiner'
With the predictions at the various settings/iterations.
-
-
"""
predictions_list = defaultdict(list)
@@ -196,29 +195,29 @@ def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstima
# format it and store it in a dataframe that will be accessed later
######
# Temporary solution
- if self.inference_cfg.detection_type == "sam":
- df_all_dets, df_targets = load_sam_predictions(self.scene_ds.ds_dir.name, self.scene_ds.ds_dir)
+ if self.inference_cfg.detection_type == "exte":
+ df_all_dets, df_targets = load_external_detections(self.scene_ds.ds_dir)
for n, data in enumerate(tqdm(self.dataloader)):
# data is a dict
rgb = data["rgb"]
depth = data["depth"]
K = data["cameras"].K
- im_info = data['im_infos'][0]
- scene_id, view_id = im_info['scene_id'], im_info['view_id']
+ im_info = data["im_infos"][0]
+ scene_id, view_id = im_info["scene_id"], im_info["view_id"]
+
# Dirty but avoids creating error when running with real detector
- dt_det = 0
+ dt_det_exte = 0
- ######
- # Filter the dataframe according to scene id and view id
- # Transform the data in ObjectData and then Detections
- ######
# Temporary solution
- if self.inference_cfg.detection_type == "sam":
- # We assume a unique image ("view") associated with a unique scene_id is
- sam_detections = get_sam_detections(data=data, df_all_dets=df_all_dets, df_targets=df_targets, dt_det=dt_det)
+ if self.inference_cfg.detection_type == "exte":
+ exte_detections = filter_detections_scene_view(
+ scene_id, view_id, df_all_dets, df_targets
+ )
+ if len(exte_detections) > 0:
+ dt_det_exte += exte_detections.infos["time"].iloc[0]
else:
- sam_detections = None
+ exte_detections = None
gt_detections = data["gt_detections"].cuda()
initial_data = None
if data["initial_data"]:
@@ -231,30 +230,65 @@ def get_predictions(self, pose_estimator: PoseEstimator) -> Dict[str, PoseEstima
if n == 0:
with torch.no_grad():
self.run_inference_pipeline(
- pose_estimator, obs_tensor, gt_detections, sam_detections, initial_estimates=initial_data
+ pose_estimator,
+ obs_tensor,
+ gt_detections,
+ exte_detections,
+ initial_estimates=initial_data,
)
- cuda_timer = CudaTimer()
- cuda_timer.start()
with torch.no_grad():
- all_preds = self.run_inference_pipeline(
- pose_estimator, obs_tensor, gt_detections, sam_detections, initial_estimates=initial_data
+ all_preds, all_preds_data = self.run_inference_pipeline(
+ pose_estimator,
+ obs_tensor,
+ gt_detections,
+ exte_detections,
+ initial_estimates=initial_data,
)
- cuda_timer.end()
- duration = cuda_timer.elapsed()
-
- total_duration = duration + dt_det
# Add metadata to the predictions for later evaluation
- for k, v in all_preds.items():
- v.infos['time'] = total_duration
- v.infos['scene_id'] = scene_id
- v.infos['view_id'] = view_id
- predictions_list[k].append(v)
+ for pred_name, pred in all_preds.items():
+ dt_pipeline = compute_pose_est_total_time(
+ all_preds_data,
+ pred_name,
+ )
+ pred.infos["time"] = dt_det_exte + dt_pipeline
+ pred.infos["scene_id"] = scene_id
+ pred.infos["view_id"] = view_id
+ predictions_list[pred_name].append(pred)
- # Concatenate the lists of PandasTensorCollections
+ # Concatenate the lists of PandasTensorCollections
predictions = dict()
for k, v in predictions_list.items():
predictions[k] = tc.concatenate(v)
- return predictions
\ No newline at end of file
+ return predictions
+
+
+def compute_pose_est_total_time(all_preds_data: dict, pred_name: str):
+ # all_preds_data:
+ # dict_keys(
+ # ["final", "refiner/iteration=5", "refiner/final", "coarse", "coarse_filter"]
+ # ) # optionally 'depth_refiner'
+ dt_coarse = all_preds_data["coarse"]["time"]
+ dt_coarse_refiner = dt_coarse + all_preds_data["refiner"]["time"]
+ if "depth_refiner" in all_preds_data:
+ dt_coarse_refiner_depth = (
+ dt_coarse_refiner + all_preds_data["depth_refiner"]["time"]
+ )
+
+ if pred_name.startswith("coarse"):
+ return dt_coarse
+ elif pred_name.startswith("refiner"):
+ return dt_coarse_refiner
+ elif pred_name == "depth_refiner":
+ return dt_coarse_refiner_depth
+ elif pred_name == "final":
+ return (
+ dt_coarse_refiner_depth
+ if "depth_refiner" in all_preds_data
+ else dt_coarse_refiner
+ )
+ else:
+ msg = f"{pred_name} extra data not in {all_preds_data.keys()}"
+ raise ValueError(msg)
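A worked example of the timing aggregation above, with illustrative values: each prediction's reported time accumulates the stages that produced it, plus the depth refiner when it ran:

    all_preds_data = {
        "coarse": {"time": 0.08},
        "refiner": {"time": 0.30},
        "depth_refiner": {"time": 0.05},
    }

    assert compute_pose_est_total_time(all_preds_data, "coarse") == 0.08
    assert abs(compute_pose_est_total_time(all_preds_data, "refiner/final") - 0.38) < 1e-9
    assert abs(compute_pose_est_total_time(all_preds_data, "final") - 0.43) < 1e-9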
diff --git a/happypose/pose_estimators/megapose/evaluation/runner_utils.py b/happypose/pose_estimators/megapose/evaluation/runner_utils.py
index 903b1258..2e90079d 100644
--- a/happypose/pose_estimators/megapose/evaluation/runner_utils.py
+++ b/happypose/pose_estimators/megapose/evaluation/runner_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from collections import OrderedDict, defaultdict
@@ -30,7 +28,7 @@
def run_pred_eval(pred_runner, pred_kwargs, eval_runner, eval_preds=None):
- all_predictions = dict()
+ all_predictions = {}
for pred_prefix, pred_kwargs_n in pred_kwargs.items():
print("Prediction :", pred_prefix)
preds = pred_runner.get_predictions(**pred_kwargs_n)
@@ -38,9 +36,9 @@ def run_pred_eval(pred_runner, pred_kwargs, eval_runner, eval_preds=None):
all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n
all_predictions = OrderedDict(
- {k: v for k, v in sorted(all_predictions.items(), key=lambda item: item[0])}
+ dict(sorted(all_predictions.items(), key=lambda item: item[0])),
)
- eval_metrics, eval_dfs = dict(), dict()
+ eval_metrics, eval_dfs = {}, {}
for preds_k, preds in all_predictions.items():
print("Evaluation :", preds_k)
@@ -63,7 +61,7 @@ def gather_predictions(all_predictions):
def format_results(predictions, eval_metrics, eval_dfs, print_metrics=True):
- summary = dict()
+ summary = {}
df = defaultdict(list)
summary_txt = ""
for k, v in eval_metrics.items():
@@ -79,12 +77,12 @@ def format_results(predictions, eval_metrics, eval_dfs, print_metrics=True):
logger.info(summary_txt)
df = pd.DataFrame(df)
- results = dict(
- summary=summary,
- summary_txt=summary_txt,
- predictions=predictions,
- metrics=eval_metrics,
- summary_df=df,
- dfs=eval_dfs,
- )
+ results = {
+ "summary": summary,
+ "summary_txt": summary_txt,
+ "predictions": predictions,
+ "metrics": eval_metrics,
+ "summary_df": df,
+ "dfs": eval_dfs,
+ }
return results
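
The `run_pred_eval` change is purely stylistic: a dict comprehension over `sorted(...)` items becomes `dict(sorted(...))`, the rewrite ruff's comprehension rules suggest. A toy demonstration (keys are made up) that the two forms agree:

```python
# Both forms sort predictions by key; dict(sorted(...)) just skips the
# redundant comprehension.
preds = {"maskrcnn/refiner": 2, "gt/coarse": 1, "maskrcnn/coarse": 3}

a = {k: v for k, v in sorted(preds.items(), key=lambda item: item[0])}
b = dict(sorted(preds.items(), key=lambda item: item[0]))

assert a == b
assert list(b) == ["gt/coarse", "maskrcnn/coarse", "maskrcnn/refiner"]
```
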
diff --git a/happypose/pose_estimators/megapose/evaluation/utils.py b/happypose/pose_estimators/megapose/evaluation/utils.py
index ddf71838..e56ffdad 100644
--- a/happypose/pose_estimators/megapose/evaluation/utils.py
+++ b/happypose/pose_estimators/megapose/evaluation/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -48,13 +47,12 @@ def get_symmetry_transformations_torch(trans_list):
def compute_pose_error(T1, T2):
- """
- Args:
+ """Args:
+ ----
Two sets of poses in world frame
T1: [B,4,4]
- T2: [B,4,4]
+ T2: [B,4,4].
"""
-
trans_err = torch.linalg.norm(T1[..., :3, 3] - T2[..., :3, 3], dim=-1)
R1 = T1[..., :3, :3]
R2 = T2[..., :3, :3]
@@ -67,27 +65,25 @@ def compute_pose_error(T1, T2):
def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
- """
- Compute the errors between gt_pose and predicted pose.
+ """Compute the errors between gt_pose and predicted pose.
Args:
-
+ ----
preds: This is results['predictions'] where results is from results.pth.tar
method: The type of method we should use for evaluation
methods: str, e.g. 'gt_detections+coarse_init'
"""
-
preds_gt = preds[f"{method}/ground_truth"]
TCO_gt = preds_gt.poses.cuda() # [B,4,4]
device = TCO_gt.device
- TOC_gt = torch.linalg.inv(TCO_gt)
+ torch.linalg.inv(TCO_gt)
for key, p in preds.items():
if not key.startswith(method):
continue
- if re.search("refiner/iteration=\d*$", key) or re.search("refiner/init$", key):
+ if re.search("refiner/iteration=\\d*$", key) or re.search("refiner/init$", key):
pass
else:
continue
@@ -97,7 +93,7 @@ def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
object_labels = p.infos.label.unique()
object_labels.sort()
- obj_info_dict = dict()
+ obj_info_dict = {}
for val in obj_dataset.objects:
obj_info_dict[val["label"]] = val
@@ -109,7 +105,8 @@ def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
bop_info = obj_info["bop_info"]
max_sym_rot_step = np.deg2rad(max_sym_rot_step_deg)
trans_list = get_symmetry_transformations(
- bop_info, max_sym_disc_step=max_sym_rot_step
+ bop_info,
+ max_sym_disc_step=max_sym_rot_step,
)
syms = get_symmetry_transformations_torch(trans_list)
else:
@@ -123,7 +120,6 @@ def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
TCO_pred_obj = p.poses[idx_list].cuda()
TCO_gt_obj = TCO_gt[idx_list]
-
# Assumes symmetries don't have any offsets
pts = create_default_object_pts().to(device)
mssd_out = mssd_torch(TCO_pred_obj, TCO_gt_obj, pts, syms)
@@ -137,13 +133,11 @@ def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
p.infos.loc[idx_list, "trans_err"] = trans_err.tolist()
p.infos.loc[idx_list, "rot_err_deg"] = roterr_deg.tolist()
-
-
p_init = preds[f"{method}/refiner/init"]
for key, p in preds.items():
if not key.startswith(method):
continue
- if re.search("refiner/iteration=\d*$", key):
+ if re.search("refiner/iteration=\\d*$", key):
pass
else:
continue
@@ -155,7 +149,7 @@ def compute_errors(preds, method, obj_dataset, max_sym_rot_step_deg=1):
def create_plots(result_name):
- """Make the png figures from the"""
+ """Make the png figures from the."""
pass
@@ -178,6 +172,7 @@ def mssd_torch(T_est, T_gt, pts, syms):
Based on https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/pose_error.py#L96
Args:
+ ----
T_est: [B,4,4] tensor, estimated pose
T_gt: [B,4,4] tensor, ground-truth pose
pts: [N,3] tensor, 3D model points
@@ -185,6 +180,7 @@ def mssd_torch(T_est, T_gt, pts, syms):
Returns:
+ -------
err: [B,] mssd
T_gt_sym: [B,4,4] the closest symmetry aware transform
sym: [B,4,4] symmetry transform that led to T_gt_sym
@@ -239,19 +235,20 @@ def mssd_torch(T_est, T_gt, pts, syms):
def load_zephyr_hypotheses(ds_name, device="cuda", debug=False, hypotheses_type="all"):
- """Load Zephyr ppf hypotheses (and SIFT)
+ """Load Zephyr ppf hypotheses (and SIFT).
Args:
+ ----
ds_name: str ['ycbv.bop19', 'lmo.bop19']
hypotheses_type: ['all', 'ppf', 'sift']
Returns:
+ -------
PandasTensorCollection:
poses: [N,4,4]
infos: has columns ['pose_hypothesis_id']
"""
-
assert hypotheses_type in ["ppf", "sift", "all"]
zephyr_dir = LOCAL_DATA_DIR / "external_detections/zephyr"
if ds_name == "ycbv.bop19":
@@ -259,7 +256,8 @@ def load_zephyr_hypotheses(ds_name, device="cuda", debug=False, hypotheses_type=
elif ds_name == "lmo.bop19":
fname = zephyr_dir / f"lmo_test_pose_hypotheses_{hypotheses_type}.pth"
else:
- raise ValueError(f"Unknown dataset {ds_name}")
+ msg = f"Unknown dataset {ds_name}"
+ raise ValueError(msg)
p = torch.load(fname)
p.infos = p.infos.rename(columns={"object_label": "label"})
@@ -268,7 +266,7 @@ def load_zephyr_hypotheses(ds_name, device="cuda", debug=False, hypotheses_type=
def load_ppf_hypotheses(ds_name, device="cuda", debug=False):
- """Load Zephyr ppf hypotheses
+ """Load Zephyr ppf hypotheses.
The columns of the dataframe are
@@ -282,7 +280,8 @@ def load_ppf_hypotheses(ds_name, device="cuda", debug=False):
elif ds_name == "lmo.bop19":
fname = zephyr_dir / "lmo_list_bop_test_v1.txt"
else:
- raise ValueError(f"Unknown dataset {ds_name}")
+ msg = f"Unknown dataset {ds_name}"
+ raise ValueError(msg)
df = pd.read_csv(fname, delim_whitespace=True)
@@ -341,14 +340,15 @@ def load_dtoid_detections(ds_name):
elif ds_name == "lm.bop19":
fname = dtoid_dir / "lm_preds.csv"
else:
- raise ValueError(f"Unknown dataset {ds_name}")
+ msg = f"Unknown dataset {ds_name}"
+ raise ValueError(msg)
df = pd.read_csv(fname)
def parse_image_fn(image_fn):
ds, split, scene_id, modality, ext = image_fn.split("/")
scene_id = int(scene_id)
view_id = int(ext.split(".")[0])
- return dict(scene_id=scene_id, view_id=view_id)
+ return {"scene_id": scene_id, "view_id": view_id}
x1 = df.loc[:, "x"].values
y1 = df.loc[:, "y"].values
@@ -358,7 +358,10 @@ def parse_image_fn(image_fn):
infos = pd.DataFrame([parse_image_fn(image_fn) for image_fn in df["image_fn"]])
infos.loc[:, "label"] = [f"obj_{object_id:06d}" for object_id in df["object_id"]]
infos.loc[:, "score"] = -1
- bboxes = np.concatenate([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], axis=1)
+ bboxes = np.concatenate(
+ [x1[:, None], y1[:, None], x2[:, None], y2[:, None]],
+ axis=1,
+ )
bboxes = torch.tensor(bboxes).float()
ids_valids = (bboxes >= 0).all(dim=1).nonzero().flatten().tolist()
bboxes = bboxes[ids_valids]
@@ -368,21 +371,25 @@ def parse_image_fn(image_fn):
return detections
-def compute_errors_single_object(TCO_gt, TCO_pred, obj_label, obj_dataset, max_sym_rot_step_deg=1):
- """
- Compute the errors between gt_pose and predicted pose.
+def compute_errors_single_object(
+ TCO_gt,
+ TCO_pred,
+ obj_label,
+ obj_dataset,
+ max_sym_rot_step_deg=1,
+):
+ """Compute the errors between gt_pose and predicted pose.
Args:
-
+ ----
TCO_gt: [4,4] The pose you want to compute error relative to
poses: [B,4,4]
obj_dataset:
"""
-
device = TCO_pred.device
B = TCO_pred.shape[0]
- obj_info_dict = dict()
+ obj_info_dict = {}
for val in obj_dataset.objects:
obj_info_dict[val["label"]] = val
@@ -391,7 +398,10 @@ def compute_errors_single_object(TCO_gt, TCO_pred, obj_label, obj_dataset, max_s
if obj_info["is_symmetric"]:
bop_info = obj_info["bop_info"]
max_sym_rot_step = np.deg2rad(max_sym_rot_step_deg)
- trans_list = get_symmetry_transformations(bop_info, max_sym_disc_step=max_sym_rot_step)
+ trans_list = get_symmetry_transformations(
+ bop_info,
+ max_sym_disc_step=max_sym_rot_step,
+ )
syms = get_symmetry_transformations_torch(trans_list)
else:
syms = torch.eye(4, device=device).unsqueeze(0)
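
`compute_pose_error` is only partially visible in this hunk: the translation error is the L2 distance between pose translations, while the rotation branch is cut off after `R1`/`R2` are extracted. As a hedged sketch, a standard geodesic-angle formulation is reconstructed below; the trace/arccos step is an assumption, since the hunk does not show it.

```python
# Translation error as shown in the hunk; the rotation error is an assumed
# reconstruction of the truncated part (geodesic angle via trace/arccos).
import torch

def pose_errors(T1: torch.Tensor, T2: torch.Tensor):
    """T1, T2: [B,4,4] homogeneous transforms expressed in the same frame."""
    trans_err = torch.linalg.norm(T1[..., :3, 3] - T2[..., :3, 3], dim=-1)
    R_rel = T1[..., :3, :3].transpose(-1, -2) @ T2[..., :3, :3]
    # angle = arccos((trace(R_rel) - 1) / 2), clamped for numerical safety
    cos = ((R_rel.diagonal(dim1=-2, dim2=-1).sum(-1) - 1.0) / 2.0).clamp(-1, 1)
    rot_err_deg = torch.rad2deg(torch.arccos(cos))
    return trans_err, rot_err_deg

T1 = torch.eye(4).unsqueeze(0)
T2 = T1.clone()
T2[0, 0, 3] = 0.1  # 10 cm translation offset, identical rotation
trans, rot = pose_errors(T1, T2)
assert torch.isclose(trans[0], torch.tensor(0.1)) and rot[0] < 1e-3
```
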
diff --git a/happypose/pose_estimators/megapose/inference/__init__.py b/happypose/pose_estimators/megapose/inference/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/inference/__init__.py
+++ b/happypose/pose_estimators/megapose/inference/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/inference/depth_refiner.py b/happypose/pose_estimators/megapose/inference/depth_refiner.py
index a552aa49..c948d32e 100644
--- a/happypose/pose_estimators/megapose/inference/depth_refiner.py
+++ b/happypose/pose_estimators/megapose/inference/depth_refiner.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
from abc import ABC, abstractmethod
-from typing import Optional, Tuple
+from typing import Optional
# Third Party
import torch
@@ -34,10 +33,11 @@ def refine_poses(
masks: Optional[torch.tensor] = None,
depth: Optional[torch.tensor] = None,
K: Optional[torch.tensor] = None,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Run the depth refinement.
Args:
+ ----
predictions: len(predictions) = N, index into depth, masks, K using
the batch_im_id field.
depth: [B, H, W]
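
The abstract `refine_poses` now returns the builtin-generic `tuple[PoseEstimatesType, dict]` (PEP 585, fine on the 3.9/3.10 CI matrix). A minimal conforming subclass might look like the following; `NoOpRefiner` is hypothetical and not part of the codebase, shown only to illustrate the contract:

```python
from typing import Optional

import torch

from happypose.pose_estimators.megapose.inference.depth_refiner import DepthRefiner
from happypose.toolbox.inference.types import PoseEstimatesType


class NoOpRefiner(DepthRefiner):
    """Hypothetical stub: returns the input estimates unchanged."""

    def refine_poses(
        self,
        predictions: PoseEstimatesType,
        masks: Optional[torch.Tensor] = None,
        depth: Optional[torch.Tensor] = None,
        K: Optional[torch.Tensor] = None,
    ) -> tuple[PoseEstimatesType, dict]:
        # A real refiner indexes depth/masks/K with
        # predictions.infos.batch_im_id and updates predictions.poses.
        return predictions, {}
```
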
diff --git a/happypose/pose_estimators/megapose/inference/detector.py b/happypose/pose_estimators/megapose/inference/detector.py
index 98221f08..97732139 100644
--- a/happypose/pose_estimators/megapose/inference/detector.py
+++ b/happypose/pose_estimators/megapose/inference/detector.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from typing import Any, Optional
@@ -28,9 +26,9 @@
# MegaPose
import happypose.pose_estimators.megapose
import happypose.toolbox.utils.tensor_collection as tc
+from happypose.toolbox.inference.detector import DetectorModule
from happypose.toolbox.inference.types import DetectionsType, ObservationTensor
-from happypose.toolbox.inference.detector import DetectorModule
class Detector(DetectorModule):
def __init__(self, model: torch.nn.Module) -> None:
@@ -38,7 +36,9 @@ def __init__(self, model: torch.nn.Module) -> None:
self.model = model
self.model.eval()
self.config = model.config
- self.category_id_to_label = {v: k for k, v in self.config.label_to_category_id.items()}
+ self.category_id_to_label = {
+ v: k for k, v in self.config.label_to_category_id.items()
+ }
def image_tensor_from_numpy(
self,
@@ -46,11 +46,12 @@ def image_tensor_from_numpy(
) -> torch.tensor:
"""Convert numpy image to torch tensor.
-
Args:
+ ----
rgb: [H,W,3]
Returns:
+ -------
rgb_tensor: [3,H,W] torch.tensor with dtype torch.float
"""
assert rgb.dtype == np.uint8
@@ -74,6 +75,7 @@ def get_detections(
"""Runs the detector on the given images.
Args:
+ ----
detection_th: If specified only keep detections above this
threshold.
mask_th: Threshold to use when computing masks
@@ -82,28 +84,28 @@ def get_detections(
"""
-
# [B,3,H,W]
RGB_DIMS = [0, 1, 2]
images = observation.images[:, RGB_DIMS]
# TODO (lmanuelli): Why are we splitting this up into a list of tensors?
- outputs_ = self.model([image_n for image_n in images])
+ outputs_ = self.model(list(images))
infos = []
bboxes = []
masks = []
for n, outputs_n in enumerate(outputs_):
outputs_n["labels"] = [
- self.category_id_to_label[category_id.item()] for category_id in outputs_n["labels"]
+ self.category_id_to_label[category_id.item()]
+ for category_id in outputs_n["labels"]
]
for obj_id in range(len(outputs_n["boxes"])):
bbox = outputs_n["boxes"][obj_id]
- info = dict(
- batch_im_id=n,
- label=outputs_n["labels"][obj_id],
- score=outputs_n["scores"][obj_id].item(),
- )
+ info = {
+ "batch_im_id": n,
+ "label": outputs_n["labels"][obj_id],
+ "score": outputs_n["scores"][obj_id].item(),
+ }
mask = outputs_n["masks"][obj_id, 0] > mask_th
bboxes.append(torch.as_tensor(bbox))
masks.append(torch.as_tensor(mask))
@@ -113,9 +115,14 @@ def get_detections(
bboxes = torch.stack(bboxes).cuda().float()
masks = torch.stack(masks).cuda()
else:
- infos = dict(score=[], label=[], batch_im_id=[])
+ infos = {"score": [], "label": [], "batch_im_id": []}
bboxes = torch.empty(0, 4).cuda().float()
- masks = torch.empty(0, images.shape[1], images.shape[2], dtype=torch.bool).cuda()
+ masks = torch.empty(
+ 0,
+ images.shape[1],
+ images.shape[2],
+ dtype=torch.bool,
+ ).cuda()
outputs = tc.PandasTensorCollection(
infos=pd.DataFrame(infos),
@@ -130,7 +137,8 @@ def get_detections(
# Keep only the top-detection for each class label
if one_instance_per_class:
outputs = happypose.toolbox.inference.utils.filter_detections(
- outputs, one_instance_per_class=True
+ outputs,
+ one_instance_per_class=True,
)
# Add instance_id column to dataframe
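
For context on how this detector is consumed, here is a hedged usage sketch. The `detect_objects` helper is hypothetical; `detection_th` and `one_instance_per_class` come from `get_detections` above, and `ObservationTensor.from_numpy` from the types module later in this diff (keyword usage is an assumption).

```python
import numpy as np

from happypose.toolbox.inference.types import ObservationTensor


def detect_objects(detector, rgb: np.ndarray, K: np.ndarray):
    """rgb: [H,W,3] np.uint8, K: [3,3] intrinsics -> detections.

    `detector` is assumed to be a loaded happypose Detector instance;
    model loading is outside the scope of this diff.
    """
    observation = ObservationTensor.from_numpy(rgb, depth=None, K=K).cuda()
    return detector.get_detections(
        observation,
        detection_th=0.7,              # drop low-confidence detections
        one_instance_per_class=False,  # keep multiple instances per label
    )
```
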
diff --git a/happypose/pose_estimators/megapose/inference/icp_refiner.py b/happypose/pose_estimators/megapose/inference/icp_refiner.py
index 0a1b3bec..16dba411 100644
--- a/happypose/pose_estimators/megapose/inference/icp_refiner.py
+++ b/happypose/pose_estimators/megapose/inference/icp_refiner.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
-from typing import List, Optional, Tuple
+from typing import Optional
# Third Party
import cv2
@@ -25,7 +24,6 @@
from scipy import ndimage
# MegaPose
-from happypose.pose_estimators.megapose.config import DEBUG_DATA_DIR
from happypose.pose_estimators.megapose.inference.depth_refiner import DepthRefiner
from happypose.pose_estimators.megapose.inference.refiner_utils import compute_masks
from happypose.toolbox.inference.types import PoseEstimatesType
@@ -34,11 +32,17 @@
from happypose.toolbox.renderer.types import Panda3dLightData
-def get_normal(depth_refine, fx=-1, fy=-1, cx=-1, cy=-1, bbox=np.array([0]), refine=True):
+def get_normal(
+ depth_refine,
+ fx=-1,
+ fy=-1,
+ cx=-1,
+ cy=-1,
+ bbox=np.array([0]),
+ refine=True,
+):
# Copied from https://github.com/kirumang/Pix2Pose/blob/master/pix2pose_util/common_util.py
- """
- fast normal computation
- """
+ """Fast normal computation."""
res_y = depth_refine.shape[0]
res_x = depth_refine.shape[1]
centerX = cx
@@ -64,22 +68,26 @@ def get_normal(depth_refine, fx=-1, fy=-1, cx=-1, cy=-1, bbox=np.array([0]), ref
uv_table = uv_table[bbox[0] : bbox[2], bbox[1] : bbox[3]]
v_x = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
v_y = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
- normals = np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
+ np.zeros((bbox[2] - bbox[0], bbox[3] - bbox[1], 3))
depth_refine = depth_refine[bbox[0] : bbox[2], bbox[1] : bbox[3]]
else:
v_x = np.zeros((res_y, res_x, 3))
v_y = np.zeros((res_y, res_x, 3))
- normals = np.zeros((res_y, res_x, 3))
+ np.zeros((res_y, res_x, 3))
uv_table_sign = np.copy(uv_table)
uv_table = np.abs(np.copy(uv_table))
dig = np.gradient(depth_refine, 2, edge_order=2)
v_y[:, :, 0] = uv_table_sign[:, :, 1] * constant_x * dig[0]
- v_y[:, :, 1] = depth_refine * constant_y + (uv_table_sign[:, :, 0] * constant_y) * dig[0]
+ v_y[:, :, 1] = (
+ depth_refine * constant_y + (uv_table_sign[:, :, 0] * constant_y) * dig[0]
+ )
v_y[:, :, 2] = dig[0]
- v_x[:, :, 0] = depth_refine * constant_x + uv_table_sign[:, :, 1] * constant_x * dig[1]
+ v_x[:, :, 0] = (
+ depth_refine * constant_x + uv_table_sign[:, :, 1] * constant_x * dig[1]
+ )
v_x[:, :, 1] = uv_table_sign[:, :, 0] * constant_y * dig[1]
v_x[:, :, 2] = dig[1]
@@ -126,27 +134,56 @@ def getXYZ(depth, fx, fy, cx, cy, bbox=np.array([0])):
def icp_refinement(
- depth_measured, depth_rendered, object_mask_measured, cam_K, TCO_pred, n_min_points=1000
+ depth_measured,
+ depth_rendered,
+ object_mask_measured,
+ cam_K,
+ TCO_pred,
+ n_min_points=1000,
):
# Inspired from https://github.com/kirumang/Pix2Pose/blob/843effe0097e9982f4b07dd90b04ede2b9ee9294/tools/5_evaluation_bop_icp3d.py#L57
- points_tgt = np.zeros((depth_measured.shape[0], depth_measured.shape[1], 6), np.float32)
+ points_tgt = np.zeros(
+ (depth_measured.shape[0], depth_measured.shape[1], 6),
+ np.float32,
+ )
points_tgt[:, :, :3] = getXYZ(
- depth_measured, fx=cam_K[0, 0], fy=cam_K[1, 1], cx=cam_K[0, 2], cy=cam_K[1, 2]
+ depth_measured,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
)
points_tgt[:, :, 3:] = get_normal(
- depth_measured, fx=cam_K[0, 0], fy=cam_K[1, 1], cx=cam_K[0, 2], cy=cam_K[1, 2], refine=True
+ depth_measured,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
+ refine=True,
)
depth_valid = np.logical_and(depth_measured > 0.2, depth_measured < 5)
depth_valid = np.logical_and(depth_valid, object_mask_measured)
points_tgt = points_tgt[depth_valid]
- points_src = np.zeros((depth_measured.shape[0], depth_measured.shape[1], 6), np.float32)
+ points_src = np.zeros(
+ (depth_measured.shape[0], depth_measured.shape[1], 6),
+ np.float32,
+ )
points_src[:, :, :3] = getXYZ(
- depth_rendered, cam_K[0, 0], cam_K[1, 1], cam_K[0, 2], cam_K[1, 2]
+ depth_rendered,
+ cam_K[0, 0],
+ cam_K[1, 1],
+ cam_K[0, 2],
+ cam_K[1, 2],
)
points_src[:, :, 3:] = get_normal(
- depth_rendered, fx=cam_K[0, 0], fy=cam_K[1, 1], cx=cam_K[0, 2], cy=cam_K[1, 2], refine=True
+ depth_rendered,
+ fx=cam_K[0, 0],
+ fy=cam_K[1, 1],
+ cx=cam_K[0, 2],
+ cy=cam_K[1, 2],
+ refine=True,
)
points_src = points_src[np.logical_and(depth_valid, depth_rendered > 0)]
@@ -165,7 +202,8 @@ def icp_refinement(
tolerence = 0.05
icp_fnc = cv2.ppf_match_3d_ICP(100, tolerence=tolerence, numLevels=4)
retval, residual, pose = icp_fnc.registerModelToScene(
- points_src.reshape(-1, 6), points_tgt.reshape(-1, 6)
+ points_src.reshape(-1, 6),
+ points_tgt.reshape(-1, 6),
)
TCO_pred_refined = pose @ TCO_pred_refined
TCO_pred_refined = torch.tensor(TCO_pred_refined, dtype=torch.float32).cuda()
@@ -198,9 +236,8 @@ def refine_poses(
masks: Optional[torch.tensor] = None,
depth: Optional[torch.tensor] = None,
K: Optional[torch.tensor] = None,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Runs icp refinement. See superclass DepthRefiner for full documentation."""
-
assert depth is not None
assert K is not None
@@ -250,7 +287,12 @@ def refine_poses(
mask = masks[view_id].squeeze().cpu().numpy()
TCO_refined, retval = icp_refinement(
- depth_measured, depth_rendered, mask, cam_K, TCO_pred, n_min_points=1000
+ depth_measured,
+ depth_rendered,
+ mask,
+ cam_K,
+ TCO_pred,
+ n_min_points=1000,
)
# Assign poses to predictions refined
@@ -258,5 +300,5 @@ def refine_poses(
if retval != -1:
predictions_refined.poses[n] = TCO_refined
- extra_data = dict()
+ extra_data = {}
return (predictions_refined, extra_data)
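
`icp_refinement` builds oriented point clouds by back-projecting the measured and rendered depth maps with `getXYZ`. Its body sits outside this hunk, so the following is a generic numpy sketch of the pinhole back-projection it performs:

```python
# Each pixel (u, v) with depth z maps to ((u-cx)z/fx, (v-cy)z/fy, z).
import numpy as np

def backproject(depth: np.ndarray, fx, fy, cx, cy) -> np.ndarray:
    """depth: [H,W] metric depth -> [H,W,3] camera-frame XYZ."""
    h, w = depth.shape
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    x = (u - cx) * depth / fx
    y = (v - cy) * depth / fy
    return np.stack([x, y, depth], axis=-1)

depth = np.full((480, 640), 2.0, dtype=np.float32)  # flat wall at 2 m
xyz = backproject(depth, fx=600.0, fy=600.0, cx=320.0, cy=240.0)
assert xyz.shape == (480, 640, 3)
# the principal-point pixel lands on the optical axis
assert np.allclose(xyz[240, 320], [0.0, 0.0, 2.0])
```
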
diff --git a/happypose/pose_estimators/megapose/inference/pose_estimator.py b/happypose/pose_estimators/megapose/inference/pose_estimator.py
index e54af605..e327f4bd 100644
--- a/happypose/pose_estimators/megapose/inference/pose_estimator.py
+++ b/happypose/pose_estimators/megapose/inference/pose_estimator.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,14 +13,12 @@
limitations under the License.
"""
-
from __future__ import annotations
# Standard Library
import time
from collections import defaultdict
-from dataclasses import dataclass
-from typing import Any, Optional, Tuple
+from typing import Any, Optional
# Third Party
import numpy as np
@@ -30,16 +27,10 @@
from torch.utils.data import DataLoader, TensorDataset
# MegaPose
-import happypose.pose_estimators.megapose as megapose
import happypose.toolbox.inference.utils
import happypose.toolbox.utils.tensor_collection as tc
-from happypose.pose_estimators.megapose.inference.depth_refiner import (
- DepthRefiner,
-)
-from happypose.pose_estimators.megapose.training.utils import (
- CudaTimer,
- SimpleTimer,
-)
+from happypose.pose_estimators.megapose.inference.depth_refiner import DepthRefiner
+from happypose.pose_estimators.megapose.training.utils import CudaTimer, SimpleTimer
from happypose.toolbox.inference.pose_estimator import PoseEstimationModule
from happypose.toolbox.inference.types import (
DetectionsType,
@@ -49,12 +40,16 @@
from happypose.toolbox.lib3d.cosypose_ops import TCO_init_from_boxes_autodepth_with_R
from happypose.toolbox.utils import transform_utils
from happypose.toolbox.utils.logging import get_logger
-from happypose.toolbox.utils.tensor_collection import PandasTensorCollection
+from happypose.toolbox.utils.tensor_collection import (
+ PandasTensorCollection,
+ filter_top_pose_estimates,
+)
from happypose.toolbox.utils.timer import Timer
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
class PoseEstimator(PoseEstimationModule):
"""Performs inference for pose estimation."""
@@ -69,7 +64,6 @@ def __init__(
bsz_images: int = 256,
SO3_grid_size: int = 576,
) -> None:
-
super().__init__()
self.coarse_model = coarse_model
self.refiner_model = refiner_model
@@ -90,7 +84,8 @@ def __init__(
self.cfg = self.coarse_model.cfg
self.mesh_db = self.coarse_model.mesh_db
else:
- raise ValueError("At least one of refiner_model or " " coarse_model must be specified.")
+ msg = "At least one of refiner_model or coarse_model must be specified."
+ raise ValueError(msg)
self.eval()
@@ -98,7 +93,7 @@ def __init__(
self.keep_all_coarse_outputs = False
self.refiner_outputs = None
self.coarse_outputs = None
- self.debug_dict: dict = dict()
+ self.debug_dict: dict = {}
def load_SO3_grid(self, grid_size: int) -> None:
"""Loads the SO(3) grid."""
@@ -114,14 +109,14 @@ def forward_refiner(
keep_all_outputs: bool = False,
cuda_timer: bool = False,
**refiner_kwargs,
- ) -> Tuple[dict, dict]:
+ ) -> tuple[dict, dict]:
"""Runs the refiner model for the specified number of iterations.
-
Will actually use the batched_model_predictions to stay within
batch size limit.
- Returns:
+ Returns
+ -------
(preds, extra_data)
preds:
@@ -134,7 +129,6 @@ def forward_refiner(
A dict containing additional information such as timing
"""
-
timer = Timer()
timer.start()
@@ -153,7 +147,7 @@ def forward_refiner(
model_time = 0.0
- for (batch_idx, (batch_ids,)) in enumerate(dl):
+ for batch_idx, (batch_ids,) in enumerate(dl):
data_TCO_input_ = data_TCO_input[batch_ids]
df_ = data_TCO_input_.infos
TCO_input_ = data_TCO_input_.poses
@@ -218,7 +212,8 @@ def forward_refiner(
}
logger.debug(
- f"Pose prediction on {B} poses (n_iterations={n_iterations}):" f" {timer.stop()}"
+ f"Pose prediction on {B} poses (n_iterations={n_iterations}):"
+ f" {timer.stop()}",
)
return preds, extra_data
@@ -230,16 +225,13 @@ def forward_scoring_model(
data_TCO: PoseEstimatesType,
cuda_timer: bool = False,
return_debug_data: bool = False,
- ) -> Tuple[PoseEstimatesType, dict]:
-
+ ) -> tuple[PoseEstimatesType, dict]:
"""Score the estimates using the coarse model.
-
Adds the 'pose_score' field to data_TCO.infos
Modifies PandasTensorCollection in-place.
"""
-
start_time = time.time()
assert self.coarse_model is not None
@@ -290,7 +282,7 @@ def forward_scoring_model(
images_crop_list.append(out_["images_crop"])
renders_list.append(out_["renders"])
- debug_data = dict()
+ debug_data = {}
# Combine together the data from the different batches
logits = torch.cat(logits_list)
@@ -299,8 +291,8 @@ def forward_scoring_model(
images_crop: torch.tensor = torch.cat(images_crop_list)
renders: torch.tensor = torch.cat(renders_list)
- H = images_crop.shape[2]
- W = images_crop.shape[3]
+ images_crop.shape[2]
+ images_crop.shape[3]
debug_data = {
"images_crop": images_crop,
@@ -313,7 +305,8 @@ def forward_scoring_model(
elapsed = time.time() - start_time
timing_str = (
- f"time: {elapsed:.2f}, model_time: {model_time:.2f}, render_time: {render_time:.2f}"
+ f"time: {elapsed:.2f}, model_time: {model_time:.2f}, "
+ f"render_time: {render_time:.2f}"
)
extra_data = {
@@ -337,13 +330,12 @@ def forward_coarse_model(
detections: DetectionsType,
cuda_timer: bool = False,
return_debug_data: bool = False,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Generates pose hypotheses and scores them with the coarse model.
- Generates coarse hypotheses using the SO(3) grid.
- Scores them using the coarse model.
"""
-
start_time = time.time()
happypose.toolbox.inference.types.assert_detections_valid(detections)
@@ -384,7 +376,6 @@ def forward_coarse_model(
TCO_init = []
for (batch_ids,) in dl:
-
# b = bsz_images
df_ = df_hypotheses.iloc[batch_ids.cpu().numpy()]
@@ -418,7 +409,7 @@ def forward_coarse_model(
)
del points_
-
+
out_ = coarse_model.forward_coarse(
images=images_,
K=K_,
@@ -453,7 +444,7 @@ def forward_coarse_model(
TCO = torch.cat(TCO_init)
TCO_reshape = TCO.reshape([B, M, 4, 4])
- debug_data = dict()
+ debug_data = {}
if return_debug_data:
images_crop = torch.cat(images_crop_list)
@@ -473,7 +464,8 @@ def forward_coarse_model(
elapsed = time.time() - start_time
timing_str = (
- f"time: {elapsed:.2f}, model_time: {model_time:.2f}, render_time: {render_time:.2f}"
+ f"time: {elapsed:.2f}, model_time: {model_time:.2f}, "
+ f"render_time: {render_time:.2f}"
)
extra_data = {
@@ -499,20 +491,23 @@ def forward_detection_model(
**kwargs: Any,
) -> DetectionsType:
"""Runs the detector."""
-
return self.detector_model.get_detections(observation, *args, **kwargs)
def run_depth_refiner(
self,
observation: ObservationTensor,
predictions: PoseEstimatesType,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Runs the depth refiner."""
assert self.depth_refiner is not None, "You must specify a depth refiner"
depth = observation.depth
K = observation.K
- refined_preds, extra_data = self.depth_refiner.refine_poses(predictions, depth=depth, K=K)
+ refined_preds, extra_data = self.depth_refiner.refine_poses(
+ predictions,
+ depth=depth,
+ K=K,
+ )
return refined_preds, extra_data
@@ -529,9 +524,9 @@ def run_inference_pipeline(
run_depth_refiner: bool = False,
bsz_images: Optional[int] = None,
bsz_objects: Optional[int] = None,
- cuda_timer: bool = False,
+ cuda_timer: Optional[bool] = False,
coarse_estimates: Optional[PoseEstimatesType] = None,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Runs the entire pose estimation pipeline.
Performs the following steps
@@ -543,13 +538,13 @@ def run_inference_pipeline(
5. Score refined hypotheses
6. Select highest scoring refined hypotheses.
- Returns:
+ Returns
+ -------
data_TCO_final: final predictions
data: Dict containing additional data about the different
steps in the pipeline.
"""
-
timing_str = ""
timer = SimpleTimer()
timer.start()
@@ -568,18 +563,22 @@ def run_inference_pipeline(
start_time = time.time()
detections = self.forward_detection_model(observation)
detections = detections.cuda()
- print("detections detector =", detections.bboxes)
+ print("# detections =", len(detections.bboxes))
elapsed = time.time() - start_time
timing_str += f"detection={elapsed:.2f}, "
# Ensure that detections has the instance_id column
assert detections is not None
+ assert (
+ len(detections) > 0
+ ), "TOFIX: currently, dealing with absence of detections is not supported"
detections = happypose.toolbox.inference.utils.add_instance_id(detections)
# Filter detections
if detection_filter_kwargs is not None:
detections = happypose.toolbox.inference.utils.filter_detections(
- detections, **detection_filter_kwargs
+ detections,
+ **detection_filter_kwargs,
)
# Run the coarse estimator using detections
@@ -591,9 +590,13 @@ def run_inference_pipeline(
timing_str += f"coarse={coarse_extra_data['time']:.2f}, "
# Extract top-K coarse hypotheses
- data_TCO_filtered = self.filter_pose_estimates(
- data_TCO_coarse, top_K=n_pose_hypotheses, filter_field="coarse_logit"
+ data_TCO_filtered = filter_top_pose_estimates(
+ data_TCO_coarse,
+ top_K=n_pose_hypotheses,
+ group_cols=["batch_im_id", "label", "instance_id"],
+ filter_field="coarse_logit",
)
+
else:
data_TCO_coarse = coarse_estimates
coarse_extra_data = None
@@ -618,14 +621,20 @@ def run_inference_pipeline(
timing_str += f"scoring={scoring_extra_data['time']:.2f}, "
# Extract the highest scoring pose estimate for each instance_id
- data_TCO_final_scored = self.filter_pose_estimates(
- data_TCO_scored, top_K=1, filter_field="pose_logit"
+ data_TCO_final_scored = filter_top_pose_estimates(
+ data_TCO_scored,
+ top_K=1,
+ group_cols=["batch_im_id", "label", "instance_id"],
+ filter_field="pose_logit",
)
# Optionally run ICP or TEASER++
if run_depth_refiner:
depth_refiner_start = time.time()
- data_TCO_depth_refiner, _ = self.run_depth_refiner(observation, data_TCO_final_scored)
+ data_TCO_depth_refiner, _ = self.run_depth_refiner(
+ observation,
+ data_TCO_final_scored,
+ )
data_TCO_final = data_TCO_depth_refiner
depth_refiner_time = time.time() - depth_refiner_start
timing_str += f"depth refiner={depth_refiner_time:.2f}"
@@ -636,12 +645,18 @@ def run_inference_pipeline(
timer.stop()
timing_str = f"total={timer.elapsed():.2f}, {timing_str}"
- extra_data: dict = dict()
+ extra_data: dict = {}
extra_data["coarse"] = {"preds": data_TCO_coarse, "data": coarse_extra_data}
extra_data["coarse_filter"] = {"preds": data_TCO_filtered}
- extra_data["refiner_all_hypotheses"] = {"preds": preds, "data": refiner_extra_data}
+ extra_data["refiner_all_hypotheses"] = {
+ "preds": preds,
+ "data": refiner_extra_data,
+ }
extra_data["scoring"] = {"preds": data_TCO_scored, "data": scoring_extra_data}
- extra_data["refiner"] = {"preds": data_TCO_final_scored, "data": refiner_extra_data}
+ extra_data["refiner"] = {
+ "preds": data_TCO_final_scored,
+ "data": refiner_extra_data,
+ }
extra_data["timing_str"] = timing_str
extra_data["time"] = timer.elapsed()
@@ -649,29 +664,3 @@ def run_inference_pipeline(
extra_data["depth_refiner"] = {"preds": data_TCO_depth_refiner}
return data_TCO_final, extra_data
-
- def filter_pose_estimates(
- self,
- data_TCO: PoseEstimatesType,
- top_K: int,
- filter_field: str,
- ascending: bool = False,
- ) -> PoseEstimatesType:
- """Filter the pose estimates by retaining only the top-K coarse model scores.
-
- Retain only the top_K estimates corresponding to each hypothesis_id
-
- Args:
- top_K: how many estimates to retain
- filter_field: The field to filter estimates by
- """
-
- df = data_TCO.infos
-
- group_cols = ["batch_im_id", "label", "instance_id"]
- # Logic from https://stackoverflow.com/a/40629420
- df = df.sort_values(filter_field, ascending=ascending).groupby(group_cols).head(top_K)
-
- data_TCO_filtered = data_TCO[df.index.tolist()]
-
- return data_TCO_filtered
\ No newline at end of file
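
The removed `filter_pose_estimates` method survives as the shared `filter_top_pose_estimates` helper. The pandas pattern behind it (sort by score, group by image/label/instance, keep the head) is visible in the deleted lines, sketched standalone below on a toy hypothesis table:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "batch_im_id": [0, 0, 0, 0],
        "label": ["mug", "mug", "mug", "mug"],
        "instance_id": [0, 0, 1, 1],
        "coarse_logit": [0.2, 0.9, 0.5, 0.4],
    }
)

group_cols = ["batch_im_id", "label", "instance_id"]
# Logic from https://stackoverflow.com/a/40629420, as in the removed method.
top1 = df.sort_values("coarse_logit", ascending=False).groupby(group_cols).head(1)

# One surviving hypothesis per (image, label, instance): logits 0.9 and 0.5.
assert sorted(top1["coarse_logit"]) == [0.5, 0.9]
```
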
diff --git a/happypose/pose_estimators/megapose/inference/refiner_utils.py b/happypose/pose_estimators/megapose/inference/refiner_utils.py
index 10608f1a..7905172f 100644
--- a/happypose/pose_estimators/megapose/inference/refiner_utils.py
+++ b/happypose/pose_estimators/megapose/inference/refiner_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,7 +17,6 @@
# Third Party
import numpy as np
import open3d as o3d
-import transforms3d as t3d
def numpy_to_open3d(xyz):
@@ -28,17 +26,16 @@ def numpy_to_open3d(xyz):
def compute_masks(mask_type, depth_rendered, depth_measured, depth_delta_thresh=0.1):
- """
- Function for computing masks
+ """Function for computing masks.
Args:
+ ----
mask_type: str
depth_rendered: [H,W]
depth_measured: [H,W]
depth_delta_thresh: 0.1
"""
-
mask_rendered = depth_rendered > 0
mask_measured = np.logical_and(depth_measured > 0, depth_rendered > 0)
@@ -48,7 +45,8 @@ def compute_masks(mask_type, depth_rendered, depth_measured, depth_delta_thresh=
depth_delta = np.abs(depth_measured - depth_rendered)
mask_measured[depth_delta > depth_delta_thresh] = 0
else:
- raise ValueError(f"Unknown mask type {mask_type}")
+ msg = f"Unknown mask type {mask_type}"
+ raise ValueError(msg)
# Most conservative
mask_rendered = mask_measured
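
`compute_masks` intersects validity masks and, in the thresholded variant, zeroes pixels whose rendered and measured depths disagree by more than `depth_delta_thresh`. A standalone numpy sketch of that thresholding:

```python
import numpy as np

depth_rendered = np.array([[0.0, 1.00, 1.00],
                           [1.0, 1.00, 1.00]])
depth_measured = np.array([[1.0, 1.05, 2.00],
                           [0.0, 0.98, 1.30]])

# Valid where both depths exist, then reject large rendered/measured deltas.
valid = (depth_rendered > 0) & (depth_measured > 0)
delta_ok = np.abs(depth_measured - depth_rendered) <= 0.1
mask = valid & delta_ok

assert mask.tolist() == [[False, True, False], [False, True, False]]
```
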
diff --git a/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py b/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py
index ccf476ce..c2ecab7f 100644
--- a/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py
+++ b/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
import time
-from typing import Optional, Tuple
+from typing import Optional
# Third Party
import numpy as np
@@ -27,7 +26,10 @@
# MegaPose
from happypose.pose_estimators.megapose.inference.depth_refiner import DepthRefiner
-from happypose.pose_estimators.megapose.inference.refiner_utils import compute_masks, numpy_to_open3d
+from happypose.pose_estimators.megapose.inference.refiner_utils import (
+ compute_masks,
+ numpy_to_open3d,
+)
from happypose.pose_estimators.megapose.inference.types import PoseEstimatesType
from happypose.toolbox.lib3d.rigid_mesh_database import BatchedMeshes
from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
@@ -59,13 +61,14 @@ def compute_teaserpp_refinement(
max_num_points=None,
normals_src=None,
use_farthest_point_sampling: bool = True,
- **solver_params_kwargs
+ **solver_params_kwargs,
) -> dict:
- """Compute registration using Teaser++
+ """Compute registration using Teaser++.
Follows the example of https://github.com/MIT-SPARK/TEASER-plusplus#minimal-python-3-example
Args:
+ ----
depth_src: [H,W,3]
depth_tgt: [H,W, 3]
cam_K: [3,3] intrinsics matrix
@@ -74,12 +77,12 @@ def compute_teaserpp_refinement(
normals_src: (optional) normals for the src pointcloud
Returns:
+ -------
A dict.
- 'T_tgt_src': The rigid transform that aligns src to tgt.
"""
-
if solver_params is None:
solver_params = get_solver_params(**solver_params_kwargs)
@@ -127,11 +130,11 @@ def compute_teaserpp_refinement(
pc_tgt = pc_src_mask
solver = teaserpp_python.RobustRegistrationSolver(solver_params)
- start = time.time()
+ time.time()
# teaserpp wants [3,N] pointclouds
solver.solve(pc_src.transpose(), pc_tgt.transpose())
- end = time.time()
+ time.time()
solution = solver.getSolution()
@@ -196,17 +199,18 @@ def refine_poses(
masks: Optional[torch.tensor] = None,
depth: Optional[torch.tensor] = None,
K: Optional[torch.tensor] = None,
- ) -> Tuple[PoseEstimatesType, dict]:
+ ) -> tuple[PoseEstimatesType, dict]:
"""Runs Teaserpp refiner. See superclass DepthRefiner for full documentation.
To generate correspondences for Teaser++ we use the following approach.
1. Render depth image depth_rendered at the estimated pose from predictions.
2. Generate 3D --> 3D correspondences across rendered and observed depth images.
- by assuming that pose is correctly aligned in rgb space. So depth_rendered[u,v]
- corresponds to depth_observed[u,v].
+ by assuming that pose is correctly aligned in rgb space.
+ So depth_rendered[u,v] corresponds to depth_observed[u,v].
3. Estimate a mask to filter out some outliers in our generated correspondences.
Args:
+ ----
predictions: PandasTensorCollection
Index into depth, K with batch_im_id
depth: [B, H, W]
@@ -214,7 +218,6 @@ def refine_poses(
K: [B,3,3]
"""
-
assert depth is not None
assert K is not None
@@ -281,7 +284,10 @@ def refine_poses(
TCO_refined = T_tgt_src @ TCO_pred
device = predictions_refined.poses_input[n].device
predictions_refined.poses_input[n] = predictions.poses[n].clone()
- predictions_refined.poses[n] = torch.tensor(TCO_refined, device=device)
+ predictions_refined.poses[n] = torch.tensor(
+ TCO_refined,
+ device=device,
+ )
self.debug = out
diff --git a/happypose/pose_estimators/megapose/inference/types.py b/happypose/pose_estimators/megapose/inference/types.py
index 969cdb88..3d8216f4 100644
--- a/happypose/pose_estimators/megapose/inference/types.py
+++ b/happypose/pose_estimators/megapose/inference/types.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,13 +12,12 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
-
from __future__ import annotations
-# Standard Library
from dataclasses import dataclass
-from typing import Optional, Tuple
+
+# Standard Library
+from typing import Optional
# Third Party
import numpy as np
@@ -102,10 +100,8 @@ class InferenceConfig:
@dataclass
class ObservationTensor:
- """
-
- images: [B,C,H,W] with C=3 (rgb) or C=4 (rgbd). RGB dimensions should already
- be normalized to be in [0,1] by diving the uint8 values by 255
+ """images: [B,C,H,W] with C=3 (rgb) or C=4 (rgbd). RGB dimensions should already
+ be normalized to be in [0,1] by dividing the uint8 values by 255.
K: [B,3,3] camera intrinsics
"""
@@ -122,14 +118,14 @@ def cuda(self) -> ObservationTensor:
@property
def batch_size(self) -> int:
"""Returns the batch size."""
-
return self.images.shape[0]
@property
def depth(self) -> torch.tensor:
"""Returns depth tensor.
- Returns:
+ Returns
+ -------
torch.tensor with shape [B,H,W]
"""
assert self.channel_dim == 4
@@ -141,7 +137,6 @@ def channel_dim(self) -> int:
return self.images.shape[1]
def is_valid(self) -> bool:
-
if not self.images.ndim == 4:
return False
@@ -175,12 +170,12 @@ def from_numpy(
"""Create an ObservationData type from numpy data.
Args:
+ ----
rgb: [H,W,3] np.uint8
depth: [H,W] np.float
K: [3,3] np.float
"""
-
assert rgb.dtype == np.uint8
rgb_tensor = torch.as_tensor(rgb).float() / 255
@@ -201,17 +196,17 @@ def from_numpy(
@staticmethod
def from_torch_batched(
- rgb: torch.Tensor, depth: torch.Tensor, K: torch.Tensor
+ rgb: torch.Tensor,
+ depth: torch.Tensor,
+ K: torch.Tensor,
) -> ObservationTensor:
- """
-
- Args:
+ """Args:
+ ----
rgb: [B,3,H,W] torch.uint8
depth: [B,1,H,W] torch.float
- K: [B,3,3] torch.float
+ K: [B,3,3] torch.float.
"""
-
assert rgb.dtype == torch.uint8
# [B,3,H,W]
@@ -221,7 +216,6 @@ def from_torch_batched(
# [C,H,W]
if depth is not None:
-
if depth.ndim == 3:
depth.unsqueeze(1)
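
The `ObservationTensor` docstring above pins down the expected preprocessing: RGB as [B,C,H,W] floats in [0,1], obtained by dividing the uint8 values by 255. Below is a standalone sketch mirroring what `from_numpy` does (rather than calling it, so it runs on its own):

```python
import numpy as np
import torch

rgb = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

rgb_tensor = torch.as_tensor(rgb).float() / 255    # [H,W,3] scaled to [0,1]
images = rgb_tensor.permute(2, 0, 1).unsqueeze(0)  # [1,3,H,W]

assert images.shape == (1, 3, 480, 640)
assert images.min() >= 0 and images.max() <= 1
```
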
diff --git a/happypose/pose_estimators/megapose/lib3d/__init__.py b/happypose/pose_estimators/megapose/lib3d/__init__.py
deleted file mode 100644
index 33599f44..00000000
--- a/happypose/pose_estimators/megapose/lib3d/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-# Local Folder
-from .transform import Transform
diff --git a/happypose/pose_estimators/megapose/license_files/python_license_header.txt b/happypose/pose_estimators/megapose/license_files/python_license_header.txt
index 73a7b275..1d77caef 100644
--- a/happypose/pose_estimators/megapose/license_files/python_license_header.txt
+++ b/happypose/pose_estimators/megapose/license_files/python_license_header.txt
@@ -13,4 +13,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/models/mask_rcnn.py b/happypose/pose_estimators/megapose/models/mask_rcnn.py
index 26622cc3..ef2ff3e6 100644
--- a/happypose/pose_estimators/megapose/models/mask_rcnn.py
+++ b/happypose/pose_estimators/megapose/models/mask_rcnn.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -29,7 +28,6 @@ def __init__(
backbone_str="resnet50-fpn",
anchor_sizes=((32,), (64,), (128,), (256,), (512,)),
):
-
assert backbone_str == "resnet50-fpn"
backbone = resnet_fpn_backbone("resnet50", pretrained=False)
diff --git a/happypose/pose_estimators/megapose/models/pose_rigid.py b/happypose/pose_estimators/megapose/models/pose_rigid.py
index 103ef739..3646ba16 100644
--- a/happypose/pose_estimators/megapose/models/pose_rigid.py
+++ b/happypose/pose_estimators/megapose/models/pose_rigid.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,17 +18,14 @@
import time
from collections import defaultdict
from dataclasses import dataclass
-from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from typing import Any, Callable, Optional, Union
# Third Party
import numpy as np
import torch
from torch import nn
-from happypose.pose_estimators.megapose.training.utils import (
- CudaTimer,
- SimpleTimer,
-)
+from happypose.pose_estimators.megapose.training.utils import CudaTimer, SimpleTimer
# HappyPose
from happypose.toolbox.datasets.scene_dataset import Resolution
@@ -52,18 +48,20 @@
logger = get_logger(__name__)
+
@dataclass
class PosePredictorOutputCosypose:
TCO_output: torch.Tensor
TCO_input: torch.Tensor
renders: torch.Tensor
images_crop: torch.Tensor
- labels: List[str]
+ labels: list[str]
K: torch.Tensor
K_crop: torch.Tensor
boxes_rend: torch.Tensor
boxes_crop: torch.Tensor
+
@dataclass
class PosePredictorOutput:
TCO_output: torch.Tensor
@@ -73,14 +71,14 @@ class PosePredictorOutput:
TCV_O_input: torch.Tensor
KV_crop: torch.Tensor
tCR: torch.Tensor
- labels: List[str]
+ labels: list[str]
K: torch.Tensor
K_crop: torch.Tensor
- network_outputs: Dict[str, torch.Tensor]
+ network_outputs: dict[str, torch.Tensor]
boxes_rend: torch.Tensor
boxes_crop: torch.Tensor
renderings_logits: torch.Tensor
- timing_dict: Dict[str, float]
+ timing_dict: dict[str, float]
@dataclass
@@ -134,7 +132,7 @@ def __init__(
assert isinstance(n_features, int)
# TODO: Change to torch ModuleDict
- self.heads: Dict[str, Union[torch.nn.Linear, Callable]] = dict()
+ self.heads: dict[str, Union[torch.nn.Linear, Callable]] = {}
self.predict_pose_update = predict_pose_update
if self.predict_pose_update:
self._pose_dim = 9
@@ -143,7 +141,11 @@ def __init__(
self.predict_rendered_views_logits = predict_rendered_views_logits
if self.predict_rendered_views_logits:
- self.views_logits_head = nn.Linear(n_features, self.n_rendered_views, bias=True)
+ self.views_logits_head = nn.Linear(
+ n_features,
+ self.n_rendered_views,
+ bias=True,
+ )
self.heads["renderings_logits"] = self.views_logits_head
# Dimensions for indexing into input and rendered images
@@ -175,23 +177,23 @@ def __init__(
)
self.debug = False
- self.timing_dict: Dict[str, float] = defaultdict(float)
+ self.timing_dict: dict[str, float] = defaultdict(float)
self.debug_data = PosePredictorDebugData()
@property
- def input_rgb_dims(self) -> List[int]:
+ def input_rgb_dims(self) -> list[int]:
return self._input_rgb_dims
@property
- def input_depth_dims(self) -> List[int]:
+ def input_depth_dims(self) -> list[int]:
return self._input_depth_dims
@property
- def render_rgb_dims(self) -> List[int]:
+ def render_rgb_dims(self) -> list[int]:
return self._render_rgb_dims
@property
- def render_depth_dims(self) -> List[int]:
+ def render_depth_dims(self) -> list[int]:
return self._render_depth_dims
def crop_inputs(
@@ -200,16 +202,18 @@ def crop_inputs(
K: torch.Tensor,
TCO: torch.Tensor,
tCR: torch.Tensor,
- labels: List[str],
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ labels: list[str],
+ ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""Crop input images.
- The image is cropped using the reprojection of the object points in the input pose (TCO).
+ The image is cropped using the reprojection of the object points in the input
+ pose (TCO).
The reference point reprojects to the center of the cropped image.
Please note that, unlike DeepIm, we do not explicitly use the input bounding
box for cropping.
Args:
+ ----
images (torch.Tensor): (bsz, ndims, h, w) where ndims is 3 or 4.
K (torch.Tensor): (bsz, 3, 3), intrinsics of input images
TCO (torch.Tensor): (bsz, 4, 4)
@@ -217,13 +221,13 @@ def crop_inputs(
labels (List[str]): Object labels
Returns:
+ -------
images_cropped: Images cropped and resized to self.render_size
K_crop: Intrinsics of the fictive cropped camera.
- boxes_rend: smallest bounding box defined by the reprojection of object points in
- pose TCO.
+ boxes_rend: smallest bounding box defined by the reprojection of object
+ points in pose TCO.
boxes_crop: bounding box used to crop the input image.
"""
-
bsz = images.shape[0]
assert K.shape == (bsz, 3, 3)
assert tCR.shape == (bsz, 3)
@@ -246,20 +250,29 @@ def crop_inputs(
)
K_crop = get_K_crop_resize(
- K=K.clone(), boxes=boxes_crop, orig_size=images.shape[-2:], crop_resize=self.render_size
+ K=K.clone(),
+ boxes=boxes_crop,
+ orig_size=images.shape[-2:],
+ crop_resize=self.render_size,
).detach()
if self.debug:
TCR = TCO.clone()
TCR[:, :3, -1] = tCR
self.debug_data.ref_point_uv = project_points_robust(
- torch.zeros(bsz, 1, 3).to(K.device), K, TCR
+ torch.zeros(bsz, 1, 3).to(K.device),
+ K,
+ TCR,
)
self.debug_data.origin_uv = project_points_robust(
- torch.zeros(bsz, 1, 3).to(K.device), K, TCO
+ torch.zeros(bsz, 1, 3).to(K.device),
+ K,
+ TCO,
)
self.debug_data.origin_uv_crop = project_points_robust(
- torch.zeros(bsz, 1, 3).to(K.device), K_crop, TCO
+ torch.zeros(bsz, 1, 3).to(K.device),
+ K_crop,
+ TCO,
)
return images_cropped, K_crop, boxes_rend, boxes_crop
@@ -269,12 +282,13 @@ def compute_crops_multiview(
K: torch.Tensor,
TCV_O: torch.Tensor,
tCR: torch.Tensor,
- labels: List[str],
+ labels: list[str],
) -> torch.Tensor:
"""Computes the intrinsics of the fictive camera used to
render the additional viewpoints.
Args:
+ ----
images (torch.Tensor): _description_
K (torch.Tensor): _description_
TCV_O (torch.Tensor): _description_
@@ -282,9 +296,9 @@ def compute_crops_multiview(
labels (List[str]): _description_
Returns:
+ -------
K_crop
"""
-
labels_mv = []
bsz = len(labels)
n_views = TCV_O.shape[1]
@@ -314,13 +328,20 @@ def compute_crops_multiview(
return_crops=False,
)
K_crop = get_K_crop_resize(
- K=K.clone(), boxes=boxes_crop, orig_size=images.shape[-2:], crop_resize=self.render_size
+ K=K.clone(),
+ boxes=boxes_crop,
+ orig_size=images.shape[-2:],
+ crop_resize=self.render_size,
)
K_crop = K_crop.view(bsz, n_views, 3, 3)
return K_crop
def update_pose(
- self, TCO: torch.Tensor, K_crop: torch.Tensor, pose_outputs: torch.Tensor, tCR: torch.Tensor
+ self,
+ TCO: torch.Tensor,
+ K_crop: torch.Tensor,
+ pose_outputs: torch.Tensor,
+ tCR: torch.Tensor,
) -> torch.Tensor:
assert pose_outputs.shape[-1] == 9
dR = compute_rotation_matrix_from_ortho6d(pose_outputs[:, 0:6])
@@ -328,13 +349,15 @@ def update_pose(
TCO_updated = pose_update_with_reference_point(TCO, K_crop, vxvyvz, dR, tCR)
return TCO_updated
- def net_forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
+ def net_forward(self, x: torch.Tensor) -> dict[str, torch.Tensor]:
"""Forward pass of the neural network.
Args:
+ ----
x (torch.Tensor): input tensor (images + renderings)
Returns:
+ -------
Dict[str, torch.Tensor]: Output of each network head.
"""
x = self.backbone(x)
@@ -345,14 +368,14 @@ def net_forward(self, x: torch.Tensor) -> Dict[str, torch.Tensor]:
x = x.flatten(2).mean(dim=-1)
else:
raise ValueError
- outputs = dict()
+ outputs = {}
for k, head in self.heads.items():
outputs[k] = head(x)
return outputs
def render_images_multiview(
self,
- labels: List[str],
+ labels: list[str],
TCV_O: torch.Tensor,
KV: torch.Tensor,
random_ambient_light: bool = False,
@@ -360,15 +383,16 @@ def render_images_multiview(
"""Render multiple images.
Args:
+ ----
labels: list[str] with length bsz
TCV_O: [bsz, n_views, 4, 4] pose of the cameras defining each view
KV: [bsz, n_views, 4, 4] intrinsics of the associated cameras
random_ambient_light: Whether to randomize the ambient light parameter.
- Returns
+ Returns:
+ -------
renders: [bsz, n_views*n_channels, H, W]
"""
-
labels_mv = []
bsz = len(labels)
n_views = TCV_O.shape[1]
@@ -384,12 +408,15 @@ def render_images_multiview(
Panda3dLightData(
light_type="ambient",
color=(intensity, intensity, intensity, 1.0),
- )
+ ),
]
light_datas.append(lights)
else:
if self.render_normals:
- ambient_light = Panda3dLightData(light_type="ambient", color=(1.0, 1.0, 1.0, 1.0))
+ ambient_light = Panda3dLightData(
+ light_type="ambient",
+ color=(1.0, 1.0, 1.0, 1.0),
+ )
light_datas = [[ambient_light] for _ in range(len(labels_mv))]
else:
light_datas = [make_scene_lights() for _ in range(len(labels_mv))]
@@ -421,7 +448,10 @@ def render_images_multiview(
renders = torch.cat(cat_list, dim=1)
n_channels = renders.shape[1]
- renders = renders.view(bsz, n_views, n_channels, *renders.shape[-2:]).flatten(1, 2)
+ renders = renders.view(bsz, n_views, n_channels, *renders.shape[-2:]).flatten(
+ 1,
+ 2,
+ )
return renders # [bsz, n_views*n_channels, H, W]
def normalize_images(
@@ -431,18 +461,18 @@ def normalize_images(
tCR: torch.Tensor,
images_inplace: bool = False,
renders_inplace: bool = False,
- ) -> Tuple[torch.Tensor, torch.Tensor]:
- """Normalize the depth images by the distance from the camera
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """Normalize the depth images by the distance from the camera.
If we are using depth then this involves inplace ops so to be
safe we will make copies of the tensors
Args:
+ ----
images: [bsz, C, H, W]
renders: [bsz, n_view*n_render_channels, H, W]
tCR: [bsz, 3] anchor point for rendering
"""
-
# NOTE (lmanuelli): Avoid errors with inplace ops as the same
# input might be used in multiple iterations. Since we re-crop
# on each iteration this might not be a problem but err'ing on
@@ -454,7 +484,6 @@ def normalize_images(
renders = renders.clone()
if self.input_depth:
-
if not images_inplace:
images = images.clone()
C = images.shape[1]
@@ -467,12 +496,14 @@ def normalize_images(
images[:, self._input_depth_dims] = depth_norm
if self.render_depth:
-
# Need to index into the right channels, assuming no normals
# 1-view --> depth_dims = [3]
# 2-view --> depth_dims = [3,7]
- depth_dims = self._render_depth_dims[0] + self._n_single_render_channels * torch.arange(
- 0, self.n_rendered_views
+ depth_dims = self._render_depth_dims[
+ 0
+ ] + self._n_single_render_channels * torch.arange(
+ 0,
+ self.n_rendered_views,
)
depth = renders[:, depth_dims]
@@ -481,15 +512,15 @@ def normalize_images(
return images, renders
def normalize_depth(self, depth: torch.Tensor, tCR: torch.Tensor) -> torch.Tensor:
- """
- Args:
+ """Args:
+ ----
depth: [B,-1,1,H,W]
- tCR: [B,3]
+ tCR: [B,3].
- Returns:
+ Returns
+ -------
depth_norm: same shape as depth
"""
-
# [B,]
z_norm = tCR[:, 2]
@@ -504,11 +535,13 @@ def normalize_depth(self, depth: torch.Tensor, tCR: torch.Tensor) -> torch.Tenso
elif self.depth_normalization_type == "tCR_center_clamp":
depth_norm = torch.clamp(depth - z_norm_unsqz, -2, 2)
elif self.depth_normalization_type == "tCR_center_obj_diam":
- raise NotImplementedError("Not yet implemented")
+ msg = "Not yet implemented"
+ raise NotImplementedError(msg)
elif self.depth_normalization_type == "none":
depth_norm = depth
else:
- raise ValueError(f"Unknown depth_normalization_type = {self.depth_normalization_type}")
+ msg = f"Unknown depth_normalization_type = {self.depth_normalization_type}"
+ raise ValueError(msg)
return depth_norm
@@ -516,13 +549,12 @@ def forward(
self,
images: torch.Tensor,
K: torch.Tensor,
- labels: List[str],
+ labels: list[str],
TCO: torch.Tensor,
n_iterations: int = 1,
random_ambient_light: bool = False,
- ) -> Dict[str, PosePredictorOutput]:
-
- timing_dict: Dict[str, float] = defaultdict(float)
+ ) -> dict[str, PosePredictorOutput]:
+ timing_dict: dict[str, float] = defaultdict(float)
if not self.input_depth:
# Remove the depth dimension if it is not used
@@ -535,7 +567,7 @@ def forward(
dtype = TCO.dtype
device = TCO.device
- outputs = dict()
+ outputs = {}
TCO_input = TCO
for n in range(n_iterations):
TCO_input = normalize_T(TCO_input).detach()
@@ -556,21 +588,36 @@ def forward(
n_views = TCV_O_input.shape[1]
tCV_R = TCV_O_input_flatten[..., :3, [-1]] + TCV_O_input_flatten[
- ..., :3, :3
+ ...,
+ :3,
+ :3,
] @ tOR.unsqueeze(1).repeat(1, n_views, 1).flatten(0, 1).unsqueeze(-1)
tCV_R = tCV_R.squeeze(-1).view(bsz, TCV_O_input.shape[1], 3)
images_crop, K_crop, boxes_rend, boxes_crop = self.crop_inputs(
- images, K, TCO_input, tCR, labels
+ images,
+ K,
+ TCO_input,
+ tCR,
+ labels,
)
- KV_crop = self.compute_crops_multiview(images, K, TCV_O_input, tCV_R, labels)
+ KV_crop = self.compute_crops_multiview(
+ images,
+ K,
+ TCV_O_input,
+ tCV_R,
+ labels,
+ )
if not self.remove_TCO_rendering:
KV_crop[:, 0] = K_crop
t = time.time()
renders = self.render_images_multiview(
- labels, TCV_O_input, KV_crop, random_ambient_light=random_ambient_light
+ labels,
+ TCV_O_input,
+ KV_crop,
+ random_ambient_light=random_ambient_light,
)
render_time = time.time() - t
timing_dict["render"] = render_time
@@ -586,7 +633,12 @@ def forward(
# would expect this to error out
network_outputs = self.net_forward(x)
if self.predict_pose_update:
- TCO_output = self.update_pose(TCO_input, K_crop, network_outputs["pose"], tCR)
+ TCO_output = self.update_pose(
+ TCO_input,
+ K_crop,
+ network_outputs["pose"],
+ tCR,
+ )
else:
TCO_output = TCO_input.detach().clone()
@@ -595,7 +647,10 @@ def forward(
assert not self.predict_pose_update
else:
renderings_logits = torch.empty(
- bsz, self.n_rendered_views, dtype=dtype, device=device
+ bsz,
+ self.n_rendered_views,
+ dtype=dtype,
+ device=device,
)
outputs[f"iteration={n+1}"] = PosePredictorOutput(
@@ -621,15 +676,17 @@ def forward(
return outputs
def forward_coarse_tensor(
- self, x: torch.Tensor, cuda_timer: bool = False
- ) -> Dict[str, Union[torch.Tensor, float]]:
-
+ self,
+ x: torch.Tensor,
+ cuda_timer: bool = False,
+ ) -> dict[str, Union[torch.Tensor, float]]:
"""Forward pass on coarse model given an input tensor.
The input already contains the concatenated input + rendered images and has
been appropriately normalized.
Args:
+ ----
x: [B,C,H,W] where C=9 typically. This is the concatenated
input + rendered image
@@ -654,18 +711,19 @@ def forward_coarse(
self,
images: torch.Tensor,
K: torch.Tensor,
- labels: List[str],
+ labels: list[str],
TCO_input: torch.Tensor,
cuda_timer: bool = False,
return_debug_data: bool = False,
- ) -> Dict[str, Any]:
+ ) -> dict[str, Any]:
        # TODO: Is this still necessary?
- """Run the coarse model given images + poses
+ """Run the coarse model given images + poses.
Only valid if we are using the coarse model.
Args:
+ ----
images: [B,C,H,W] torch tensor, should already be normalized to
[0,255] --> [0,1]
K: [B,3,3] camera intrinsics
@@ -674,6 +732,7 @@ def forward_coarse(
Returns:
+ -------
dict:
- logits: tensor [B,]
            - scores: tensor [B,]
@@ -682,7 +741,8 @@ def forward_coarse(
"""
### DEBUG
- has_nans = lambda tensor: bool(tensor.isnan().any())
+ def has_nans(tensor):
+ return bool(tensor.isnan().any())
assert (
self.predict_rendered_views_logits
@@ -691,26 +751,35 @@ def forward_coarse(
if not self.input_depth:
# Remove the depth dimension if it is not used
images = images[:, self.input_rgb_dims]
-
+
bsz, nchannels, h, w = images.shape
assert TCO_input.shape == (bsz, 4, 4)
assert K.shape == (bsz, 3, 3)
assert len(labels) == bsz
- if has_nans(images): print('images has NANS')
- if has_nans(K): print('K has NANS')
- if has_nans(TCO_input): print('TCO_input has NANS')
+ if has_nans(images):
+ print("images has NANS")
+ if has_nans(K):
+ print("K has NANS")
+ if has_nans(TCO_input):
+ print("TCO_input has NANS")
TCO_input = normalize_T(TCO_input).detach()
- if has_nans(TCO_input): print('TCO_input has NANS')
+ if has_nans(TCO_input):
+ print("TCO_input has NANS")
tCR = TCO_input[..., :3, -1]
images_crop, K_crop, boxes_rend, boxes_crop = self.crop_inputs(
- images, K, TCO_input, tCR, labels
+ images,
+ K,
+ TCO_input,
+ tCR,
+ labels,
)
- if has_nans(images_crop): print('images_crop has NANS')
- if has_nans(K_crop): print('K_crop has NANS')
-
+ if has_nans(images_crop):
+ print("images_crop has NANS")
+ if has_nans(K_crop):
+ print("K_crop has NANS")
# [B,1,4,4], hack to use the multi-view function
TCO_V_input = TCO_input.unsqueeze(1)
@@ -724,11 +793,14 @@ def forward_coarse(
)
render_time = time.time() - render_start
- if has_nans(renders): print('renders before norm has NANS')
+ if has_nans(renders):
+ print("renders before norm has NANS")
images_crop, renders = self.normalize_images(images_crop, renders, tCR)
- if has_nans(renders): print('renders after norm has NANS')
+ if has_nans(renders):
+ print("renders after norm has NANS")
x = torch.cat((images_crop, renders), dim=1)
- if has_nans(x): print('x has NANS')
+ if has_nans(x):
+ print("x has NANS")
out = self.forward_coarse_tensor(x, cuda_timer=cuda_timer)
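The lambda-to-def rewrite above (ruff E731) keeps the debug behavior identical. Standalone illustration of the helper:

```python
import torch

def has_nans(tensor: torch.Tensor) -> bool:
    # Equivalent to the former lambda: True if any element is NaN.
    return bool(tensor.isnan().any())

x = torch.tensor([1.0, float("nan"), 3.0])
if has_nans(x):
    print("x has NANS")  # same style as the debug prints above
```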
diff --git a/happypose/pose_estimators/megapose/models/resnet.py b/happypose/pose_estimators/megapose/models/resnet.py
index 2a3159e3..e3a2e8b6 100644
--- a/happypose/pose_estimators/megapose/models/resnet.py
+++ b/happypose/pose_estimators/megapose/models/resnet.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,10 +21,9 @@
from torch.utils.model_zoo import load_url as load_state_dict_from_url
# Standard Library
-from typing import Any, Callable, List, Optional, Type, Union
+from typing import Any, Callable, Optional, Union
# Third Party
-import torch
import torch.nn as nn
from torch import Tensor
@@ -44,9 +42,13 @@
def conv3x3(
- in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1
+ in_planes: int,
+ out_planes: int,
+ stride: int = 1,
+ groups: int = 1,
+ dilation: int = 1,
) -> nn.Conv2d:
- """3x3 convolution with padding"""
+ """3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
@@ -60,7 +62,7 @@ def conv3x3(
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
- """1x1 convolution"""
+ """1x1 convolution."""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
@@ -78,14 +80,17 @@ def __init__(
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
- super(BasicBlock, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
- raise ValueError("BasicBlock only supports groups=1 and base_width=64")
+ msg = "BasicBlock only supports groups=1 and base_width=64"
+ raise ValueError(msg)
if dilation > 1:
- raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
- # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+ msg = "Dilation > 1 not supported in BasicBlock"
+ raise NotImplementedError(msg)
+ # Both self.conv1 and self.downsample layers downsample the input
+ # when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
@@ -114,11 +119,14 @@ def forward(self, x: Tensor) -> Tensor:
class Bottleneck(nn.Module):
- # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
- # while original implementation places the stride at the first 1x1 convolution(self.conv1)
- # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
- # This variant is also known as ResNet V1.5 and improves accuracy according to
- # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+ """
+ Bottleneck in torchvision places the stride for downsampling at 3x3
+ convolution(self.conv2) while original implementation places the stride at the first
+ 1x1 convolution(self.conv1) according to "Deep residual learning for image
+ recognition"https://arxiv.org/abs/1512.03385. This variant is also known as ResNet
+ V1.5 and improves accuracy according to
+ https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch
+ """
expansion: int = 4
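A short sketch of the stride placement the new docstring describes (an illustrative helper, not the repo's classes): ResNet V1 strides at the first 1x1 conv, while V1.5 (torchvision) strides at the 3x3 conv:

```python
import torch.nn as nn

def bottleneck_convs(inplanes: int, width: int, stride: int, v1_5: bool = True):
    # V1.5 (torchvision) puts the stride on the 3x3 conv; V1 puts it on the 1x1.
    s1, s2 = (1, stride) if v1_5 else (stride, 1)
    conv1 = nn.Conv2d(inplanes, width, kernel_size=1, stride=s1, bias=False)
    conv2 = nn.Conv2d(width, width, kernel_size=3, stride=s2, padding=1, bias=False)
    return conv1, conv2

c1, c2 = bottleneck_convs(256, 64, stride=2, v1_5=True)
print(c1.stride, c2.stride)  # (1, 1) (2, 2)
```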
@@ -133,11 +141,12 @@ def __init__(
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
- super(Bottleneck, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
- # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+ # Both self.conv2 and self.downsample layers downsample the input when
+ # stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
@@ -174,17 +183,17 @@ def forward(self, x: Tensor) -> Tensor:
class ResNet(nn.Module):
def __init__(
self,
- block: Type[Union[BasicBlock, Bottleneck]],
- layers: List[int],
+ block: type[Union[BasicBlock, Bottleneck]],
+ layers: list[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
n_inputs: int = 3,
- replace_stride_with_dilation: Optional[List[bool]] = None,
+ replace_stride_with_dilation: Optional[list[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
- super(ResNet, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
@@ -196,28 +205,45 @@ def __init__(
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
- raise ValueError(
- "replace_stride_with_dilation should be None or a 3-element tuple, got {}".format(
- replace_stride_with_dilation
- )
+ msg = (
+ f"replace_stride_with_dilation should be None or a 3-element tuple, "
+ f"got {replace_stride_with_dilation}"
)
+ raise ValueError(msg)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
- n_inputs, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
+ n_inputs,
+ self.inplanes,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False,
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
- block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
+ block,
+ 128,
+ layers[1],
+ stride=2,
+ dilate=replace_stride_with_dilation[0],
)
self.layer3 = self._make_layer(
- block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
+ block,
+ 256,
+ layers[2],
+ stride=2,
+ dilate=replace_stride_with_dilation[1],
)
self.layer4 = self._make_layer(
- block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
+ block,
+ 512,
+ layers[3],
+ stride=2,
+ dilate=replace_stride_with_dilation[2],
)
for m in self.modules():
@@ -228,7 +254,8 @@ def __init__(
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
- # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+ # so that the residual branch starts with zeros, and each residual block behaves
+ # like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
@@ -239,7 +266,7 @@ def __init__(
def _make_layer(
self,
- block: Type[Union[BasicBlock, Bottleneck]],
+ block: type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
@@ -268,7 +295,7 @@ def _make_layer(
self.base_width,
previous_dilation,
norm_layer,
- )
+ ),
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
@@ -280,7 +307,7 @@ def _make_layer(
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
- )
+ ),
)
return nn.Sequential(*layers)
@@ -304,11 +331,11 @@ def forward(self, x: Tensor) -> Tensor:
def _resnet(
arch: str,
- block: Type[Union[BasicBlock, Bottleneck]],
- layers: List[int],
+ block: type[Union[BasicBlock, Bottleneck]],
+ layers: list[int],
pretrained: bool,
progress: bool,
- **kwargs: Any
+ **kwargs: Any,
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
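The trailing-comma change to `**kwargs: Any,` is cosmetic. For context, a hypothetical use of this module's `_resnet` factory (assumes the package is importable; `n_inputs` is the extra argument this variant adds over torchvision, per the constructor above):

```python
from happypose.pose_estimators.megapose.models.resnet import BasicBlock, _resnet

model = _resnet(
    "resnet18",
    BasicBlock,
    [2, 2, 2, 2],      # layer counts for a ResNet-18
    pretrained=False,  # pretrained weights only make sense for n_inputs=3
    progress=True,
    n_inputs=9,        # e.g. concatenated observed + rendered RGB channels
)
print(model.conv1)  # Conv2d(9, 64, kernel_size=(7, 7), stride=(2, 2), ...)
```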
diff --git a/happypose/pose_estimators/megapose/models/torchvision_resnet.py b/happypose/pose_estimators/megapose/models/torchvision_resnet.py
index ac4ee5b9..92c88486 100644
--- a/happypose/pose_estimators/megapose/models/torchvision_resnet.py
+++ b/happypose/pose_estimators/megapose/models/torchvision_resnet.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,12 +15,13 @@
# Standard Library
-from typing import Any, Callable, List, Optional, Type, Union
+from typing import Any, Callable, Optional, Union
# Third Party
import torch
import torch.nn as nn
from torch import Tensor
+from torch.hub import load_state_dict_from_url
__all__ = [
"ResNet",
@@ -51,9 +51,13 @@
def conv3x3(
- in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1
+ in_planes: int,
+ out_planes: int,
+ stride: int = 1,
+ groups: int = 1,
+ dilation: int = 1,
) -> nn.Conv2d:
- """3x3 convolution with padding"""
+ """3x3 convolution with padding."""
return nn.Conv2d(
in_planes,
out_planes,
@@ -67,7 +71,7 @@ def conv3x3(
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
- """1x1 convolution"""
+ """1x1 convolution."""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
@@ -85,14 +89,17 @@ def __init__(
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
- super(BasicBlock, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
- raise ValueError("BasicBlock only supports groups=1 and base_width=64")
+ msg = "BasicBlock only supports groups=1 and base_width=64"
+ raise ValueError(msg)
if dilation > 1:
- raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
- # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+ msg = "Dilation > 1 not supported in BasicBlock"
+ raise NotImplementedError(msg)
+ # Both self.conv1 and self.downsample layers downsample the input
+ # when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
@@ -121,11 +128,14 @@ def forward(self, x: Tensor) -> Tensor:
class Bottleneck(nn.Module):
- # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
- # while original implementation places the stride at the first 1x1 convolution(self.conv1)
- # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
- # This variant is also known as ResNet V1.5 and improves accuracy according to
- # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+ """
+ Bottleneck in torchvision places the stride for downsampling at 3x3
+ convolution(self.conv2) while original implementation places the stride at the first
+ 1x1 convolution(self.conv1) according to "Deep residual learning for image
+ recognition"https://arxiv.org/abs/1512.03385. This variant is also known as ResNet
+ V1.5 and improves accuracy according to
+ https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+ """
expansion: int = 4
@@ -140,11 +150,12 @@ def __init__(
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None,
) -> None:
- super(Bottleneck, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
- # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+ # Both self.conv2 and self.downsample layers downsample the input
+ # when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
@@ -181,17 +192,17 @@ def forward(self, x: Tensor) -> Tensor:
class ResNet(nn.Module):
def __init__(
self,
- block: Type[Union[BasicBlock, Bottleneck]],
- layers: List[int],
+ block: type[Union[BasicBlock, Bottleneck]],
+ layers: list[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
- replace_stride_with_dilation: Optional[List[bool]] = None,
+ replace_stride_with_dilation: Optional[list[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None,
n_input_channels: int = 3,
) -> None:
- super(ResNet, self).__init__()
+ super().__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
@@ -203,28 +214,45 @@ def __init__(
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
- raise ValueError(
- "replace_stride_with_dilation should be None or a 3-element tuple, got {}".format(
- replace_stride_with_dilation
- )
+ msg = (
+ f"replace_stride_with_dilation should be None or a 3-element tuple, "
+ f"got {replace_stride_with_dilation}"
)
+ raise ValueError(msg)
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(
- n_input_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
+ n_input_channels,
+ self.inplanes,
+ kernel_size=7,
+ stride=2,
+ padding=3,
+ bias=False,
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
- block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
+ block,
+ 128,
+ layers[1],
+ stride=2,
+ dilate=replace_stride_with_dilation[0],
)
self.layer3 = self._make_layer(
- block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
+ block,
+ 256,
+ layers[2],
+ stride=2,
+ dilate=replace_stride_with_dilation[1],
)
self.layer4 = self._make_layer(
- block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
+ block,
+ 512,
+ layers[3],
+ stride=2,
+ dilate=replace_stride_with_dilation[2],
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
@@ -237,7 +265,8 @@ def __init__(
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
- # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+ # so that the residual branch starts with zeros, and each residual block behaves
+ # like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
@@ -248,7 +277,7 @@ def __init__(
def _make_layer(
self,
- block: Type[Union[BasicBlock, Bottleneck]],
+ block: type[Union[BasicBlock, Bottleneck]],
planes: int,
blocks: int,
stride: int = 1,
@@ -277,7 +306,7 @@ def _make_layer(
self.base_width,
previous_dilation,
norm_layer,
- )
+ ),
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
@@ -289,7 +318,7 @@ def _make_layer(
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
- )
+ ),
)
return nn.Sequential(*layers)
@@ -318,11 +347,11 @@ def forward(self, x: Tensor) -> Tensor:
def _resnet(
arch: str,
- block: Type[Union[BasicBlock, Bottleneck]],
- layers: List[int],
+ block: type[Union[BasicBlock, Bottleneck]],
+ layers: list[int],
pretrained: bool,
progress: bool,
- **kwargs: Any
+ **kwargs: Any,
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
@@ -336,6 +365,7 @@ def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
`"Deep Residual Learning for Image Recognition" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
@@ -347,6 +377,7 @@ def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
`"Deep Residual Learning for Image Recognition" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
@@ -358,6 +389,7 @@ def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
`"Deep Residual Learning for Image Recognition" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
@@ -369,10 +401,18 @@ def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
`"Deep Residual Learning for Image Recognition" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
- return _resnet("resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "resnet101",
+ Bottleneck,
+ [3, 4, 23, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
@@ -380,39 +420,75 @@ def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) ->
`"Deep Residual Learning for Image Recognition" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
- return _resnet("resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "resnet152",
+ Bottleneck,
+ [3, 8, 36, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
-def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+def resnext50_32x4d(
+ pretrained: bool = False,
+ progress: bool = True,
+ **kwargs: Any,
+) -> ResNet:
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
- return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "resnext50_32x4d",
+ Bottleneck,
+ [3, 4, 6, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
-def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+def resnext101_32x8d(
+ pretrained: bool = False,
+ progress: bool = True,
+ **kwargs: Any,
+) -> ResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" `_.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
- return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "resnext101_32x8d",
+ Bottleneck,
+ [3, 4, 23, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
-def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+def wide_resnet50_2(
+ pretrained: bool = False,
+ progress: bool = True,
+ **kwargs: Any,
+) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" `_.
@@ -422,14 +498,26 @@ def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: A
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
- return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "wide_resnet50_2",
+ Bottleneck,
+ [3, 4, 6, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
-def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
+def wide_resnet101_2(
+ pretrained: bool = False,
+ progress: bool = True,
+ **kwargs: Any,
+) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" `_.
@@ -439,8 +527,16 @@ def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs:
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
+ ----
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
- return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
+ return _resnet(
+ "wide_resnet101_2",
+ Bottleneck,
+ [3, 4, 23, 3],
+ pretrained,
+ progress,
+ **kwargs,
+ )
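Unrelated to the re-wraps, the `width_per_group = 64 * 2` line in the two wide variants is the entire "wide" trick. A tiny sketch of the width arithmetic (same formula as `Bottleneck.__init__`):

```python
def bottleneck_width(planes: int, base_width: int = 64, groups: int = 1) -> int:
    # Same formula as Bottleneck.__init__: int(planes * (base_width / 64.0)) * groups
    return int(planes * (base_width / 64.0)) * groups

for planes in (64, 128, 256, 512):
    regular = bottleneck_width(planes, base_width=64)
    wide = bottleneck_width(planes, base_width=64 * 2)  # wide_resnet*_2
    print(f"planes={planes}: regular 3x3 width={regular}, wide={wide}")
```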
diff --git a/happypose/pose_estimators/megapose/models/wide_resnet.py b/happypose/pose_estimators/megapose/models/wide_resnet.py
index 55b0f3d2..8b73b588 100644
--- a/happypose/pose_estimators/megapose/models/wide_resnet.py
+++ b/happypose/pose_estimators/megapose/models/wide_resnet.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,20 +21,31 @@
def conv3x3(in_planes, out_planes, stride=1):
- """3x3 convolution with padding"""
- return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+ """3x3 convolution with padding."""
+ return nn.Conv2d(
+ in_planes,
+ out_planes,
+ kernel_size=3,
+ stride=stride,
+ padding=1,
+ bias=False,
+ )
class BasicBlockV2(nn.Module):
r"""BasicBlock V2 from
- `"Identity Mappings in Deep Residual Networks"`_ paper.
+ `"Identity Mappings in Deep Residual Networks"
+ `_ paper.
This is used for ResNet V2 for 18, 34 layers.
+
Args:
+ ----
inplanes (int): number of input channels.
planes (int): number of output channels.
stride (int): stride size.
    downsample (Module): optional downsample module to downsample the input.
"""
+
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
@@ -63,7 +73,12 @@ def __init__(self, block, layers, width, num_inputs=3, maxpool=True):
config = [int(v * width) for v in (64, 128, 256, 512)]
self.inplanes = config[0]
self.conv1 = nn.Conv2d(
- num_inputs, self.inplanes, kernel_size=5, stride=2, padding=2, bias=False
+ num_inputs,
+ self.inplanes,
+ kernel_size=5,
+ stride=2,
+ padding=2,
+ bias=False,
)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
@@ -87,14 +102,18 @@ def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Conv2d(
- self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False
+ self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False,
)
layers = [
block(self.inplanes, planes, stride, downsample),
]
self.inplanes = planes * block.expansion
- for i in range(1, blocks):
+ for _i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
@@ -116,13 +135,23 @@ def forward(self, x):
class WideResNet18(WideResNet):
def __init__(self, n_inputs=3, width=1.0):
- super().__init__(block=BasicBlockV2, layers=CONFIG[18], width=width, num_inputs=n_inputs)
+ super().__init__(
+ block=BasicBlockV2,
+ layers=CONFIG[18],
+ width=width,
+ num_inputs=n_inputs,
+ )
self.n_features = int(512 * width)
class WideResNet34(WideResNet):
def __init__(self, n_inputs=3, width=1.0):
- super().__init__(block=BasicBlockV2, layers=CONFIG[34], width=width, num_inputs=n_inputs)
+ super().__init__(
+ block=BasicBlockV2,
+ layers=CONFIG[34],
+ width=width,
+ num_inputs=n_inputs,
+ )
self.n_features = int(512 * width)
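The constructor re-wraps above change no arguments. A hedged smoke test of the class (assumes the package is importable; the 9-channel input, 224x224 size, and feature-map return are illustrative assumptions):

```python
import torch

from happypose.pose_estimators.megapose.models.wide_resnet import WideResNet18

backbone = WideResNet18(n_inputs=9, width=1.0)
x = torch.rand(1, 9, 224, 224)  # e.g. observed + rendered RGB, concatenated
features = backbone(x)          # assumed to return the final feature map
print(backbone.n_features, features.shape)
```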
diff --git a/happypose/pose_estimators/megapose/scripts/__init__.py b/happypose/pose_estimators/megapose/scripts/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/scripts/__init__.py
+++ b/happypose/pose_estimators/megapose/scripts/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/scripts/bop_calc_gt_info.py b/happypose/pose_estimators/megapose/scripts/bop_calc_gt_info.py
index 64ea52e9..b0d5587f 100644
--- a/happypose/pose_estimators/megapose/scripts/bop_calc_gt_info.py
+++ b/happypose/pose_estimators/megapose/scripts/bop_calc_gt_info.py
@@ -1,13 +1,9 @@
import argparse
-import glob
import json
import os
-import sys
-from copy import deepcopy
from pathlib import Path
import numpy as np
-import trimesh
# from bop_toolkit_lib import config
# from bop_toolkit_lib import dataset_params
@@ -16,22 +12,27 @@
################################################################################
################################################################################
parser = argparse.ArgumentParser()
-parser.add_argument('--chunk-dir', type=str)
-parser.add_argument('--shapenet-dir', type=str)
-parser.add_argument('--gso-dir', type=str)
-parser.add_argument('--renderer-type', type=str, default='cpp')
-parser.add_argument('--overwrite-models', action='store_true')
+parser.add_argument("--chunk-dir", type=str)
+parser.add_argument("--shapenet-dir", type=str)
+parser.add_argument("--gso-dir", type=str)
+parser.add_argument("--renderer-type", type=str, default="cpp")
+parser.add_argument("--overwrite-models", action="store_true")
args = parser.parse_args()
chunk_dir = Path(args.chunk_dir)
-chunk_infos = json.loads((chunk_dir / 'chunk_infos.json').read_text())
-cam_infos_path = (chunk_dir / 'bop_data/camera.json')
+chunk_infos = json.loads((chunk_dir / "chunk_infos.json").read_text())
+cam_infos_path = chunk_dir / "bop_data/camera.json"
cam_infos = json.loads(cam_infos_path.read_text())
-scene_gt_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/scene_gt.json')
-scene_gt_info_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/scene_gt_info.json')
-depth_gt_info_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/depth/{im_id:06d}.png')
-vis_mask_visib_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/mask_visib/{im_id:06d}_{inst_id:06d}.png')
+scene_gt_tpath = chunk_dir / "bop_data/train_pbr/{scene_id:06d}/scene_gt.json"
+scene_gt_info_tpath = chunk_dir / "bop_data/train_pbr/{scene_id:06d}/scene_gt_info.json"
+depth_gt_info_tpath = (
+ chunk_dir / "bop_data/train_pbr/{scene_id:06d}/depth/{im_id:06d}.png"
+)
+vis_mask_visib_tpath = (
+ chunk_dir
+ / "bop_data/train_pbr/{scene_id:06d}/mask_visib/{im_id:06d}_{inst_id:06d}.png"
+)
if args.shapenet_dir:
shapenet_dir = Path(args.shapenet_dir)
@@ -39,81 +40,100 @@
else:
is_shapenet = False
gso_dir = Path(args.gso_dir)
-scale = chunk_infos['scale']
+scale = chunk_infos["scale"]
-p = dict(
- dataset=chunk_dir,
- dataset_split='train_pbr',
- dataset_split_type='train_pbr',
+p = {
+ "dataset": chunk_dir,
+ "dataset_split": "train_pbr",
+ "dataset_split_type": "train_pbr",
# renderer_type='python',
- delta=15,
-)
-p['renderer_type'] = args.renderer_type
+ "delta": 15,
+}
+p["renderer_type"] = args.renderer_type
# Initialize a renderer.
-im_width, im_height = cam_infos['width'], cam_infos['height']
+im_width, im_height = cam_infos["width"], cam_infos["height"]
ren_width, ren_height = 3 * im_width, 3 * im_height
ren_cx_offset, ren_cy_offset = im_width, im_height
large_ren = renderer.create_renderer(
- ren_width, ren_height, p['renderer_type'],
- mode='depth')
+ ren_width,
+ ren_height,
+ p["renderer_type"],
+ mode="depth",
+)
-misc.log('Initializing renderer...')
-obj_name_to_id = dict()
-for obj_id, obj in enumerate(chunk_infos['scene_infos']['objects']):
+misc.log("Initializing renderer...")
+obj_name_to_id = {}
+for obj_id, obj in enumerate(chunk_infos["scene_infos"]["objects"]):
if is_shapenet:
- synset_id, source_id = obj['synset_id'], obj['source_id']
- obj_name = obj['category_id']
- ply_path = Path(shapenet_dir) / f'{synset_id}/{source_id}' / 'models/model_normalized_scaled.ply'
+ synset_id, source_id = obj["synset_id"], obj["source_id"]
+ obj_name = obj["category_id"]
+ ply_path = (
+ Path(shapenet_dir)
+ / f"{synset_id}/{source_id}"
+ / "models/model_normalized_scaled.ply"
+ )
else:
- obj_name = obj['category_id']
- gso_id = obj_name.split('gso_')[1]
- ply_path = Path(gso_dir) / f'{gso_id}' / 'meshes/model.ply'
+ obj_name = obj["category_id"]
+ gso_id = obj_name.split("gso_")[1]
+ ply_path = Path(gso_dir) / f"{gso_id}" / "meshes/model.ply"
obj_name_to_id[obj_name] = obj_id
large_ren.add_object(obj_id, str(ply_path))
scene_ids = [0]
-misc.log(f'Processing scene ids: {scene_ids}')
+misc.log(f"Processing scene ids: {scene_ids}")
for scene_id in scene_ids:
# Load scene info and ground-truth poses.
- scene_dir = chunk_dir / f'bop_data/train_pbr/{scene_id:06d}'
- scene_camera = inout.load_scene_camera(scene_dir / 'scene_camera.json')
+ scene_dir = chunk_dir / f"bop_data/train_pbr/{scene_id:06d}"
+ scene_camera = inout.load_scene_camera(scene_dir / "scene_camera.json")
scene_gt = inout.load_scene_gt(str(scene_gt_tpath).format(scene_id=scene_id))
scene_gt_info = {}
im_ids = sorted(scene_gt.keys())
for im_counter, im_id in enumerate(im_ids):
- depth_path = str(scene_dir / f'depth/{im_id:06d}.png')
- K = scene_camera[im_id]['cam_K']
+ depth_path = str(scene_dir / f"depth/{im_id:06d}.png")
+ K = scene_camera[im_id]["cam_K"]
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
# Load depth image.
depth_im = inout.load_depth(depth_path)
- depth_im *= scene_camera[im_id]['depth_scale'] # to [mm]
+ depth_im *= scene_camera[im_id]["depth_scale"] # to [mm]
dist_im = misc.depth_im_to_dist_im_fast(depth_im, K)
im_size = (depth_im.shape[1], depth_im.shape[0])
-
# Calc gt info
if im_counter % 5 == 0:
misc.log(
- 'Calculating GT info - dataset: {} ({}, {}), scene: {}, im: {}'.format(
- p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id, im_id))
+ "Calculating GT info - dataset: {} ({}, {}), scene: {}, im: {}".format(
+ p["dataset"],
+ p["dataset_split"],
+ p["dataset_split_type"],
+ scene_id,
+ im_id,
+ ),
+ )
scene_gt_info[im_id] = []
- for gt_id, gt in enumerate(scene_gt[im_id]):
- if gt['obj_id'] not in obj_name_to_id:
+ for _gt_id, gt in enumerate(scene_gt[im_id]):
+ if gt["obj_id"] not in obj_name_to_id:
continue
# Render depth image of the object model in the ground-truth pose.
depth_gt_large = large_ren.render_object(
- obj_name_to_id[gt['obj_id']], gt['cam_R_m2c'], gt['cam_t_m2c'],
- fx, fy, cx + ren_cx_offset, cy + ren_cy_offset)['depth']
+ obj_name_to_id[gt["obj_id"]],
+ gt["cam_R_m2c"],
+ gt["cam_t_m2c"],
+ fx,
+ fy,
+ cx + ren_cx_offset,
+ cy + ren_cy_offset,
+ )["depth"]
depth_gt = depth_gt_large[
- ren_cy_offset:(ren_cy_offset + im_height),
- ren_cx_offset:(ren_cx_offset + im_width)]
+ ren_cy_offset : (ren_cy_offset + im_height),
+ ren_cx_offset : (ren_cx_offset + im_width),
+ ]
# Convert depth images to distance images.
# dist_gt = misc.depth_im_to_dist_im(depth_gt, K)
@@ -124,7 +144,11 @@
# Estimation of the visibility mask.
visib_gt = visibility.estimate_visib_mask_gt(
- dist_im, dist_gt, p['delta'], visib_mode='bop19')
+ dist_im,
+ dist_gt,
+ p["delta"],
+ visib_mode="bop19",
+ )
# Mask of the object in the GT pose.
obj_mask_gt_large = depth_gt_large > 0
@@ -163,14 +187,16 @@
bbox_visib = misc.calc_2d_bbox(xs, ys, im_size)
# Store the calculated info.
- scene_gt_info[im_id].append({
- 'px_count_all': int(px_count_all),
- 'px_count_valid': int(px_count_valid),
- 'px_count_visib': int(px_count_visib),
- 'visib_fract': float(visib_fract),
- 'bbox_obj': [int(e) for e in bbox],
- 'bbox_visib': [int(e) for e in bbox_visib]
- })
+ scene_gt_info[im_id].append(
+ {
+ "px_count_all": int(px_count_all),
+ "px_count_valid": int(px_count_valid),
+ "px_count_visib": int(px_count_visib),
+ "visib_fract": float(visib_fract),
+ "bbox_obj": [int(e) for e in bbox],
+ "bbox_visib": [int(e) for e in bbox_visib],
+ },
+ )
# Save the info for the current scene.
scene_gt_info_path = str(scene_gt_info_tpath).format(scene_id=scene_id)
misc.ensure_dir(os.path.dirname(scene_gt_info_path))
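The reformatted dict literal stores per-instance visibility statistics. A small sketch of how `visib_fract` relates to the pixel counts (an assumed relationship, mirroring the keys written to `scene_gt_info.json`):

```python
import numpy as np

mask_gt = np.zeros((480, 640), dtype=bool)  # full silhouette of one GT instance
mask_gt[100:200, 100:300] = True
mask_visib = mask_gt.copy()
mask_visib[100:150, :] = False              # upper half occluded by another object

px_count_all = int(mask_gt.sum())           # pixels in the full silhouette
px_count_visib = int(mask_visib.sum())      # pixels surviving the occlusion test
visib_fract = px_count_visib / px_count_all if px_count_all else 0.0
print(px_count_all, px_count_visib, round(visib_fract, 3))  # 20000 10000 0.5
```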
diff --git a/happypose/pose_estimators/megapose/scripts/bop_calc_masks.py b/happypose/pose_estimators/megapose/scripts/bop_calc_masks.py
index 73410480..1f51d471 100644
--- a/happypose/pose_estimators/megapose/scripts/bop_calc_masks.py
+++ b/happypose/pose_estimators/megapose/scripts/bop_calc_masks.py
@@ -1,32 +1,32 @@
import argparse
-import glob
import json
-import os
-import sys
-from copy import deepcopy
from pathlib import Path
import numpy as np
-import trimesh
from bop_toolkit_lib import inout, misc, renderer, visibility
parser = argparse.ArgumentParser()
-parser.add_argument('--chunk-dir', type=str)
-parser.add_argument('--shapenet-dir', type=str)
-parser.add_argument('--gso-dir', type=str)
-parser.add_argument('--renderer-type', type=str, default='cpp')
-parser.add_argument('--overwrite-models', action='store_true')
+parser.add_argument("--chunk-dir", type=str)
+parser.add_argument("--shapenet-dir", type=str)
+parser.add_argument("--gso-dir", type=str)
+parser.add_argument("--renderer-type", type=str, default="cpp")
+parser.add_argument("--overwrite-models", action="store_true")
args = parser.parse_args()
chunk_dir = Path(args.chunk_dir)
-chunk_infos = json.loads((chunk_dir / 'chunk_infos.json').read_text())
-cam_infos_path = (chunk_dir / 'bop_data/camera.json')
+chunk_infos = json.loads((chunk_dir / "chunk_infos.json").read_text())
+cam_infos_path = chunk_dir / "bop_data/camera.json"
cam_infos = json.loads(cam_infos_path.read_text())
-scene_gt_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/scene_gt.json')
-scene_gt_info_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/scene_gt_info.json')
-depth_gt_info_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/depth/{im_id:06d}.png')
-vis_mask_visib_tpath = (chunk_dir / 'bop_data/train_pbr/{scene_id:06d}/mask_visib/{im_id:06d}_{inst_id:06d}.png')
+scene_gt_tpath = chunk_dir / "bop_data/train_pbr/{scene_id:06d}/scene_gt.json"
+scene_gt_info_tpath = chunk_dir / "bop_data/train_pbr/{scene_id:06d}/scene_gt_info.json"
+depth_gt_info_tpath = (
+ chunk_dir / "bop_data/train_pbr/{scene_id:06d}/depth/{im_id:06d}.png"
+)
+vis_mask_visib_tpath = (
+ chunk_dir
+ / "bop_data/train_pbr/{scene_id:06d}/mask_visib/{im_id:06d}_{inst_id:06d}.png"
+)
if args.shapenet_dir:
shapenet_dir = Path(args.shapenet_dir)
@@ -34,82 +34,96 @@
else:
is_shapenet = False
gso_dir = Path(args.gso_dir)
-scale = chunk_infos['scale']
+scale = chunk_infos["scale"]
-p = dict(
- dataset=chunk_dir,
- dataset_split='train_pbr',
- dataset_split_type='train_pbr',
+p = {
+ "dataset": chunk_dir,
+ "dataset_split": "train_pbr",
+ "dataset_split_type": "train_pbr",
# renderer_type='python',
- delta=15,
-)
-p['renderer_type'] = args.renderer_type
+ "delta": 15,
+}
+p["renderer_type"] = args.renderer_type
# Initialize a renderer.
-im_width, im_height = cam_infos['width'], cam_infos['height']
+im_width, im_height = cam_infos["width"], cam_infos["height"]
ren_width, ren_height = 3 * im_width, 3 * im_height
ren_cx_offset, ren_cy_offset = im_width, im_height
-ren = renderer.create_renderer(
- im_width, im_height, p['renderer_type'],
- mode='depth')
+ren = renderer.create_renderer(im_width, im_height, p["renderer_type"], mode="depth")
-misc.log('Initializing renderer...')
-obj_name_to_id = dict()
-for obj_id, obj in enumerate(chunk_infos['scene_infos']['objects']):
+misc.log("Initializing renderer...")
+obj_name_to_id = {}
+for obj_id, obj in enumerate(chunk_infos["scene_infos"]["objects"]):
if is_shapenet:
- synset_id, source_id = obj['synset_id'], obj['source_id']
- obj_name = obj['category_id']
- ply_path = Path(shapenet_dir) / f'{synset_id}/{source_id}' / 'models/model_normalized_scaled.ply'
+ synset_id, source_id = obj["synset_id"], obj["source_id"]
+ obj_name = obj["category_id"]
+ ply_path = (
+ Path(shapenet_dir)
+ / f"{synset_id}/{source_id}"
+ / "models/model_normalized_scaled.ply"
+ )
else:
- obj_name = obj['category_id']
- gso_id = obj_name.split('gso_')[1]
- ply_path = Path(gso_dir) / f'{gso_id}' / 'meshes/model.ply'
+ obj_name = obj["category_id"]
+ gso_id = obj_name.split("gso_")[1]
+ ply_path = Path(gso_dir) / f"{gso_id}" / "meshes/model.ply"
obj_name_to_id[obj_name] = obj_id
ren.add_object(obj_id, str(ply_path))
scene_ids = [0]
-misc.log(f'Processing scene ids: {scene_ids}')
+misc.log(f"Processing scene ids: {scene_ids}")
for scene_id in scene_ids:
# Load scene info and ground-truth poses.
- scene_dir = chunk_dir / f'bop_data/train_pbr/{scene_id:06d}'
- scene_camera = inout.load_scene_camera(scene_dir / 'scene_camera.json')
+ scene_dir = chunk_dir / f"bop_data/train_pbr/{scene_id:06d}"
+ scene_camera = inout.load_scene_camera(scene_dir / "scene_camera.json")
scene_gt = inout.load_scene_gt(str(scene_gt_tpath).format(scene_id=scene_id))
- mask_dir_path = str(scene_dir / 'mask')
+ mask_dir_path = str(scene_dir / "mask")
misc.ensure_dir(mask_dir_path)
- mask_visib_dir_path = str(scene_dir / 'mask_visib')
+ mask_visib_dir_path = str(scene_dir / "mask_visib")
misc.ensure_dir(mask_visib_dir_path)
scene_gt_info = {}
im_ids = sorted(scene_gt.keys())
for im_counter, im_id in enumerate(im_ids):
- depth_path = str(scene_dir / f'depth/{im_id:06d}.png')
+ depth_path = str(scene_dir / f"depth/{im_id:06d}.png")
# 1. Calc masks
if im_counter % 5 == 0:
misc.log(
- 'Calculating masks - dataset: {} ({}, {}), scene: {}, im: {}'.format(
- p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id, im_id))
-
- K = scene_camera[im_id]['cam_K']
+ "Calculating masks - dataset: {} ({}, {}), scene: {}, im: {}".format(
+ p["dataset"],
+ p["dataset_split"],
+ p["dataset_split_type"],
+ scene_id,
+ im_id,
+ ),
+ )
+
+ K = scene_camera[im_id]["cam_K"]
fx, fy, cx, cy = K[0, 0], K[1, 1], K[0, 2], K[1, 2]
# Load depth image.
depth_im = inout.load_depth(depth_path)
- depth_im *= scene_camera[im_id]['depth_scale'] # to [mm]
+ depth_im *= scene_camera[im_id]["depth_scale"] # to [mm]
dist_im = misc.depth_im_to_dist_im_fast(depth_im, K)
im_size = (depth_im.shape[1], depth_im.shape[0])
for gt_id, gt in enumerate(scene_gt[im_id]):
- if gt['obj_id'] not in obj_name_to_id:
+ if gt["obj_id"] not in obj_name_to_id:
continue
# Render the depth image
depth_gt = ren.render_object(
- obj_name_to_id[gt['obj_id']], gt['cam_R_m2c'], gt['cam_t_m2c'],
- fx, fy, cx, cy)['depth']
+ obj_name_to_id[gt["obj_id"]],
+ gt["cam_R_m2c"],
+ gt["cam_t_m2c"],
+ fx,
+ fy,
+ cx,
+ cy,
+ )["depth"]
# Convert depth image to distance image.
dist_gt = misc.depth_im_to_dist_im_fast(depth_gt, K)
@@ -119,11 +133,17 @@
# Mask of the visible part of the object silhouette.
mask_visib = visibility.estimate_visib_mask_gt(
- dist_im, dist_gt, p['delta'], visib_mode='bop19')
+ dist_im,
+ dist_gt,
+ p["delta"],
+ visib_mode="bop19",
+ )
# Save the calculated masks.
- mask_path = str(Path(mask_dir_path) / f'{im_id:06d}_{gt_id:06d}.png')
+ mask_path = str(Path(mask_dir_path) / f"{im_id:06d}_{gt_id:06d}.png")
inout.save_im(mask_path, 255 * mask.astype(np.uint8))
- mask_visib_path = str(Path(mask_visib_dir_path) / f'{im_id:06d}_{gt_id:06d}.png')
+ mask_visib_path = str(
+ Path(mask_visib_dir_path) / f"{im_id:06d}_{gt_id:06d}.png",
+ )
inout.save_im(mask_visib_path, 255 * mask_visib.astype(np.uint8))
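The f-string conversions above keep BOP's mask naming scheme: one PNG per (image, ground-truth instance) pair, zero-padded to six digits. A quick illustration:

```python
from pathlib import Path

mask_dir = Path("mask_visib")
for im_id, gt_id in [(0, 0), (0, 1), (12, 3)]:
    # Same convention as above: f"{im_id:06d}_{gt_id:06d}.png"
    print(mask_dir / f"{im_id:06d}_{gt_id:06d}.png")
# e.g. mask_visib/000000_000000.png ... mask_visib/000012_000003.png
```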
diff --git a/happypose/pose_estimators/megapose/scripts/distributed.py b/happypose/pose_estimators/megapose/scripts/distributed.py
index c99561aa..8333c67f 100644
--- a/happypose/pose_estimators/megapose/scripts/distributed.py
+++ b/happypose/pose_estimators/megapose/scripts/distributed.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import datetime
import os
@@ -126,8 +124,7 @@ def init_distributed_mode():
def reduce_dict(input_dict, average=True):
- """
- https://github.com/pytorch/vision/blob/master/references/detection/utils.py
+ """https://github.com/pytorch/vision/blob/master/references/detection/utils.py
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
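For readers unfamiliar with the helper documented above, a single-process sketch of what `reduce_dict` computes (with `world_size == 1` the all-reduce is a no-op; keys are sorted so every rank would stack values in the same order):

```python
import torch

def reduce_dict_single_process(input_dict: dict, average: bool = True) -> dict:
    world_size = 1  # dist.get_world_size() in the real, multi-process code
    names = sorted(input_dict.keys())  # consistent ordering across ranks
    values = torch.stack([input_dict[k] for k in names])
    # dist.all_reduce(values) would sum the stacked values across ranks here.
    if average:
        values = values / world_size
    return dict(zip(names, values))

losses = {"loss_pose": torch.tensor(0.4), "loss_score": torch.tensor(0.1)}
print(reduce_dict_single_process(losses))
```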
diff --git a/happypose/pose_estimators/megapose/scripts/download.py b/happypose/pose_estimators/megapose/scripts/download.py
deleted file mode 100644
index 64630214..00000000
--- a/happypose/pose_estimators/megapose/scripts/download.py
+++ /dev/null
@@ -1,266 +0,0 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-
-# Standard Library
-import argparse
-import logging
-import subprocess
-import zipfile
-from pathlib import Path
-
-# Third Party
-import wget
-
-# MegaPose
-from happypose.pose_estimators.megapose.bop_config import (
- PBR_COARSE,
- PBR_DETECTORS,
- PBR_REFINER,
- SYNT_REAL_COARSE,
- SYNT_REAL_DETECTORS,
- SYNT_REAL_REFINER,
-)
-from happypose.pose_estimators.megapose.config import BOP_DS_DIR, LOCAL_DATA_DIR, PROJECT_DIR
-from happypose.toolbox.utils.logging import get_logger
-
-logger = get_logger(__name__)
-
-RCLONE_CFG_PATH = PROJECT_DIR / "megapose" / "rclone.conf"
-RCLONE_ROOT = "megapose:"
-DOWNLOAD_DIR = LOCAL_DATA_DIR / "downloads"
-DOWNLOAD_DIR.mkdir(exist_ok=True)
-
-BOP_SRC = "https://bop.felk.cvut.cz/media/data/bop_datasets/"
-BOP_DATASETS = {
- "hope": {"test_splits": ["test_all", "val"]},
- "ycbv": {
- "test_splits": ["test_all"],
- "train_splits": ["train_real", "train_synt"],
- },
- "tless": {
- "test_splits": [
- "test_primesense_all",
- ],
- "train_splits": [
- "train_primesense",
- ],
- },
- "hb": {
- "test_splits": ["test_primesense_all", "val_primesense"],
- },
- "icbin": {
- "test_splits": ["test_all"],
- },
- "itodd": {
- "test_splits": ["val", "test_all"],
- },
- "lm": {
- "test_splits": ["test_all"],
- },
- "lmo": {
- "test_splits": ["test_all"],
- "has_pbr": False,
- },
- "tudl": {
- "test_splits": [
- "test_all",
- ],
- "train_splits": [
- "train_real",
- ],
- },
- "ruapc": {
- "test_splits": [
- "test_all",
- ]
- },
- "tyol": {
- "test_splits": [
- "test_all",
- ]
- },
-}
-
-BOP_DS_NAMES = list(BOP_DATASETS.keys())
-
-
-def main():
- parser = argparse.ArgumentParser("Megapose download utility")
- parser.add_argument("--bop_dataset", default="", type=str, choices=BOP_DS_NAMES)
- parser.add_argument("--bop_src", default="bop", type=str, choices=["bop", "gdrive"])
- parser.add_argument("--bop_extra_files", default="", type=str, choices=["ycbv", "tless"])
- parser.add_argument("--model", default="", type=str)
- parser.add_argument("--urdf_models", default="", type=str)
- parser.add_argument("--ycbv_compat_models", action="store_true")
- parser.add_argument("--texture_dataset", action="store_true")
- parser.add_argument("--result_id", default="", type=str)
- parser.add_argument("--bop_result_id", default="", type=str)
- parser.add_argument("--synt_dataset", default="", type=str)
- parser.add_argument("--detections", default="", type=str)
- parser.add_argument("--pbr_training_images", action="store_true")
- parser.add_argument("--train_splits", action="store_true")
- parser.add_argument("--all_bop20_results", action="store_true")
- parser.add_argument("--all_bop20_models", action="store_true")
-
- parser.add_argument("--debug", action="store_true")
- args = parser.parse_args()
- if args.debug:
- logger.setLevel(logging.DEBUG)
-
- if args.bop_dataset:
- if args.bop_src == "bop":
- download_bop_original(
- args.bop_dataset,
- args.pbr_training_images and BOP_DATASETS[args.bop_dataset].get("has_pbr", True),
- args.train_splits,
- )
- elif args.bop_src == "gdrive":
- download_bop_gdrive(args.bop_dataset)
-
- if args.bop_extra_files:
- if args.bop_extra_files == "tless":
- # https://github.com/kirumang/Pix2Pose#download-pre-trained-weights
- gdrive_download(f"bop_datasets/tless/all_target_tless.json", BOP_DS_DIR / "tless")
- elif args.bop_extra_files == "ycbv":
- # Friendly names used with YCB-Video
- gdrive_download(f"bop_datasets/ycbv/ycbv_friendly_names.txt", BOP_DS_DIR / "ycbv")
- # Offsets between YCB-Video and BOP (extracted from BOP readme)
- gdrive_download(f"bop_datasets/ycbv/offsets.txt", BOP_DS_DIR / "ycbv")
- # Evaluation models for YCB-Video (used by other works)
- gdrive_download(f"bop_datasets/ycbv/models_original", BOP_DS_DIR / "ycbv")
- # Keyframe definition
- gdrive_download(f"bop_datasets/ycbv/keyframe.txt", BOP_DS_DIR / "ycbv")
-
- if args.urdf_models:
- gdrive_download(f"urdfs/{args.urdf_models}", LOCAL_DATA_DIR / "urdfs")
-
- if args.ycbv_compat_models:
- gdrive_download(f"bop_datasets/ycbv/models_bop-compat", BOP_DS_DIR / "ycbv")
- gdrive_download(f"bop_datasets/ycbv/models_bop-compat_eval", BOP_DS_DIR / "ycbv")
-
- if args.model:
- gdrive_download(f"experiments/{args.model}", LOCAL_DATA_DIR / "experiments")
-
- if args.detections:
- gdrive_download(
- f"saved_detections/{args.detections}.pkl", LOCAL_DATA_DIR / "saved_detections"
- )
-
- if args.result_id:
- gdrive_download(f"results/{args.result_id}", LOCAL_DATA_DIR / "results")
-
- if args.bop_result_id:
- csv_name = args.bop_result_id + ".csv"
- gdrive_download(f"bop_predictions/{csv_name}", LOCAL_DATA_DIR / "bop_predictions")
- gdrive_download(
- f"bop_eval_outputs/{args.bop_result_id}", LOCAL_DATA_DIR / "bop_predictions"
- )
-
- if args.texture_dataset:
- gdrive_download("zip_files/textures.zip", DOWNLOAD_DIR)
- logger.info("Extracting textures ...")
- zipfile.ZipFile(DOWNLOAD_DIR / "textures.zip").extractall(
- LOCAL_DATA_DIR / "texture_datasets"
- )
-
- if args.synt_dataset:
- zip_name = f"{args.synt_dataset}.zip"
- gdrive_download(f"zip_files/{zip_name}", DOWNLOAD_DIR)
- logger.info("Extracting textures ...")
- zipfile.ZipFile(DOWNLOAD_DIR / zip_name).extractall(LOCAL_DATA_DIR / "synt_datasets")
-
- if args.all_bop20_models:
- for model_dict in (
- PBR_DETECTORS,
- PBR_COARSE,
- PBR_REFINER,
- SYNT_REAL_DETECTORS,
- SYNT_REAL_COARSE,
- SYNT_REAL_REFINER,
- ):
- for model in model_dict.values():
- gdrive_download(f"experiments/{model}", LOCAL_DATA_DIR / "experiments")
-
- if args.all_bop20_results:
- # SRL
- # Third Party
- from nerfpose.bop_config import (
- PBR_INFERENCE_ID,
- SYNT_REAL_4VIEWS_INFERENCE_ID,
- SYNT_REAL_8VIEWS_INFERENCE_ID,
- SYNT_REAL_ICP_INFERENCE_ID,
- SYNT_REAL_INFERENCE_ID,
- )
-
- for result_id in (
- PBR_INFERENCE_ID,
- SYNT_REAL_INFERENCE_ID,
- SYNT_REAL_ICP_INFERENCE_ID,
- SYNT_REAL_4VIEWS_INFERENCE_ID,
- SYNT_REAL_8VIEWS_INFERENCE_ID,
- ):
- gdrive_download(f"results/{result_id}", LOCAL_DATA_DIR / "results")
-
-
-def run_rclone(cmd, args, flags):
- rclone_cmd = ["rclone", cmd] + args + flags + ["--config", str(RCLONE_CFG_PATH)]
- logger.debug(" ".join(rclone_cmd))
- subprocess.run(rclone_cmd)
-
-
-def gdrive_download(gdrive_path, local_path):
- gdrive_path = Path(gdrive_path)
- if gdrive_path.name != local_path.name:
- local_path = local_path / gdrive_path.name
- rclone_path = RCLONE_ROOT + str(gdrive_path)
- local_path = str(local_path)
- logger.info(f"Copying {rclone_path} to {local_path}")
- run_rclone("copyto", [rclone_path, local_path], flags=["-P"])
-
-
-def download_bop_original(ds_name, download_pbr, download_train):
- filename = f"{ds_name}_base.zip"
- wget_download_and_extract(BOP_SRC + filename, BOP_DS_DIR)
-
- suffixes = ["models"] + BOP_DATASETS[ds_name]["test_splits"]
- if download_pbr:
- suffixes += ["train_pbr"]
- if download_train:
- suffixes += BOP_DATASETS[ds_name].get("train_splits", [])
- for suffix in suffixes:
- wget_download_and_extract(BOP_SRC + f"{ds_name}_{suffix}.zip", BOP_DS_DIR / ds_name)
-
-
-def download_bop_gdrive(ds_name):
- gdrive_download(f"bop_datasets/{ds_name}", BOP_DS_DIR / ds_name)
-
-
-def wget_download_and_extract(url, out):
- tmp_path = DOWNLOAD_DIR / url.split("/")[-1]
- if tmp_path.exists():
- logger.info(f"{url} already downloaded: {tmp_path}...")
- else:
- logger.info(f"Download {url} at {tmp_path}...")
- wget.download(url, out=tmp_path.as_posix())
- logger.info(f"Extracting {tmp_path} at {out}.")
- zipfile.ZipFile(tmp_path).extractall(out)
-
-
-if __name__ == "__main__":
- loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
- main()
diff --git a/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py b/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py
index 8ef29a0e..fc2a9df4 100644
--- a/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py
+++ b/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,14 +19,14 @@
import os
import shutil
import subprocess
-from copy import deepcopy
from pathlib import Path
-from re import I
# Third Party
import numpy as np
import torch.distributed as dist
-import yaml
+from bop_toolkit_lib.dataset.convert_scenewise_to_imagewise import (
+ convert_scene_to_imagewise,
+)
from colorama import Fore, Style
from omegaconf import OmegaConf
from tqdm import tqdm
@@ -36,10 +35,8 @@
from happypose.pose_estimators.megapose.config import (
BLENDER_INSTALL_DIR,
BLENDERPROC_DIR,
- BOP_TOOLKIT_DIR,
GSO_DIR,
GSO_NORMALIZED_DIR,
- GSO_ORIG_DIR,
LOCAL_DATA_DIR,
MEMORY,
PROJECT_DIR,
@@ -48,35 +45,25 @@
)
# from happypose.toolbox.datasets.bop import BOPDataset
-from happypose.toolbox.datasets.gso_dataset import (
- GoogleScannedObjectDataset,
- make_gso_infos,
-)
+from happypose.toolbox.datasets.gso_dataset import make_gso_infos
# from happypose.toolbox.datasets.hdf5_scene_dataset import write_scene_ds_as_hdf5
from happypose.toolbox.datasets.shapenet_object_dataset import (
ShapeNetObjectDataset,
make_shapenet_infos,
)
-from happypose.toolbox.datasets.web_scene_dataset import write_scene_ds_as_wds
-from happypose.toolbox.utils.distributed import (
- get_rank,
- get_tmp_dir,
- init_distributed_mode,
-)
+from happypose.toolbox.utils.distributed import get_rank, init_distributed_mode
from happypose.toolbox.utils.logging import get_logger
-from bop_toolkit_lib.dataset.convert_scenewise_to_imagewise import convert_scene_to_imagewise
logger = get_logger(__name__)
CC_TEXTURE_FOLDER = str(LOCAL_DATA_DIR / "cctextures")
VERBOSE_KWARGS = {
- True: dict(stdout=None, stderr=None),
- False: dict(stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL),
+ True: {"stdout": None, "stderr": None},
+ False: {"stdout": subprocess.DEVNULL, "stderr": subprocess.DEVNULL},
}
SHAPENET_ORIG_DIR = SHAPENET_DIR / "models_orig"
SHAPENET_SCALED_DIR = SHAPENET_DIR / "models_bop-renderer_scale=0.1"
-GSO_ORIG_DIR = GSO_DIR / "models_orig"
GSO_SCALED_DIR = GSO_DIR / "models_bop-renderer_scale=0.1"
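The dict-literal rewrite of `VERBOSE_KWARGS` is behavior-preserving (it swaps `dict(...)` calls for literals, per ruff C408). How the pattern gates subprocess output:

```python
import subprocess

VERBOSE_KWARGS = {
    True: {"stdout": None, "stderr": None},  # inherit the parent's streams
    False: {"stdout": subprocess.DEVNULL, "stderr": subprocess.DEVNULL},  # silence
}

verbose = False
subprocess.run(
    ["python", "-c", "print('hidden when verbose=False')"],
    **VERBOSE_KWARGS[verbose],
)
```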
@@ -105,7 +92,7 @@ def make_initializer(output_dir):
"config": {
"global": {
"output_dir": str(output_dir),
- }
+ },
},
}
@@ -155,7 +142,7 @@ def make_box_scene(used_assets=[]):
"location": [0, 0, 10],
"scale": [3, 3, 1],
},
- ]
+ ],
},
},
{
@@ -171,7 +158,12 @@ def make_box_scene(used_assets=[]):
"min": [0.5, 0.5, 0.5, 1.0],
"max": [1.0, 1.0, 1.0, 1.0],
},
- "strength": {"provider": "sampler.Value", "type": "float", "min": 3, "max": 6},
+ "strength": {
+ "provider": "sampler.Value",
+ "type": "float",
+ "min": 3,
+ "max": 6,
+ },
},
},
},
@@ -185,7 +177,10 @@ def make_box_scene(used_assets=[]):
{
"module": "manipulators.EntityManipulator",
"config": {
- "selector": {"provider": "getter.Entity", "conditions": {"name": "ground_plane.*"}},
+ "selector": {
+ "provider": "getter.Entity",
+ "conditions": {"name": "ground_plane.*"},
+ },
"mode": "once_for_all",
"cf_randomize_materials": {
"randomization_level": 1,
@@ -200,7 +195,10 @@ def make_box_scene(used_assets=[]):
{
"module": "manipulators.EntityManipulator",
"config": {
- "selector": {"provider": "getter.Entity", "conditions": {"name": ".*plane.*"}},
+ "selector": {
+ "provider": "getter.Entity",
+ "conditions": {"name": ".*plane.*"},
+ },
"cp_physics": False,
"cp_physics_collision_shape": "BOX",
"cp_category_id": 333,
@@ -230,7 +228,8 @@ def make_shapenet_loader(synset_id, category_id, source_id=None, scale=None):
{
"module": "manipulators.EntityManipulator",
"config": {
- # get all shape net objects, as we have only loaded one, this returns only one entity
+ # get all shape net objects, as we have only loaded one, this returns
+ # only one entity
"selector": {
"provider": "getter.Entity",
"conditions": {
@@ -269,7 +268,8 @@ def make_gso_loader(obj_id, category_id, scale=None):
{
"module": "manipulators.EntityManipulator",
"config": {
- # get all shape net objects, as we have only loaded one, this returns only one entity
+ # get all shape net objects, as we have only loaded one, this returns
+ # only one entity
"selector": {
"provider": "getter.Entity",
"conditions": {
@@ -309,7 +309,10 @@ def make_object_pose_sampler():
object_pose_sampler = {
"module": "object.ObjectPoseSampler",
"config": {
- "objects_to_sample": {"provider": "getter.Entity", "conditions": {"cp_physics": True}},
+ "objects_to_sample": {
+ "provider": "getter.Entity",
+ "conditions": {"cp_physics": True},
+ },
"pos_sampler": {
"provider": "sampler.Uniform3d",
"min": {
@@ -338,8 +341,10 @@ def make_light_sampler(radius_min=1, radius_max=1.5, energy=100):
"location": {
"provider": "sampler.Shell",
"center": [0, 0, 0],
- "radius_min": radius_min, # now depends on the bottom area of the box
- "radius_max": radius_max, # this one too
+                    # radius_min now depends on the bottom area of the box
+                    "radius_min": radius_min,
+                    # radius_max does as well
+ "radius_max": radius_max,
"elevation_min": 5,
"elevation_max": 89,
"uniform_elevation": True,
@@ -351,8 +356,8 @@ def make_light_sampler(radius_min=1, radius_max=1.5, energy=100):
},
"type": "POINT",
"energy": 100,
- }
- ]
+ },
+ ],
},
}
return light_sampler
@@ -430,7 +435,7 @@ def make_camera_sampler(cam_intrinsics, num_samples=25, radius_min=0.4, radius_m
"max": 3.14159,
},
},
- }
+ },
],
},
}
@@ -452,7 +457,9 @@ def make_writer(depth_scale=0.1, ignore_dist_thresh=5.0):
"append_to_existing_output": False,
"depth_scale": depth_scale,
"ignore_dist_thres": ignore_dist_thresh,
- "postprocessing_modules": {"distance": [{"module": "postprocessing.Dist2Depth"}]},
+ "postprocessing_modules": {
+ "distance": [{"module": "postprocessing.Dist2Depth"}],
+ },
},
}
@@ -470,9 +477,9 @@ def make_script(output_dir, objects, textures, cfg, seed):
[fx, 0, cx],
[0, fy, cy],
[0, 0, 1],
- ]
+ ],
).tolist()
- intrinsics = dict(cam_K=K, resolution_x=w, resolution_y=h)
+ intrinsics = {"cam_K": K, "resolution_x": w, "resolution_y": h}
modules = [
make_initializer(output_dir),
@@ -489,7 +496,9 @@ def make_script(output_dir, objects, textures, cfg, seed):
)
elif obj["category_id"].startswith("gso"):
modules += make_gso_loader(
- obj_id=obj["obj_id"], scale=obj["scale"], category_id=obj["category_id"]
+ obj_id=obj["obj_id"],
+ scale=obj["scale"],
+ category_id=obj["category_id"],
)
else:
raise ValueError(obj)
@@ -498,7 +507,10 @@ def make_script(output_dir, objects, textures, cfg, seed):
make_material_randomization(),
make_object_pose_sampler(),
make_physics_positioning(),
- make_light_sampler(radius_min=cfg.light_radius_min, radius_max=cfg.light_radius_max),
+ make_light_sampler(
+ radius_min=cfg.light_radius_min,
+ radius_max=cfg.light_radius_max,
+ ),
make_camera_sampler(
cam_intrinsics=intrinsics,
num_samples=cfg.camera_num_samples_per_chunk,
@@ -523,7 +535,9 @@ def run_script(script, script_path, verbose=True):
env["BLENDER_PROC_RANDOM_SEED"] = str(seed)
run_path = BLENDERPROC_DIR / "run.py"
subprocess.run(
- [str(PYTHON_BIN_PATH), str(run_path), str(script_path)], env=env, **VERBOSE_KWARGS[verbose]
+ [str(PYTHON_BIN_PATH), str(run_path), str(script_path)],
+ env=env,
+ **VERBOSE_KWARGS[verbose],
)
return
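# A sketch (not part of the patch) of run_script's seeding mechanism: the seed
# travels to the child process through an environment variable rather than a
# command-line flag. Plain Python stands in for BlenderProc's run.py here.
import os
import subprocess
import sys

def run_seeded(seed: int) -> None:
    env = os.environ.copy()
    env["BLENDER_PROC_RANDOM_SEED"] = str(seed)  # consumed by the child
    subprocess.run(
        [sys.executable, "-c", "import os; print(os.environ['BLENDER_PROC_RANDOM_SEED'])"],
        env=env,
        check=True,
    )

run_seeded(42)  # the child prints "42"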
@@ -531,7 +545,9 @@ def run_script(script, script_path, verbose=True):
@MEMORY.cache
def load_textures_names():
texture_names = [
- p.name for p in Path(CC_TEXTURE_FOLDER).iterdir() if len(list(p.glob("*2K_Color.jpg"))) > 0
+ p.name
+ for p in Path(CC_TEXTURE_FOLDER).iterdir()
+ if len(list(p.glob("*2K_Color.jpg"))) > 0
]
return texture_names
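# A sketch (not part of the patch) of the folder filter in load_textures_names:
# keep only texture directories that ship a 2K color map. The "cctextures"
# path is a placeholder.
from pathlib import Path

def list_texture_names(folder: str = "cctextures") -> list:
    root = Path(folder)
    if not root.is_dir():
        return []
    return [
        p.name
        for p in root.iterdir()
        # any() stops at the first match instead of materializing the glob
        if p.is_dir() and any(p.glob("*2K_Color.jpg"))
    ]

print(list_texture_names())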
@@ -546,26 +562,26 @@ def make_one_scene_script(cfg, output_dir, seed):
if len(synset.parents) == 0 and len(synset.models_descendants) > 0
]
objects = []
- for n in range(cfg.n_objects):
+ for _n in range(cfg.n_objects):
synset = np_random.choice(main_synsets)
source_id = np_random.choice(synset.models_descendants)
- obj = dict(
- synset_id=synset.synset_id,
- source_id=source_id,
- category_id=f"shapenet_{synset.synset_id}_{source_id}",
- scale=[cfg.scale, cfg.scale, cfg.scale],
- )
+ obj = {
+ "synset_id": synset.synset_id,
+ "source_id": source_id,
+ "category_id": f"shapenet_{synset.synset_id}_{source_id}",
+ "scale": [cfg.scale, cfg.scale, cfg.scale],
+ }
objects.append(obj)
elif cfg.model_type == "gso":
object_ids = make_gso_infos(GSO_NORMALIZED_DIR)
objects = []
- for n in range(cfg.n_objects):
+ for _n in range(cfg.n_objects):
obj_id = np_random.choice(object_ids)
- obj = dict(
- obj_id=obj_id,
- category_id=f"gso_{obj_id}",
- scale=[cfg.scale, cfg.scale, cfg.scale],
- )
+ obj = {
+ "obj_id": obj_id,
+ "category_id": f"gso_{obj_id}",
+ "scale": [cfg.scale, cfg.scale, cfg.scale],
+ }
objects.append(obj)
else:
raise ValueError(cfg.model_type)
@@ -574,12 +590,15 @@ def make_one_scene_script(cfg, output_dir, seed):
this_scene_floor_textures = [np_random.choice(textures)]
script = make_script(output_dir, objects, this_scene_floor_textures, cfg, seed)
script["seed"] = seed
- scene_infos = dict(objects=objects, floor_textures=this_scene_floor_textures, seed=seed)
+ scene_infos = {
+ "objects": objects,
+ "floor_textures": this_scene_floor_textures,
+ "seed": seed,
+ }
return scene_infos, script
def make_masks_and_gt_infos(chunk_dir, is_shapenet=True, verbose=True):
- bop_toolkit_dir = BOP_TOOLKIT_DIR
env = os.environ.copy()
# env["PYTHONPATH"] = env.get("PYTHONPATH", "") + ":" + str(bop_toolkit_dir)
# env["COSYPOSE_DIR"] = str(PROJECT_DIR)
@@ -630,7 +649,7 @@ def make_dataset_cfg(cfg):
cfg.n_scenes = 2
- cfg.hardware = dict()
+ cfg.hardware = {}
cfg.hardware.world_size = int(os.environ.get("WORLD_SIZE", 1))
cfg.hardware.rank = int(os.environ.get("RANK", 0))
cfg.hardware.n_proc_per_gpu = 3
@@ -667,14 +686,20 @@ def make_dataset_cfg(cfg):
if cfg.resume_dataset is not None:
logger.info(f"{Fore.RED}Resuming {cfg.resume_dataset} {Style.RESET_ALL}")
resume_cfg = OmegaConf.load(
- LOCAL_DATA_DIR / "blender_pbr_datasets" / cfg.resume_dataset / "config.yaml"
+ LOCAL_DATA_DIR
+ / "blender_pbr_datasets"
+ / cfg.resume_dataset
+ / "config.yaml",
)
resume_cfg = OmegaConf.merge(
- resume_cfg, OmegaConf.masked_copy(cfg, ["resume_dataset", "hardware", "verbose"])
+ resume_cfg,
+ OmegaConf.masked_copy(cfg, ["resume_dataset", "hardware", "verbose"]),
)
cfg = resume_cfg
else:
- logger.info(f"{Fore.GREEN}Recording dataset: {cfg.dataset_id} {Style.RESET_ALL}")
+ logger.info(
+ f"{Fore.GREEN}Recording dataset: {cfg.dataset_id} {Style.RESET_ALL}",
+ )
if cfg.debug:
cfg.camera_num_samples_per_chunk = 5
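# A sketch (not part of the patch) of the resume logic above: reload the config
# saved with the dataset, then re-apply only the runtime keys of the current
# invocation via OmegaConf.masked_copy, so everything else comes from the
# original run. Keys and values here are illustrative.
from omegaconf import OmegaConf

saved = OmegaConf.create({"dataset_id": "ds1", "n_scenes": 50, "verbose": False})
current = OmegaConf.create({"dataset_id": "ds1", "n_scenes": 2, "verbose": True})

resumed = OmegaConf.merge(saved, OmegaConf.masked_copy(current, ["verbose"]))
assert resumed.n_scenes == 50  # from the saved run
assert resumed.verbose is True  # from the current invocation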
@@ -703,13 +728,13 @@ def record_chunk(cfg, ds_dir, chunk_id):
# Generate data with Blender
run_script(script, script_path, verbose=cfg.verbose)
- chunk_info = dict(
- chunk_id=chunk_id,
- script_path=str(script_path),
- output_dir=str(output_dir),
- scene_infos=scene_infos,
- scale=cfg["scale"],
- )
+ chunk_info = {
+ "chunk_id": chunk_id,
+ "script_path": str(script_path),
+ "output_dir": str(output_dir),
+ "scene_infos": scene_infos,
+ "scale": cfg["scale"],
+ }
gt_path = output_dir / f"bop_data/train_pbr/{0:06d}/scene_gt.json"
gt = json.loads(gt_path.read_text())
for im_id, im_gt in gt.items():
@@ -721,69 +746,83 @@ def record_chunk(cfg, ds_dir, chunk_id):
# Generate masks and gt infos
success = make_masks_and_gt_infos(
- output_dir, verbose=cfg.verbose, is_shapenet=cfg.model_type == "shapenet"
+ output_dir,
+ verbose=cfg.verbose,
+ is_shapenet=cfg.model_type == "shapenet",
)
# Third Party
if success:
chunk_scene_dir = output_dir / f"bop_data/train_pbr/{0:06d}"
convert_scene_to_imagewise(
- chunk_scene_dir, ds_dir / "train_pbr_v2format", f"{chunk_id:06d}_" + "{image_id:06d}"
+ chunk_scene_dir,
+ ds_dir / "train_pbr_v2format",
+ f"{chunk_id:06d}_" + "{image_id:06d}",
)
shutil.rmtree(output_dir)
return
- # # HDF5 dataset generation
- # if cfg.save_hdf5:
- # shutil.copy(
- # ds_dir / "shapenet_labels.json", output_dir / "bop_data" / "shapenet_labels.json"
- # )
- # scene_ds = BOPDataset(
- # output_dir / "bop_data",
- # split="train_pbr",
- # load_depth=True,
- # allow_cache=False,
- # per_view_annotations=False,
- # )
- # write_scene_ds_as_hdf5(
- # scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/data.hdf5", n_reading_workers=4
- # )
-
- # if cfg.save_webdataset:
- # shutil.copy(
- # ds_dir / "shapenet_labels.json", output_dir / "bop_data" / "shapenet_labels.json"
- # )
- # scene_ds = BOPDataset(
- # output_dir / "bop_data",
- # split="train_pbr",
- # load_depth=True,
- # allow_cache=False,
- # per_view_annotations=False,
- # )
- # write_scene_ds_as_wds(
- # scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/", n_reading_workers=4
- # )
-
- # # Move everything to base directory
- # chunk_scene_dir = output_dir / f"bop_data/train_pbr/{0:06d}"
- # train_pbr_dir = ds_dir / "train_pbr"
- # target_dir = train_pbr_dir / f"{chunk_id:06d}"
- # if target_dir.exists():
- # shutil.rmtree(target_dir)
- # if cfg.save_files and success:
- # shutil.copytree(chunk_scene_dir, target_dir)
- # if cfg.save_hdf5 and success:
- # target_dir.mkdir(exist_ok=True)
- # shutil.copy(chunk_scene_dir / "data.hdf5", target_dir / "data.hdf5")
- # if cfg.save_webdataset and success:
- # target_dir.mkdir(exist_ok=True)
- # shutil.copy(chunk_scene_dir / "shard-00000000.tar", target_dir / "shard-00000000.tar")
- # shutil.copy(chunk_scene_dir / "ds_infos.json", target_dir / "ds_infos.json")
- # shutil.rmtree(output_dir)
- # return
+
+"""
+ # HDF5 dataset generation
+ if cfg.save_hdf5:
+ shutil.copy(
+ ds_dir / "shapenet_labels.json",
+ output_dir / "bop_data" / "shapenet_labels.json",
+ )
+ scene_ds = BOPDataset(
+ output_dir / "bop_data",
+ split="train_pbr",
+ load_depth=True,
+ allow_cache=False,
+ per_view_annotations=False,
+ )
+ write_scene_ds_as_hdf5(
+ scene_ds,
+ output_dir / f"bop_data/train_pbr/{0:06d}/data.hdf5",
+ n_reading_workers=4,
+ )
+ if cfg.save_webdataset:
+ shutil.copy(
+ ds_dir / "shapenet_labels.json",
+ output_dir / "bop_data" / "shapenet_labels.json",
+ )
+ scene_ds = BOPDataset(
+ output_dir / "bop_data",
+ split="train_pbr",
+ load_depth=True,
+ allow_cache=False,
+ per_view_annotations=False,
+ )
+ write_scene_ds_as_wds(
+ scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/", n_reading_workers=4
+ )
+
+ # Move everything to base directory
+ chunk_scene_dir = output_dir / f"bop_data/train_pbr/{0:06d}"
+ train_pbr_dir = ds_dir / "train_pbr"
+ target_dir = train_pbr_dir / f"{chunk_id:06d}"
+ if target_dir.exists():
+ shutil.rmtree(target_dir)
+ if cfg.save_files and success:
+ shutil.copytree(chunk_scene_dir, target_dir)
+ if cfg.save_hdf5 and success:
+ target_dir.mkdir(exist_ok=True)
+ shutil.copy(chunk_scene_dir / "data.hdf5", target_dir / "data.hdf5")
+ if cfg.save_webdataset and success:
+ target_dir.mkdir(exist_ok=True)
+ shutil.copy(
+ chunk_scene_dir / "shard-00000000.tar", target_dir / "shard-00000000.tar"
+ )
+ shutil.copy(chunk_scene_dir / "ds_infos.json", target_dir / "ds_infos.json")
+ shutil.rmtree(output_dir)
+ return
+"""
def find_chunks_to_record(cfg, chunk_ids):
- this_chunk_ids = np.array_split(chunk_ids, cfg.hardware.world_size)[cfg.hardware.rank].tolist()
+ this_chunk_ids = np.array_split(chunk_ids, cfg.hardware.world_size)[
+ cfg.hardware.rank
+ ].tolist()
chunk_ids = []
for chunk_id in this_chunk_ids:
if not (Path(cfg.ds_dir) / f"train_pbr/{chunk_id:06d}").exists():
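# A sketch (not part of the patch) of the work distribution in
# find_chunks_to_record: np.array_split partitions the chunk ids across
# world_size workers (slices may differ in length by one), and each rank keeps
# only its own slice.
import numpy as np

def chunks_for_rank(chunk_ids, world_size, rank):
    return np.array_split(np.asarray(chunk_ids), world_size)[rank].tolist()

all_chunks = list(range(10))
assert chunks_for_rank(all_chunks, world_size=3, rank=0) == [0, 1, 2, 3]
assert chunks_for_rank(all_chunks, world_size=3, rank=2) == [7, 8, 9]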
@@ -793,15 +832,15 @@ def find_chunks_to_record(cfg, chunk_ids):
def main(cli_cfg):
cfg = OmegaConf.create(
- dict(
- dataset_id="test",
- resume_dataset=None,
- debug=False,
- verbose=False,
- overwrite=False,
- few=False,
- chunk_ids=None,
- )
+ {
+ "dataset_id": "test",
+ "resume_dataset": None,
+ "debug": False,
+ "verbose": False,
+ "overwrite": False,
+ "few": False,
+ "chunk_ids": None,
+ },
)
if cli_cfg is not None:
cfg = OmegaConf.merge(
@@ -824,7 +863,8 @@ def main(cli_cfg):
elif cfg.overwrite:
shutil.rmtree(cfg.ds_dir)
else:
- raise ValueError("There is already a dataset with this name")
+ msg = "There is already a dataset with this name"
+ raise ValueError(msg)
if cfg.resume_dataset is None:
ds_dir.mkdir(exist_ok=cfg.chunk_ids is not None)
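# A sketch (not part of the patch) of main()'s defaults-then-overrides pattern:
# build a default config, then merge user-supplied values on top so they win.
# OmegaConf.from_dotlist stands in for however the CLI config is actually
# parsed upstream.
from omegaconf import OmegaConf

defaults = OmegaConf.create(
    {"dataset_id": "test", "debug": False, "overwrite": False}
)
cli = OmegaConf.from_dotlist(["dataset_id=gso_1M", "debug=true"])
cfg = OmegaConf.merge(defaults, cli)
assert cfg.dataset_id == "gso_1M" and cfg.debug is True and cfg.overwrite is False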
diff --git a/happypose/pose_estimators/megapose/scripts/make_gso_meshes.py b/happypose/pose_estimators/megapose/scripts/make_gso_meshes.py
index e58e46da..e5a72564 100644
--- a/happypose/pose_estimators/megapose/scripts/make_gso_meshes.py
+++ b/happypose/pose_estimators/megapose/scripts/make_gso_meshes.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,14 +14,10 @@
"""
-
# Standard Library
import shutil
-import time
from collections import defaultdict
-from concurrent.futures import ProcessPoolExecutor as Pool
from copy import deepcopy
-from multiprocessing import Process, Queue
from pathlib import Path
# Third Party
@@ -32,7 +27,6 @@
# MegaPose
from happypose.pose_estimators.megapose.config import (
- GSO_DIR,
GSO_NORMALIZED_DIR,
GSO_ORIG_DIR,
GSO_POINTCLOUD_DIR,
@@ -92,21 +86,20 @@ def rescale_mesh(mesh_path):
vertices[:, 2] -= (zmax + zmin) / 2.0
vertices[:, :3] /= scale
- out = elements["mtllib"][0]
-
+ elements["mtllib"][0]
- faces = elements["faces"]
+ elements["faces"]
text = elements["mtllib"][0]
text += "\n\n"
for vertex_line in vertices.tolist():
- line = ["v"] + list(map(str, vertex_line))
+ line = ["v", *list(map(str, vertex_line))]
text += " ".join(line)
text += "\n"
text += "\n"
for normal_line in normals.tolist():
- line = ["vn"] + list(map(str, normal_line))
+ line = ["vn", *list(map(str, normal_line))]
text += " ".join(line)
text += "\n"
@@ -140,7 +133,10 @@ def make_ply_scaled(obj_id, scale=SCALE):
new_mesh_dir = Path(GSO_SCALED_DIR) / obj_id / "meshes"
new_mesh_path = new_mesh_dir / "model.ply"
mesh = trimesh.load(
- str(mesh_dir / "model.obj"), skip_materials=True, process=False, maintain_order=True
+ str(mesh_dir / "model.obj"),
+ skip_materials=True,
+ process=False,
+ maintain_order=True,
)
mesh = as_mesh(mesh)
mesh.apply_scale(scale)
@@ -155,7 +151,10 @@ def make_obj_pc(obj_id):
new_mesh_dir = Path(GSO_POINTCLOUD_DIR) / obj_id / "meshes"
new_mesh_path = new_mesh_dir / "model.obj"
mesh = trimesh.load(
- str(mesh_dir / "model.obj"), skip_materials=True, process=False, maintain_order=True
+ str(mesh_dir / "model.obj"),
+ skip_materials=True,
+ process=False,
+ maintain_order=True,
)
mesh = as_mesh(mesh)
points = trimesh.sample.sample_surface(mesh, n_points)[0]
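# A sketch (not part of the patch) of the pointcloud extraction in make_obj_pc,
# using a procedurally generated box instead of a GSO mesh so it runs without
# the dataset (requires trimesh).
import trimesh

mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
points = trimesh.sample.sample_surface(mesh, 2000)[0]  # (2000, 3) surface points
print(points.shape)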
@@ -167,7 +166,7 @@ def make_obj_pc(obj_id):
if __name__ == "__main__":
trimesh.util.log.setLevel("ERROR")
obj_dataset = make_object_dataset("gso.orig")
- for n, obj in tqdm(enumerate(obj_dataset.objects)):
+ for _n, obj in tqdm(enumerate(obj_dataset.objects)):
obj_id = obj["label"].split("gso_")[1]
make_obj_normalized(obj_id)
make_ply_scaled(obj_id)
diff --git a/happypose/pose_estimators/megapose/scripts/make_gso_subsets.py b/happypose/pose_estimators/megapose/scripts/make_gso_subsets.py
index a0a9e0ba..28c64e45 100644
--- a/happypose/pose_estimators/megapose/scripts/make_gso_subsets.py
+++ b/happypose/pose_estimators/megapose/scripts/make_gso_subsets.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,19 +14,15 @@
"""
-
# Standard Library
import json
-from pathlib import Path
# Third Party
import numpy as np
import pandas as pd
-from tqdm import tqdm
# MegaPose
from happypose.pose_estimators.megapose.config import GSO_DIR
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
def get_labels_split(statistics, max_model_mem_kb, max_tot_mem_kb):
@@ -35,7 +30,9 @@ def get_labels_split(statistics, max_model_mem_kb, max_tot_mem_kb):
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
statistics = statistics.iloc[np.where(np.isfinite(statistics["tot_mem_kb"]))[0]]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
- statistics = statistics.iloc[np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]]
+ statistics = statistics.iloc[
+ np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]
+ ]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
np_random = np.random.RandomState(0)
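# A sketch (not part of the patch) of the filtering above: drop rows whose
# memory estimate is not finite, then keep only models under the per-model
# budget. The data is made up; the column name matches the statistics table.
import numpy as np
import pandas as pd

statistics = pd.DataFrame(
    {"label": ["a", "b", "c", "d"], "tot_mem_kb": [2e3, 50e3, np.nan, 9e3]}
)
statistics = statistics.iloc[np.where(np.isfinite(statistics["tot_mem_kb"]))[0]]
statistics = statistics.iloc[np.where(statistics["tot_mem_kb"] <= 10e3)[0]]
print(statistics["label"].tolist())  # ['a', 'd']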
@@ -63,76 +60,76 @@ def get_labels_split_max_objects(statistics, max_num_objects):
statistics = pd.read_json(ds_stats_path)
splits = [
- dict(
- name="gso",
- max_model_mem_kb=10e3,
- max_num_objects=1000,
- ),
- dict(
- name="shapenet_10mb_5k",
- max_model_mem_kb=10e3,
- max_num_objects=5000,
- ),
- dict(
- name="shapenet_10mb_10k",
- max_model_mem_kb=10e3,
- max_num_objects=10000,
- ),
- dict(
- name="shapenet_10mb_15k",
- max_model_mem_kb=10e3,
- max_num_objects=15000,
- ),
- dict(
- name="shapenet_100mb_200gb",
- max_model_mem_kb=100e3,
- max_tot_mem_kb=200e6,
- ),
- dict(
- name="shapenet_10mb_200gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=200e6,
- ),
- dict(
- name="shapenet_10mb_50gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=50e6,
- ),
- dict(
- name="shapenet_20mb_50gb",
- max_model_mem_kb=20e3,
- max_tot_mem_kb=50e6,
- ),
- dict(
- name="shapenet_10mb_100gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=100e6,
- ),
- dict(
- name="shapenet_10mb_32gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=32e6,
- ),
- dict(
- name="shapenet_2mb_32gb",
- max_model_mem_kb=2e3,
- max_tot_mem_kb=32e6,
- ),
- dict(
- name="shapenet_10mb_8gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=8e6,
- ),
- dict(
- name="shapenet_10mb_1gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=1e6,
- ),
- dict(
- name="shapenet_2mb_1gb",
- max_model_mem_kb=2e3,
- max_tot_mem_kb=1e6,
- ),
+ {
+ "name": "gso",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 1000,
+ },
+ {
+ "name": "shapenet_10mb_5k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 5000,
+ },
+ {
+ "name": "shapenet_10mb_10k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 10000,
+ },
+ {
+ "name": "shapenet_10mb_15k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 15000,
+ },
+ {
+ "name": "shapenet_100mb_200gb",
+ "max_model_mem_kb": 100e3,
+ "max_tot_mem_kb": 200e6,
+ },
+ {
+ "name": "shapenet_10mb_200gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 200e6,
+ },
+ {
+ "name": "shapenet_10mb_50gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 50e6,
+ },
+ {
+ "name": "shapenet_20mb_50gb",
+ "max_model_mem_kb": 20e3,
+ "max_tot_mem_kb": 50e6,
+ },
+ {
+ "name": "shapenet_10mb_100gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 100e6,
+ },
+ {
+ "name": "shapenet_10mb_32gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 32e6,
+ },
+ {
+ "name": "shapenet_2mb_32gb",
+ "max_model_mem_kb": 2e3,
+ "max_tot_mem_kb": 32e6,
+ },
+ {
+ "name": "shapenet_10mb_8gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 8e6,
+ },
+ {
+ "name": "shapenet_10mb_1gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 1e6,
+ },
+ {
+ "name": "shapenet_2mb_1gb",
+ "max_model_mem_kb": 2e3,
+ "max_tot_mem_kb": 1e6,
+ },
]
for split in splits:
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_panda3d.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_panda3d.py
index 95cff80a..5c049699 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_panda3d.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_panda3d.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,12 +14,10 @@
"""
-
# Standard Library
import shutil
import subprocess
import time
-from concurrent.futures import ProcessPoolExecutor as Pool
from multiprocessing import Process
from pathlib import Path
@@ -46,12 +43,12 @@ def fix_normals(obj_path):
is_block = False
def make_new_block():
- return dict(
- g="",
- usemtl="",
- f=[],
- l=[],
- )
+ return {
+ "g": "",
+ "usemtl": "",
+ "f": [],
+ "l": [],
+ }
for line in lines:
if line.startswith("mtllib"):
@@ -105,7 +102,9 @@ def make_new_block():
for line_f in block["f"]:
face = line_f[3:].split(" ")
face = [f.split("/") for f in face]
- face_flipped = " ".join([f"{x[0]}/{x[1]}/{int(x[2])+n_vn_orig}" for x in face])
+ face_flipped = " ".join(
+ [f"{x[0]}/{x[1]}/{int(x[2])+n_vn_orig}" for x in face],
+ )
f_flipped.append(f"f {face_flipped}")
block["f"] += f_flipped
@@ -143,9 +142,9 @@ def convert_obj_to_gltf(obj_path):
print(n, obj_path)
obj_path = Path(obj_path)
new_obj = fix_normals(obj_path)
- binormals_obj_path = Path((str(obj_path.with_suffix("")) + "_binormals.obj"))
+ binormals_obj_path = Path(str(obj_path.with_suffix("")) + "_binormals.obj")
binormals_obj_path.write_text(new_obj)
- proc = subprocess.run(["obj2gltf", "-i", str(binormals_obj_path)])
+ subprocess.run(["obj2gltf", "-i", str(binormals_obj_path)])
gltf_path = binormals_obj_path.with_suffix(".gltf")
p = Process(target=convert_gltf, args=(gltf_path,))
p.start()
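# A sketch (not part of the patch) of the child-process isolation in
# convert_obj_to_gltf: each conversion runs in its own Process so a crash or
# hang cannot kill the batch loop. The worker below is a stand-in for
# convert_gltf.
from multiprocessing import Process

def convert(path):
    print(f"converting {path}")

if __name__ == "__main__":
    p = Process(target=convert, args=("model.gltf",))
    p.start()
    p.join(timeout=60)  # give up after a minute
    if p.is_alive():
        p.terminate()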
@@ -153,7 +152,9 @@ def convert_obj_to_gltf(obj_path):
bam_path = gltf_path.with_suffix(".bam")
bam_exists = bam_path.exists()
if bam_exists:
- new_models_dir = Path(str(obj_path.parent).replace("models_orig", "models_panda3d_bam"))
+ new_models_dir = Path(
+ str(obj_path.parent).replace("models_orig", "models_panda3d_bam"),
+ )
Path(new_models_dir).mkdir(exist_ok=True, parents=True)
img_dir = obj_path.parent.parent / "images"
new_img_dir = new_models_dir
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_ply_scaled.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_ply_scaled.py
index 2b63ca25..92ee9f08 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_ply_scaled.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_ply_scaled.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,12 +14,10 @@
"""
-
# Standard Library
import shutil
import time
-from concurrent.futures import ProcessPoolExecutor as Pool
-from multiprocessing import Process, Queue
+from multiprocessing import Process
from pathlib import Path
# Third Party
@@ -40,8 +37,16 @@ def make_ply_scaled(mesh_path, scale=SCALE):
n, mesh_path = mesh_path
mesh_path = Path(mesh_path)
new_mesh_path = str(mesh_path.with_suffix("")) + "_scaled.ply"
- new_mesh_path = new_mesh_path.replace("models_orig", MODELS_DIR_TEMPLATE.format(scale=scale))
- mesh = trimesh.load(str(mesh_path), skip_materials=True, process=False, maintain_order=True)
+ new_mesh_path = new_mesh_path.replace(
+ "models_orig",
+ MODELS_DIR_TEMPLATE.format(scale=scale),
+ )
+ mesh = trimesh.load(
+ str(mesh_path),
+ skip_materials=True,
+ process=False,
+ maintain_order=True,
+ )
mesh = as_mesh(mesh)
mesh.apply_scale(scale)
mesh.apply_scale(1000)
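# A sketch (not part of the patch) of the double rescaling in make_ply_scaled:
# apply the dataset scale factor, then multiply by 1000, which suggests the
# exported .ply is meant to be in millimeters for the BOP renderer (requires
# trimesh; SCALE of 0.1 is assumed here).
import trimesh

SCALE = 0.1
mesh = trimesh.creation.box(extents=(1.0, 1.0, 1.0))
mesh.apply_scale(SCALE)  # normalized units -> scaled meters
mesh.apply_scale(1000)   # meters -> millimeters
print(mesh.extents)      # [100. 100. 100.]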
@@ -73,5 +78,4 @@ def make_ply_scaled_(mesh_path):
for mesh_path in tqdm(mesh_paths):
make_ply_scaled_(mesh_path)
-
time.sleep(60)
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_pointclouds.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_pointclouds.py
index c971af40..180a4782 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_pointclouds.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_pointclouds.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,12 +14,10 @@
"""
-
# Standard Library
import shutil
import time
-from concurrent.futures import ProcessPoolExecutor as Pool
-from multiprocessing import Process, Queue
+from multiprocessing import Process
from pathlib import Path
# Third Party
@@ -45,7 +42,11 @@ def make_obj_pc(mesh_path):
new_mesh_path = new_mesh_path.replace("models_orig", SPLIT_NAME)
mesh = trimesh.load(
- mesh_path, group_material=False, process=False, skip_materials=True, maintain_order=True
+ mesh_path,
+ group_material=False,
+ process=False,
+ skip_materials=True,
+ maintain_order=True,
)
mesh = as_mesh(mesh)
points = trimesh.sample.sample_surface(mesh, n_points)[0]
@@ -68,7 +69,8 @@ def make_obj_pc_(mesh_path):
shutil.rmtree(TARGETS_MODEL_DIR)
TARGETS_MODEL_DIR.mkdir()
shutil.copy(
- (SHAPENET_DIR / "models_orig" / "taxonomy.json"), TARGETS_MODEL_DIR / "taxonomy.json"
+ (SHAPENET_DIR / "models_orig" / "taxonomy.json"),
+ TARGETS_MODEL_DIR / "taxonomy.json",
)
n_procs = 20
mesh_paths = []
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
index d2a15e8d..93920745 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,13 +14,11 @@
"""
-
# Standard Library
import io
import json
-from concurrent.futures import ProcessPoolExecutor as Pool
from contextlib import redirect_stdout
-from multiprocessing import Process, Queue
+from multiprocessing import Process
from pathlib import Path
# Third Party
@@ -30,8 +27,10 @@
# MegaPose
from happypose.pose_estimators.megapose.config import SHAPENET_DIR
+from happypose.pose_estimators.megapose.panda3d_renderer.panda3d_scene_renderer import (
+ App,
+)
from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
-from happypose.pose_estimators.megapose.panda3d_renderer.panda3d_scene_renderer import App
def measure_memory(gltf_path):
@@ -43,26 +42,26 @@ def measure_memory(gltf_path):
s = f.getvalue()
s = s.splitlines()
mems = []
- for l in s:
- if "GeomVertexData arrays occupy" in l:
- print(l)
- l_ = l.split(" ")
+ for line in s:
+ if "GeomVertexData arrays occupy" in line:
+ print(line)
+ l_ = line.split(" ")
idx = [n for n, w in enumerate(l_) if w == "occupy"][0]
mems.append(float(l_[idx + 1]))
- elif "GeomPrimitive arrays occupy" in l:
- print(l)
- l_ = l.split(" ")
+ elif "GeomPrimitive arrays occupy" in line:
+ print(line)
+ l_ = line.split(" ")
idx = [n for n, w in enumerate(l_) if w == "occupy"][0]
mems.append(float(l_[idx + 1]))
- elif "texture memory required" in l:
- print(l)
- l_ = l.split(" ")
+ elif "texture memory required" in line:
+ print(line)
+ l_ = line.split(" ")
idx = [n for n, w in enumerate(l_) if w == "minimum"][0]
mems.append(float(l_[idx + 1]))
tot_mem_kb = sum(mems)
- stats = dict(
- tot_mem_kb=tot_mem_kb,
- )
+ stats = {
+ "tot_mem_kb": tot_mem_kb,
+ }
(gltf_path.parent / "stats.json").write_text(json.dumps(stats))
return
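# A sketch (not part of the patch) of the stdout scraping in measure_memory:
# capture a printed report with redirect_stdout, then pull numbers out of known
# phrases. The report text below is fabricated for the example.
import io
from contextlib import redirect_stdout

def noisy_report():
    print("GeomVertexData arrays occupy 123.0 K memory.")
    print("texture memory required is minimum 456.0 K.")

f = io.StringIO()
with redirect_stdout(f):
    noisy_report()

mems = []
for line in f.getvalue().splitlines():
    l_ = line.split(" ")
    if "GeomVertexData arrays occupy" in line:
        mems.append(float(l_[l_.index("occupy") + 1]))
    elif "texture memory required" in line:
        mems.append(float(l_[l_.index("minimum") + 1]))
print(sum(mems))  # 579.0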
@@ -76,15 +75,15 @@ def measure_memory_(gltf_path):
if __name__ == "__main__":
panda3d_obj_dataset = make_object_dataset("shapenet.panda3d_bam")
panda3d_map = {obj["label"]: obj for obj in panda3d_obj_dataset.objects}
- panda3d_objects = set(list(panda3d_map.keys()))
+ panda3d_objects = set(panda3d_map.keys())
pc_obj_dataset = make_object_dataset("shapenet.pointcloud")
pc_map = {obj["label"]: obj for obj in pc_obj_dataset.objects}
- pc_objects = set(list(pc_map.keys()))
+ pc_objects = set(pc_map.keys())
vanilla_obj_dataset = make_object_dataset("shapenet.orig")
- vanilla_objects = set([obj["label"] for obj in vanilla_obj_dataset.objects])
+ vanilla_objects = {obj["label"] for obj in vanilla_obj_dataset.objects}
stats = []
- for n, obj in enumerate(tqdm(vanilla_obj_dataset.objects)):
- stats_ = dict()
+ for _n, obj in enumerate(tqdm(vanilla_obj_dataset.objects)):
+ stats_ = {}
label = obj["label"]
stats_["label"] = label
stats_["has_pointcloud"] = label in pc_objects
@@ -92,7 +91,11 @@ def measure_memory_(gltf_path):
if stats_["has_panda3d"] and stats_["has_pointcloud"]:
panda3d_obj_dir = Path(panda3d_map[label]["mesh_path"]).parent
tot_mem_kb = sum(
- [f.stat().st_size / 1024 for f in panda3d_obj_dir.iterdir() if f.is_file()]
+ [
+ f.stat().st_size / 1024
+ for f in panda3d_obj_dir.iterdir()
+ if f.is_file()
+ ],
)
else:
tot_mem_kb = np.nan
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_subsets.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_subsets.py
index 908141f2..dd49b925 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_subsets.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_subsets.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import json
from pathlib import Path
@@ -27,15 +25,13 @@
# MegaPose
from happypose.pose_estimators.megapose.config import SHAPENET_DIR
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
def read_all_stats(obj_dataset):
- gltf_paths = []
statistics = []
- for n, obj in tqdm(enumerate(obj_dataset.objects)):
+ for _n, obj in tqdm(enumerate(obj_dataset.objects)):
mesh_path = Path(obj["mesh_path"])
- infos = dict()
+ infos = {}
stats_path = mesh_path.parent / "stats.json"
pc_path = mesh_path.parent / "model_normalized_pointcloud.obj"
infos["label"] = obj["label"]
@@ -56,7 +52,9 @@ def get_labels_split(statistics, max_model_mem_kb, max_tot_mem_kb):
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
statistics = statistics.iloc[np.where(np.isfinite(statistics["tot_mem_kb"]))[0]]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
- statistics = statistics.iloc[np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]]
+ statistics = statistics.iloc[
+ np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]
+ ]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
np_random = np.random.RandomState(0)
@@ -72,7 +70,9 @@ def get_labels_split_max_objects(statistics, max_model_mem_kb, max_num_objects):
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
statistics = statistics.iloc[np.where(np.isfinite(statistics["tot_mem_kb"]))[0]]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
- statistics = statistics.iloc[np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]]
+ statistics = statistics.iloc[
+ np.where(statistics["tot_mem_kb"] <= max_model_mem_kb)[0]
+ ]
print(len(statistics), np.nansum(statistics["tot_mem_kb"]) / 1e6)
np_random = np.random.RandomState(0)
@@ -90,101 +90,105 @@ def get_labels_split_max_objects(statistics, max_model_mem_kb, max_num_objects):
statistics = pd.read_json(ds_stats_path)
splits = [
- dict(
- name="shapenet_10mb_100",
- max_model_mem_kb=10e3,
- max_num_objects=100,
- ),
- dict(
- name="shapenet_10mb_1k",
- max_model_mem_kb=10e3,
- max_num_objects=1000,
- ),
- dict(
- name="shapenet_10mb_2k",
- max_model_mem_kb=10e3,
- max_num_objects=2500,
- ),
- dict(
- name="shapenet_10mb_5k",
- max_model_mem_kb=10e3,
- max_num_objects=5000,
- ),
- dict(
- name="shapenet_10mb_10k",
- max_model_mem_kb=10e3,
- max_num_objects=10000,
- ),
- dict(
- name="shapenet_10mb_15k",
- max_model_mem_kb=10e3,
- max_num_objects=15000,
- ),
- dict(
- name="shapenet_10mb_20k",
- max_model_mem_kb=10e3,
- max_num_objects=20000,
- ),
- dict(
- name="shapenet_100mb_200gb",
- max_model_mem_kb=100e3,
- max_tot_mem_kb=200e6,
- ),
- dict(
- name="shapenet_10mb_200gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=200e6,
- ),
- dict(
- name="shapenet_10mb_50gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=50e6,
- ),
- dict(
- name="shapenet_20mb_50gb",
- max_model_mem_kb=20e3,
- max_tot_mem_kb=50e6,
- ),
- dict(
- name="shapenet_10mb_100gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=100e6,
- ),
- dict(
- name="shapenet_10mb_32gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=32e6,
- ),
- dict(
- name="shapenet_2mb_32gb",
- max_model_mem_kb=2e3,
- max_tot_mem_kb=32e6,
- ),
- dict(
- name="shapenet_10mb_8gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=8e6,
- ),
- dict(
- name="shapenet_10mb_1gb",
- max_model_mem_kb=10e3,
- max_tot_mem_kb=1e6,
- ),
- dict(
- name="shapenet_2mb_1gb",
- max_model_mem_kb=2e3,
- max_tot_mem_kb=1e6,
- ),
+ {
+ "name": "shapenet_10mb_100",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 100,
+ },
+ {
+ "name": "shapenet_10mb_1k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 1000,
+ },
+ {
+ "name": "shapenet_10mb_2k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 2500,
+ },
+ {
+ "name": "shapenet_10mb_5k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 5000,
+ },
+ {
+ "name": "shapenet_10mb_10k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 10000,
+ },
+ {
+ "name": "shapenet_10mb_15k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 15000,
+ },
+ {
+ "name": "shapenet_10mb_20k",
+ "max_model_mem_kb": 10e3,
+ "max_num_objects": 20000,
+ },
+ {
+ "name": "shapenet_100mb_200gb",
+ "max_model_mem_kb": 100e3,
+ "max_tot_mem_kb": 200e6,
+ },
+ {
+ "name": "shapenet_10mb_200gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 200e6,
+ },
+ {
+ "name": "shapenet_10mb_50gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 50e6,
+ },
+ {
+ "name": "shapenet_20mb_50gb",
+ "max_model_mem_kb": 20e3,
+ "max_tot_mem_kb": 50e6,
+ },
+ {
+ "name": "shapenet_10mb_100gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 100e6,
+ },
+ {
+ "name": "shapenet_10mb_32gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 32e6,
+ },
+ {
+ "name": "shapenet_2mb_32gb",
+ "max_model_mem_kb": 2e3,
+ "max_tot_mem_kb": 32e6,
+ },
+ {
+ "name": "shapenet_10mb_8gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 8e6,
+ },
+ {
+ "name": "shapenet_10mb_1gb",
+ "max_model_mem_kb": 10e3,
+ "max_tot_mem_kb": 1e6,
+ },
+ {
+ "name": "shapenet_2mb_1gb",
+ "max_model_mem_kb": 2e3,
+ "max_tot_mem_kb": 1e6,
+ },
]
for split in splits:
if "max_num_objects" in split:
labels = get_labels_split_max_objects(
- statistics, split["max_model_mem_kb"], split["max_num_objects"]
+ statistics,
+ split["max_model_mem_kb"],
+ split["max_num_objects"],
)
else:
labels = get_labels_split(
- statistics, split["max_model_mem_kb"], split["max_tot_mem_kb"]
+ statistics,
+ split["max_model_mem_kb"],
+ split["max_tot_mem_kb"],
)
split_path = (ds_dir / "stats" / split["name"]).with_suffix(".json")
split_path.write_text(json.dumps(labels))
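# A sketch (not part of the patch) of the split dispatch above: each split dict
# carries either an object-count cap or a total-memory cap, and the key that is
# present picks the selection routine. Both routines are stubbed out here.
import json
from pathlib import Path

def get_labels_split_max_objects(split):  # stub
    return [f"obj_{i}" for i in range(3)]

def get_labels_split(split):  # stub
    return ["obj_small"]

splits = [
    {"name": "demo_10mb_100", "max_model_mem_kb": 10e3, "max_num_objects": 100},
    {"name": "demo_10mb_1gb", "max_model_mem_kb": 10e3, "max_tot_mem_kb": 1e6},
]
out_dir = Path("stats_demo")
out_dir.mkdir(exist_ok=True)
for split in splits:
    if "max_num_objects" in split:
        labels = get_labels_split_max_objects(split)
    else:
        labels = get_labels_split(split)
    (out_dir / split["name"]).with_suffix(".json").write_text(json.dumps(labels))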
diff --git a/happypose/pose_estimators/megapose/scripts/run_full_megapose_eval.py b/happypose/pose_estimators/megapose/scripts/run_full_megapose_eval.py
index 70313440..7081c0ad 100644
--- a/happypose/pose_estimators/megapose/scripts/run_full_megapose_eval.py
+++ b/happypose/pose_estimators/megapose/scripts/run_full_megapose_eval.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,56 +14,42 @@
"""
-
# Standard Library
import copy
import os
from pathlib import Path
-from typing import Dict, Optional, Tuple
# Third Party
from omegaconf import OmegaConf
# MegaPose
-from happypose.pose_estimators.megapose.bop_config import (
- PBR_COARSE,
- PBR_DETECTORS,
- PBR_REFINER,
- SYNT_REAL_COARSE,
- SYNT_REAL_DETECTORS,
- SYNT_REAL_REFINER,
-)
+from happypose.pose_estimators.megapose.bop_config import PBR_DETECTORS
from happypose.pose_estimators.megapose.config import (
DEBUG_RESULTS_DIR,
- EXP_DIR,
MODELNET_TEST_CATEGORIES,
RESULTS_DIR,
)
+from happypose.pose_estimators.megapose.evaluation.bop import run_evaluation
from happypose.pose_estimators.megapose.evaluation.eval_config import (
BOPEvalConfig,
EvalConfig,
FullEvalConfig,
HardwareConfig,
)
-
-from happypose.pose_estimators.megapose.evaluation.evaluation import get_save_dir, generate_save_key, run_eval
-from happypose.pose_estimators.megapose.evaluation.bop import run_evaluation
-from happypose.toolbox.utils.distributed import get_rank, get_world_size, init_distributed_mode
+from happypose.pose_estimators.megapose.evaluation.evaluation import (
+ generate_save_key,
+ get_save_dir,
+ run_eval,
+)
+from happypose.toolbox.utils.distributed import (
+ get_rank,
+ get_world_size,
+ init_distributed_mode,
+)
from happypose.toolbox.utils.logging import get_logger, set_logging_level
logger = get_logger(__name__)
-BOP_DATASET_NAMES = [
- "lm",
- "lmo",
- "tless",
- "tudl",
- "icbin",
- "itodd",
- "hb",
- "ycbv",
- # 'hope',
-]
BOP_TEST_DATASETS = [
"lmo.bop19",
@@ -77,7 +62,9 @@
]
-MODELNET_TEST_DATASETS = [f"modelnet.{category}.test" for category in MODELNET_TEST_CATEGORIES]
+MODELNET_TEST_DATASETS = [
+ f"modelnet.{category}.test" for category in MODELNET_TEST_CATEGORIES
+]
def create_eval_cfg(
@@ -85,8 +72,7 @@ def create_eval_cfg(
detection_type: str,
coarse_estimation_type: str,
ds_name: str,
-) -> Tuple[str, EvalConfig]:
-
+) -> tuple[str, EvalConfig]:
cfg = copy.deepcopy(cfg)
cfg.inference.detection_type = detection_type
@@ -101,10 +87,11 @@ def create_eval_cfg(
cfg.detector_run_id = PBR_DETECTORS[ds_name_root]
elif detection_type == "gt":
pass
- elif detection_type == "sam":
+ elif detection_type == "exte":
pass
else:
- raise ValueError(f"Unknown detector type {cfg.detector_type}")
+ msg = f"Unknown detector type {cfg.detector_type}"
+ raise ValueError(msg)
name = generate_save_key(detection_type, coarse_estimation_type)
@@ -112,7 +99,6 @@ def create_eval_cfg(
def run_full_eval(cfg: FullEvalConfig) -> None:
-
bop_eval_cfgs = []
init_distributed_mode()
@@ -126,27 +112,30 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
# Iterate over each dataset
for ds_name in cfg.ds_names:
-
# create the EvalConfig objects that we will call `run_eval` on
- eval_configs: Dict[str, EvalConfig] = dict()
- for (detection_type, coarse_estimation_type) in cfg.detection_coarse_types:
- name, cfg_ = create_eval_cfg(cfg, detection_type, coarse_estimation_type, ds_name)
+ eval_configs: dict[str, EvalConfig] = {}
+ for detection_type, coarse_estimation_type in cfg.detection_coarse_types:
+ name, cfg_ = create_eval_cfg(
+ cfg,
+ detection_type,
+ coarse_estimation_type,
+ ds_name,
+ )
eval_configs[name] = cfg_
# For each eval_cfg run the evaluation.
# Note that the results get saved to disk
- for save_key, eval_cfg in eval_configs.items():
-
+ for _save_key, eval_cfg in eval_configs.items():
# Run the inference
if not cfg.skip_inference:
eval_out = run_eval(eval_cfg)
- # If we are skpping the inference mimic the output that run_eval
+ # If we are skipping the inference, mimic the output that run_eval
# would have produced so that we can run the bop_eval
else: # Otherwise hack the output so we can run the BOP eval
if get_rank() == 0:
results_dir = get_save_dir(eval_cfg)
- pred_keys = ["refiner/final"]
+ pred_keys = ["coarse", "refiner/final"]
if eval_cfg.inference.run_depth_refiner:
pred_keys.append("depth_refiner")
eval_out = {
@@ -156,17 +145,21 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
}
assert Path(
- eval_out["results_path"]
+ eval_out["results_path"],
).is_file(), f"The file {eval_out['results_path']} doesn't exist"
# Run the bop eval for each type of prediction
if cfg.run_bop_eval and get_rank() == 0:
-            bop_eval_keys = set(("refiner/final", "depth_refiner"))
+            bop_eval_keys = {"refiner/final", "depth_refiner"}
+            if cfg.eval_coarse_also:
+                bop_eval_keys.add("coarse")
+            # drop prediction types that were not produced at inference time
+            # before running the BOP evaluation
bop_eval_keys = bop_eval_keys.intersection(set(eval_out["pred_keys"]))
for method in bop_eval_keys:
- if not "bop19" in ds_name:
+ if "bop19" not in ds_name:
continue
bop_eval_cfg = BOPEvalConfig(
@@ -175,7 +168,7 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
split="test",
eval_dir=eval_out["save_dir"] / "bop_evaluation",
method=method,
- convert_only=False,
+ convert_only=eval_cfg.convert_only,
)
bop_eval_cfgs.append(bop_eval_cfg)
diff --git a/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py b/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py
index d3979632..317b4f29 100644
--- a/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py
+++ b/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py
@@ -3,18 +3,24 @@
import json
import os
from pathlib import Path
-from typing import List, Tuple, Union
+from typing import Union
# Third Party
import numpy as np
import torch
-from bokeh.io import export_png
-from bokeh.plotting import gridplot
from PIL import Image
# HappyPose
import happypose
-from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
+
+# MegaPose
+# from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
+# from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
+from happypose.pose_estimators.megapose.config import BOP_DS_DIR
+from happypose.toolbox.datasets.datasets_cfg import (
+ make_object_dataset,
+ make_scene_dataset,
+)
from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
from happypose.toolbox.inference.types import (
DetectionsType,
@@ -23,47 +29,43 @@
)
from happypose.toolbox.inference.utils import make_detections_from_object_data
from happypose.toolbox.lib3d.transform import Transform
-from happypose.toolbox.renderer import Panda3dLightData
-from happypose.toolbox.renderer.panda3d_scene_renderer import Panda3dSceneRenderer
-from happypose.toolbox.utils.conversion import convert_scene_observation_to_panda3d
from happypose.toolbox.utils.load_model import NAMED_MODELS, load_named_model
from happypose.toolbox.utils.logging import get_logger, set_logging_level
-from happypose.toolbox.visualization.bokeh_plotter import BokehPlotter
-from happypose.toolbox.visualization.utils import make_contour_overlay
-from happypose.toolbox.datasets.datasets_cfg import make_scene_dataset, make_object_dataset
-
-# MegaPose
-#from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
-#from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
-from happypose.pose_estimators.megapose.config import (
- BOP_DS_DIR
-)
-#scene_id = str(object['scene_id']).zfill(6)
-#image_id = str(object['image_id']).zfill(6)
+# scene_id = str(object['scene_id']).zfill(6)
+# image_id = str(object['image_id']).zfill(6)
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
def load_observation(
dataset_dir: Path,
scene_id: str,
image_id: str,
load_depth: bool = False,
-) -> Tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
+) -> tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
camera_data_json = json.loads((dataset_dir / "scene_camera.json").read_text())
camera_data = CameraData(
- K=np.array(camera_data_json[str(image_id)]['cam_K']).reshape(3,3),
- resolution=(480, 640))
- rgb = np.array(Image.open(dataset_dir / "rgb/{image_id}.png".format(image_id=str(image_id).zfill(6))),
- dtype=np.uint8)
+ K=np.array(camera_data_json[str(image_id)]["cam_K"]).reshape(3, 3),
+ resolution=(480, 640),
+ )
+ rgb = np.array(
+ Image.open(dataset_dir / f"rgb/{str(image_id).zfill(6)}.png"),
+ dtype=np.uint8,
+ )
assert rgb.shape[:2] == camera_data.resolution
depth = None
if load_depth:
- depth = np.array(Image.open(dataset_dir / "depth/{image_id}.png".format(image_id=str(image_id).zfill(6))),
- dtype=np.float32) / 1000
+ depth = (
+ np.array(
+ Image.open(dataset_dir / f"depth/{str(image_id).zfill(6)}.png"),
+ dtype=np.float32,
+ )
+ / 1000
+ )
assert depth.shape[:2] == camera_data.resolution
return rgb, depth, camera_data
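# A sketch (not part of the patch) of the depth handling in load_observation:
# the 16-bit depth PNG is read as float32 and divided by 1000, i.e. the stored
# values are assumed to be millimeters. A synthetic array replaces the PNG.
import numpy as np

depth_png = np.array([[0, 500], [1000, 2500]], dtype=np.uint16)  # as on disk
depth_m = np.asarray(depth_png, dtype=np.float32) / 1000
print(depth_m)  # [[0.  0.5], [1.  2.5]]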
@@ -76,18 +78,23 @@ def load_observation_tensor(
load_depth: bool = False,
) -> ObservationTensor:
dataset_dir = dataset_dir / "test" / str(scene_id).zfill(6)
- rgb, depth, camera_data = load_observation(dataset_dir, scene_id, image_id, load_depth)
+ rgb, depth, camera_data = load_observation(
+ dataset_dir,
+ scene_id,
+ image_id,
+ load_depth,
+ )
observation = ObservationTensor.from_numpy(rgb, depth, camera_data.K)
return observation
-def load_object_data(data_path: Path) -> List[ObjectData]:
+def load_object_data(data_path: Path) -> list[ObjectData]:
object_data = json.loads(data_path.read_text())
for object in object_data:
- object['bbox_modal'] = object['bbox']
- object['label'] = "ycbv-obj_{}".format(str(object['category_id']).zfill(6))
- scene_id = object['scene_id']
- image_id = object['image_id']
+ object["bbox_modal"] = object["bbox"]
+ object["label"] = "ycbv-obj_{}".format(str(object["category_id"]).zfill(6))
+ scene_id = object["scene_id"]
+ image_id = object["image_id"]
break
for object in object_data:
print("object_data = ", object)
@@ -99,11 +106,14 @@ def load_object_data(data_path: Path) -> List[ObjectData]:
def load_detections(
example_dir: Path,
) -> DetectionsType:
- input_object_data, scene_id, image_id = load_object_data(example_dir / "baseline.json")
+ input_object_data, scene_id, image_id = load_object_data(
+ example_dir / "baseline.json",
+ )
detections = make_detections_from_object_data(input_object_data).to(device)
print(detections)
return detections, scene_id, image_id
+
"""
def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
rigid_objects = []
@@ -117,12 +127,15 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
assert not mesh_path, f"there multiple meshes in the {label} directory"
mesh_path = fn
assert mesh_path, f"couldnt find a obj or ply mesh for {label}"
- rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units))
+ rigid_objects.append(
+ RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units)
+ )
# TODO: fix mesh units
rigid_object_dataset = RigidObjectDataset(rigid_objects)
return rigid_object_dataset
"""
+
def save_predictions(
example_dir: Path,
pose_estimates: PoseEstimatesType,
@@ -130,7 +143,8 @@ def save_predictions(
labels = pose_estimates.infos["label"]
poses = pose_estimates.poses.cpu().numpy()
object_data = [
- ObjectData(label=label, TWO=Transform(pose)) for label, pose in zip(labels, poses)
+ ObjectData(label=label, TWO=Transform(pose))
+ for label, pose in zip(labels, poses)
]
object_data_json = json.dumps([x.to_json() for x in object_data])
output_fn = example_dir / "outputs" / "object_data.json"
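# A plain-JSON sketch (not part of the patch) of save_predictions: pair each
# label with its 4x4 pose and dump the list. Dicts replace happypose's
# ObjectData/Transform wrappers so the snippet runs without the library.
import json
import numpy as np

labels = ["ycbv-obj_000002"]
poses = [np.eye(4)]
object_data = [
    {"label": label, "TWO": pose.tolist()} for label, pose in zip(labels, poses)
]
print(json.dumps(object_data))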
@@ -146,37 +160,40 @@ def run_inference(
dataset_name: str,
model_name: str,
) -> None:
-
model_info = NAMED_MODELS[model_name]
-
detections, scene_id, image_id = load_detections(example_dir)
detections = detections.to(device)
observation = load_observation_tensor(
- dataset_dir, scene_id, image_id, load_depth=model_info["requires_depth"]
+ dataset_dir,
+ scene_id,
+ image_id,
+ load_depth=model_info["requires_depth"],
)
if torch.cuda.is_available():
observation.cuda()
- #object_dataset = make_object_dataset(dataset_dir)
+ # object_dataset = make_object_dataset(dataset_dir)
- ds_kwargs = dict(load_depth=True)
+ ds_kwargs = {"load_depth": True}
dataset_name = "ycbv.bop19"
- scene_ds = make_scene_dataset(dataset_name, **ds_kwargs)
- urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(dataset_name)
+ make_scene_dataset(dataset_name, **ds_kwargs)
+ urdf_ds_name, obj_ds_name = happypose.toolbox.datasets.datasets_cfg.get_obj_ds_info(
+ dataset_name,
+ )
object_dataset = make_object_dataset(obj_ds_name)
-
logger.info(f"Loading model {model_name}.")
pose_estimator = load_named_model(model_name, object_dataset).to(device)
- logger.info(f"Running inference.")
+ logger.info("Running inference.")
output, _ = pose_estimator.run_inference_pipeline(
- observation, detections=detections, **model_info["inference_parameters"]
+ observation,
+ detections=detections,
+ **model_info["inference_parameters"],
)
-
+
save_predictions(example_dir, output)
return
-
# def make_mesh_visualization(RigidObject) -> List[Image]:
@@ -195,7 +212,11 @@ def run_inference(
set_logging_level("info")
parser = argparse.ArgumentParser()
parser.add_argument("ds_name")
- parser.add_argument("--model", type=str, default="megapose-1.0-RGB-multi-hypothesis")
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="megapose-1.0-RGB-multi-hypothesis",
+ )
parser.add_argument("--vis-detections", action="store_true")
parser.add_argument("--run-inference", action="store_true")
parser.add_argument("--vis-outputs", action="store_true")
@@ -208,5 +229,3 @@ def run_inference(
if args.run_inference:
run_inference(example_dir, dataset_dir, args.ds_name, args.model)
-
-
diff --git a/happypose/pose_estimators/megapose/scripts/run_inference_on_example.py b/happypose/pose_estimators/megapose/scripts/run_inference_on_example.py
index 2d1ee6ea..6cfeb8c3 100644
--- a/happypose/pose_estimators/megapose/scripts/run_inference_on_example.py
+++ b/happypose/pose_estimators/megapose/scripts/run_inference_on_example.py
@@ -3,7 +3,7 @@
import json
import os
from pathlib import Path
-from typing import List, Tuple, Union
+from typing import Union
# Third Party
import numpy as np
@@ -31,19 +31,19 @@
from happypose.toolbox.visualization.utils import make_contour_overlay
# MegaPose
-#from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
-#from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
-
+# from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
+# from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
def load_observation(
example_dir: Path,
load_depth: bool = False,
-) -> Tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
+) -> tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
camera_data = CameraData.from_json((example_dir / "camera_data.json").read_text())
rgb = np.array(Image.open(example_dir / "image_rgb.png"), dtype=np.uint8)
@@ -51,7 +51,10 @@ def load_observation(
depth = None
if load_depth:
- depth = np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32) / 1000
+ depth = (
+ np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32)
+ / 1000
+ )
assert depth.shape[:2] == camera_data.resolution
return rgb, depth, camera_data
@@ -66,7 +69,7 @@ def load_observation_tensor(
return observation
-def load_object_data(data_path: Path) -> List[ObjectData]:
+def load_object_data(data_path: Path) -> list[ObjectData]:
object_data = json.loads(data_path.read_text())
object_data = [ObjectData.from_json(d) for d in object_data]
return object_data
@@ -92,7 +95,9 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
assert not mesh_path, f"there multiple meshes in the {label} directory"
mesh_path = fn
assert mesh_path, f"couldnt find a obj or ply mesh for {label}"
- rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units))
+ rigid_objects.append(
+ RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units),
+ )
# TODO: fix mesh units
rigid_object_dataset = RigidObjectDataset(rigid_objects)
return rigid_object_dataset
@@ -120,7 +125,8 @@ def save_predictions(
labels = pose_estimates.infos["label"]
poses = pose_estimates.poses.cpu().numpy()
object_data = [
- ObjectData(label=label, TWO=Transform(pose)) for label, pose in zip(labels, poses)
+ ObjectData(label=label, TWO=Transform(pose))
+ for label, pose in zip(labels, poses)
]
object_data_json = json.dumps([x.to_json() for x in object_data])
output_fn = example_dir / "outputs" / "object_data.json"
@@ -134,11 +140,11 @@ def run_inference(
example_dir: Path,
model_name: str,
) -> None:
-
model_info = NAMED_MODELS[model_name]
observation = load_observation_tensor(
- example_dir, load_depth=model_info["requires_depth"]
+ example_dir,
+ load_depth=model_info["requires_depth"],
)
if torch.cuda.is_available():
observation.cuda()
@@ -147,11 +153,13 @@ def run_inference(
logger.info(f"Loading model {model_name}.")
pose_estimator = load_named_model(model_name, object_dataset).to(device)
- logger.info(f"Running inference.")
+ logger.info("Running inference.")
output, _ = pose_estimator.run_inference_pipeline(
- observation, detections=detections, **model_info["inference_parameters"]
+ observation,
+ detections=detections,
+ **model_info["inference_parameters"],
)
-
+
save_predictions(example_dir, output)
return
@@ -159,7 +167,6 @@ def run_inference(
def make_output_visualization(
example_dir: Path,
) -> None:
-
rgb, _, camera_data = load_observation(example_dir, load_depth=False)
camera_data.TWC = Transform(np.eye(4))
object_datas = load_object_data(example_dir / "outputs" / "object_data.json")
@@ -167,7 +174,10 @@ def make_output_visualization(
renderer = Panda3dSceneRenderer(object_dataset)
- camera_data, object_datas = convert_scene_observation_to_panda3d(camera_data, object_datas)
+ camera_data, object_datas = convert_scene_observation_to_panda3d(
+ camera_data,
+ object_datas,
+ )
light_datas = [
Panda3dLightData(
light_type="ambient",
@@ -189,10 +199,16 @@ def make_output_visualization(
fig_rgb = plotter.plot_image(rgb)
fig_mesh_overlay = plotter.plot_overlay(rgb, renderings.rgb)
contour_overlay = make_contour_overlay(
- rgb, renderings.rgb, dilate_iterations=1, color=(0, 255, 0)
+ rgb,
+ renderings.rgb,
+ dilate_iterations=1,
+ color=(0, 255, 0),
)["img"]
fig_contour_overlay = plotter.plot_image(contour_overlay)
- fig_all = gridplot([[fig_rgb, fig_contour_overlay, fig_mesh_overlay]], toolbar_location=None)
+ fig_all = gridplot(
+ [[fig_rgb, fig_contour_overlay, fig_mesh_overlay]],
+ toolbar_location=None,
+ )
vis_dir = example_dir / "visualizations"
vis_dir.mkdir(exist_ok=True)
export_png(fig_mesh_overlay, filename=vis_dir / "mesh_overlay.png")
@@ -218,7 +234,11 @@ def make_output_visualization(
set_logging_level("info")
parser = argparse.ArgumentParser()
parser.add_argument("example_name")
- parser.add_argument("--model", type=str, default="megapose-1.0-RGB-multi-hypothesis")
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="megapose-1.0-RGB-multi-hypothesis",
+ )
parser.add_argument("--vis-detections", action="store_true")
parser.add_argument("--run-inference", action="store_true")
parser.add_argument("--vis-outputs", action="store_true")
diff --git a/happypose/pose_estimators/megapose/scripts/run_inference_on_example_newdetections.py b/happypose/pose_estimators/megapose/scripts/run_inference_on_example_newdetections.py
index 2d1ee6ea..6cfeb8c3 100644
--- a/happypose/pose_estimators/megapose/scripts/run_inference_on_example_newdetections.py
+++ b/happypose/pose_estimators/megapose/scripts/run_inference_on_example_newdetections.py
@@ -3,7 +3,7 @@
import json
import os
from pathlib import Path
-from typing import List, Tuple, Union
+from typing import Union
# Third Party
import numpy as np
@@ -31,19 +31,19 @@
from happypose.toolbox.visualization.utils import make_contour_overlay
# MegaPose
-#from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
-#from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
-
+# from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
+# from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
def load_observation(
example_dir: Path,
load_depth: bool = False,
-) -> Tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
+) -> tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
camera_data = CameraData.from_json((example_dir / "camera_data.json").read_text())
rgb = np.array(Image.open(example_dir / "image_rgb.png"), dtype=np.uint8)
@@ -51,7 +51,10 @@ def load_observation(
depth = None
if load_depth:
- depth = np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32) / 1000
+ depth = (
+ np.array(Image.open(example_dir / "image_depth.png"), dtype=np.float32)
+ / 1000
+ )
assert depth.shape[:2] == camera_data.resolution
return rgb, depth, camera_data
@@ -66,7 +69,7 @@ def load_observation_tensor(
return observation
-def load_object_data(data_path: Path) -> List[ObjectData]:
+def load_object_data(data_path: Path) -> list[ObjectData]:
object_data = json.loads(data_path.read_text())
object_data = [ObjectData.from_json(d) for d in object_data]
return object_data
@@ -92,7 +95,9 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset:
assert not mesh_path, f"there multiple meshes in the {label} directory"
mesh_path = fn
assert mesh_path, f"couldnt find a obj or ply mesh for {label}"
- rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units))
+ rigid_objects.append(
+ RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units),
+ )
# TODO: fix mesh units
rigid_object_dataset = RigidObjectDataset(rigid_objects)
return rigid_object_dataset
@@ -120,7 +125,8 @@ def save_predictions(
labels = pose_estimates.infos["label"]
poses = pose_estimates.poses.cpu().numpy()
object_data = [
- ObjectData(label=label, TWO=Transform(pose)) for label, pose in zip(labels, poses)
+ ObjectData(label=label, TWO=Transform(pose))
+ for label, pose in zip(labels, poses)
]
object_data_json = json.dumps([x.to_json() for x in object_data])
output_fn = example_dir / "outputs" / "object_data.json"
@@ -134,11 +140,11 @@ def run_inference(
example_dir: Path,
model_name: str,
) -> None:
-
model_info = NAMED_MODELS[model_name]
observation = load_observation_tensor(
- example_dir, load_depth=model_info["requires_depth"]
+ example_dir,
+ load_depth=model_info["requires_depth"],
)
if torch.cuda.is_available():
observation.cuda()
@@ -147,11 +153,13 @@ def run_inference(
logger.info(f"Loading model {model_name}.")
pose_estimator = load_named_model(model_name, object_dataset).to(device)
- logger.info(f"Running inference.")
+ logger.info("Running inference.")
output, _ = pose_estimator.run_inference_pipeline(
- observation, detections=detections, **model_info["inference_parameters"]
+ observation,
+ detections=detections,
+ **model_info["inference_parameters"],
)
-
+
save_predictions(example_dir, output)
return
@@ -159,7 +167,6 @@ def run_inference(
def make_output_visualization(
example_dir: Path,
) -> None:
-
rgb, _, camera_data = load_observation(example_dir, load_depth=False)
camera_data.TWC = Transform(np.eye(4))
object_datas = load_object_data(example_dir / "outputs" / "object_data.json")
@@ -167,7 +174,10 @@ def make_output_visualization(
renderer = Panda3dSceneRenderer(object_dataset)
- camera_data, object_datas = convert_scene_observation_to_panda3d(camera_data, object_datas)
+ camera_data, object_datas = convert_scene_observation_to_panda3d(
+ camera_data,
+ object_datas,
+ )
light_datas = [
Panda3dLightData(
light_type="ambient",
@@ -189,10 +199,16 @@ def make_output_visualization(
fig_rgb = plotter.plot_image(rgb)
fig_mesh_overlay = plotter.plot_overlay(rgb, renderings.rgb)
contour_overlay = make_contour_overlay(
- rgb, renderings.rgb, dilate_iterations=1, color=(0, 255, 0)
+ rgb,
+ renderings.rgb,
+ dilate_iterations=1,
+ color=(0, 255, 0),
)["img"]
fig_contour_overlay = plotter.plot_image(contour_overlay)
- fig_all = gridplot([[fig_rgb, fig_contour_overlay, fig_mesh_overlay]], toolbar_location=None)
+ fig_all = gridplot(
+ [[fig_rgb, fig_contour_overlay, fig_mesh_overlay]],
+ toolbar_location=None,
+ )
vis_dir = example_dir / "visualizations"
vis_dir.mkdir(exist_ok=True)
export_png(fig_mesh_overlay, filename=vis_dir / "mesh_overlay.png")
@@ -218,7 +234,11 @@ def make_output_visualization(
set_logging_level("info")
parser = argparse.ArgumentParser()
parser.add_argument("example_name")
- parser.add_argument("--model", type=str, default="megapose-1.0-RGB-multi-hypothesis")
+ parser.add_argument(
+ "--model",
+ type=str,
+ default="megapose-1.0-RGB-multi-hypothesis",
+ )
parser.add_argument("--vis-detections", action="store_true")
parser.add_argument("--run-inference", action="store_true")
parser.add_argument("--vis-outputs", action="store_true")
diff --git a/happypose/pose_estimators/megapose/scripts/run_megapose_training.py b/happypose/pose_estimators/megapose/scripts/run_megapose_training.py
index 26e86630..2a4ca03f 100644
--- a/happypose/pose_estimators/megapose/scripts/run_megapose_training.py
+++ b/happypose/pose_estimators/megapose/scripts/run_megapose_training.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,8 @@
"""
-# SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES.
+# All rights reserved.
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a
@@ -38,7 +38,7 @@
# Standard Library
import os
-from typing import List, Optional
+from typing import Optional
# Third Party
import numpy as np
@@ -48,14 +48,23 @@
# MegaPose
from happypose.pose_estimators.megapose.bop_config import BOP_CONFIG
from happypose.pose_estimators.megapose.config import EXP_DIR
-from happypose.pose_estimators.megapose.training.train_megapose import DatasetConfig, train_megapose
-from happypose.pose_estimators.megapose.training.training_config import HardwareConfig, TrainingConfig
+from happypose.pose_estimators.megapose.training.train_megapose import (
+ DatasetConfig,
+ train_megapose,
+)
+from happypose.pose_estimators.megapose.training.training_config import (
+ HardwareConfig,
+ TrainingConfig,
+)
from happypose.toolbox.utils.logging import get_logger, set_logging_level
logger = get_logger(__name__)
-def train_on_bop_pbr_datasets(cfg: TrainingConfig, use_webdataset: bool = True) -> TrainingConfig:
+def train_on_bop_pbr_datasets(
+ cfg: TrainingConfig,
+ use_webdataset: bool = True,
+) -> TrainingConfig:
bop_names = ["lm", "tless", "itodd", "hb", "ycbv", "icbin", "tudl"]
for bop_name in bop_names:
bop_cfg = BOP_CONFIG[bop_name]
@@ -75,12 +84,11 @@ def train_on_bop_pbr_datasets(cfg: TrainingConfig, use_webdataset: bool = True)
def train_on_shapenet(
cfg: TrainingConfig,
ds_name: str = "shapenet_1M",
- obj_filters: List[str] = [
+ obj_filters: list[str] = [
"10mb_20k",
],
remove_modelnet: bool = False,
) -> TrainingConfig:
-
if remove_modelnet:
obj_filters.append("remove_modelnet")
@@ -92,7 +100,7 @@ def train_on_shapenet(
ds_name="webdataset." + ds_name,
mesh_obj_ds_name=f"{obj_ds_name}.pointcloud",
renderer_obj_ds_name=f"{obj_ds_name}.panda3d_bam",
- )
+ ),
)
cfg.n_symmetries_batch = 1
return cfg
@@ -103,7 +111,6 @@ def train_on_gso(
ds_name: str = "gso_1M",
n_objects: int = 940,
) -> TrainingConfig:
-
cfg.input_resize = (540, 720)
obj_ds_name = f"gso.nobjects={n_objects}"
cfg.train_datasets.append(
@@ -111,7 +118,7 @@ def train_on_gso(
ds_name="webdataset." + ds_name,
mesh_obj_ds_name=f"{obj_ds_name}.pointcloud",
renderer_obj_ds_name=f"{obj_ds_name}.normalized",
- )
+ ),
)
cfg.n_symmetries_batch = 1
return cfg
@@ -144,7 +151,7 @@ def make_coarse_cfg(cfg: TrainingConfig) -> TrainingConfig:
def enable_depth_in_cfg(cfg: TrainingConfig) -> TrainingConfig:
- """Adds flags for input depth + render depth to cfg"""
+ """Adds flags for input depth + render depth to cfg."""
cfg.depth_normalization_type = "tCR_scale_clamp_center"
cfg.input_depth = True
cfg.render_depth = True
@@ -157,7 +164,7 @@ def update_cfg_with_config_id(cfg: TrainingConfig, config_id: str) -> TrainingCo
def train_on_gso_and_shapenet(
cfg: TrainingConfig,
shapenet_obj_ds_name: Optional[str] = "shapenet_1M",
- shapenet_obj_filters: List[str] = ["10mb_20k"],
+ shapenet_obj_filters: list[str] = ["10mb_20k"],
gso_obj_ds_name: Optional[str] = "gso_1M",
gso_n_objects: int = 940,
remove_modelnet: bool = False,
@@ -204,31 +211,45 @@ def train_on_gso_and_shapenet(
elif config_id == "refiner-gso_shapenet-4views-normals-objects50p":
cfg = make_refiner_cfg(cfg)
cfg = train_on_gso_and_shapenet(
- cfg, shapenet_obj_ds_name="shapenet_10mb_10k", gso_obj_ds_name="gso_500"
+ cfg,
+ shapenet_obj_ds_name="shapenet_10mb_10k",
+ gso_obj_ds_name="gso_500",
)
elif config_id == "refiner-gso_shapenet-4views-normals-objects25p":
cfg = make_refiner_cfg(cfg)
cfg = train_on_gso_and_shapenet(
- cfg, shapenet_obj_ds_name="shapenet_10mb_2k", gso_obj_ds_name="gso_250"
+ cfg,
+ shapenet_obj_ds_name="shapenet_10mb_2k",
+ gso_obj_ds_name="gso_250",
)
elif config_id == "refiner-gso_shapenet-4views-normals-objects10p":
cfg = make_refiner_cfg(cfg)
cfg = train_on_gso_and_shapenet(
- cfg, shapenet_obj_ds_name="shapenet_10mb_1k", gso_obj_ds_name="gso_100"
+ cfg,
+ shapenet_obj_ds_name="shapenet_10mb_1k",
+ gso_obj_ds_name="gso_100",
)
elif config_id == "refiner-gso_shapenet-4views-normals-objects1p":
cfg = make_refiner_cfg(cfg)
cfg = train_on_gso_and_shapenet(
- cfg, shapenet_obj_ds_name="shapenet_10mb_100", gso_obj_ds_name="gso_10"
+ cfg,
+ shapenet_obj_ds_name="shapenet_10mb_100",
+ gso_obj_ds_name="gso_10",
)
elif config_id == "refiner-gso-4views-normals":
cfg = make_refiner_cfg(cfg)
- cfg = train_on_gso_and_shapenet(cfg, shapenet_obj_ds_name=None, gso_obj_ds_name="gso_940")
+ cfg = train_on_gso_and_shapenet(
+ cfg,
+ shapenet_obj_ds_name=None,
+ gso_obj_ds_name="gso_940",
+ )
elif config_id == "refiner-shapenet-4views-normals":
cfg = make_refiner_cfg(cfg)
cfg = train_on_gso_and_shapenet(
- cfg, shapenet_obj_ds_name="shapenet_10mb_20k", gso_obj_ds_name=None
+ cfg,
+ shapenet_obj_ds_name="shapenet_10mb_20k",
+ gso_obj_ds_name=None,
)
elif config_id == "refiner-gso_shapenet_nomodelnet-4views-normals":
cfg = make_refiner_cfg(cfg)
@@ -263,7 +284,8 @@ def train_on_gso_and_shapenet(
cfg = train_on_gso_and_shapenet(cfg)
else:
- raise ValueError("Unknown config")
+ msg = "Unknown config"
+ raise ValueError(msg)
if cfg.run_id is None:
cfg.run_postfix = str(np.random.randint(int(1e6)))
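Editor's note: one thing the reformatting above does not change is that `train_on_shapenet` still takes a mutable list as a default argument and then appends to it (`obj_filters.append("remove_modelnet")`), so the default list is shared across calls and grows every time the function runs with `remove_modelnet=True`. A minimal sketch of the pitfall and the usual `None`-sentinel fix, using hypothetical stand-in functions:

```python
from typing import Optional


def risky(filters: list[str] = ["10mb_20k"], extra: bool = False) -> list[str]:
    # The default list is created once, at function definition time.
    if extra:
        filters.append("remove_modelnet")
    return filters


risky(extra=True)
print(risky())  # ['10mb_20k', 'remove_modelnet'] -- the default was mutated!


def safe(filters: Optional[list[str]] = None, extra: bool = False) -> list[str]:
    # A fresh list per call; callers never share state through the default.
    filters = ["10mb_20k"] if filters is None else list(filters)
    if extra:
        filters.append("remove_modelnet")
    return filters


safe(extra=True)
print(safe())  # ['10mb_20k']
```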
diff --git a/happypose/pose_estimators/megapose/scripts/test_distributed.py b/happypose/pose_estimators/megapose/scripts/test_distributed.py
index 0667d07b..b16e99d4 100644
--- a/happypose/pose_estimators/megapose/scripts/test_distributed.py
+++ b/happypose/pose_estimators/megapose/scripts/test_distributed.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import os
diff --git a/happypose/pose_estimators/megapose/tests/__init__.py b/happypose/pose_estimators/megapose/tests/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/tests/__init__.py
+++ b/happypose/pose_estimators/megapose/tests/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/training/__init__.py b/happypose/pose_estimators/megapose/training/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/training/__init__.py
+++ b/happypose/pose_estimators/megapose/training/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/pose_estimators/megapose/training/detector_models_cfg.py b/happypose/pose_estimators/megapose/training/detector_models_cfg.py
index e156c7bb..d74039ea 100644
--- a/happypose/pose_estimators/megapose/training/detector_models_cfg.py
+++ b/happypose/pose_estimators/megapose/training/detector_models_cfg.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -24,7 +23,9 @@
def check_update_config(cfg):
obj_prefix = cfg.train_ds_names[0][0].split(".")[0]
- cfg.label_to_category_id = {f"{obj_prefix}-{k}": v for k, v in cfg.label_to_category_id.items()}
+ cfg.label_to_category_id = {
+ f"{obj_prefix}-{k}": v for k, v in cfg.label_to_category_id.items()
+ }
return cfg
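Editor's note: the reformatted comprehension in `check_update_config` simply prefixes every detector label with the object-dataset prefix taken from the first training dataset name. A minimal sketch of the same transformation on a toy config; the dataset name and labels are hypothetical values for illustration:

```python
# Hypothetical config values.
train_ds_names = [("ycbv.pbr", 1.0)]
label_to_category_id = {"obj_000001": 1, "obj_000002": 2}

obj_prefix = train_ds_names[0][0].split(".")[0]  # "ycbv"
label_to_category_id = {
    f"{obj_prefix}-{k}": v for k, v in label_to_category_id.items()
}
print(label_to_category_id)
# {'ycbv-obj_000001': 1, 'ycbv-obj_000002': 2}
```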
diff --git a/happypose/pose_estimators/megapose/training/megapose_forward_loss.py b/happypose/pose_estimators/megapose/training/megapose_forward_loss.py
index a15aea90..2b3548ff 100644
--- a/happypose/pose_estimators/megapose/training/megapose_forward_loss.py
+++ b/happypose/pose_estimators/megapose/training/megapose_forward_loss.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
-from typing import Any, Dict
+from typing import Any
# Third Party
import numpy as np
@@ -26,6 +25,10 @@
from bokeh.layouts import gridplot
from torch import nn
+from happypose.pose_estimators.megapose.models.pose_rigid import PosePredictor
+from happypose.pose_estimators.megapose.training.training_config import TrainingConfig
+from happypose.pose_estimators.megapose.training.utils import cast, cast_images
+
# MegaPose
from happypose.toolbox.datasets.pose_dataset import BatchPoseData
from happypose.toolbox.lib3d.camera_geometry import (
@@ -38,9 +41,6 @@
from happypose.toolbox.lib3d.multiview import make_TCO_multiview
from happypose.toolbox.lib3d.rigid_mesh_database import BatchedMeshes
from happypose.toolbox.lib3d.transform_ops import add_noise, invert_transform_matrices
-from happypose.pose_estimators.megapose.models.pose_rigid import PosePredictor
-from happypose.pose_estimators.megapose.training.training_config import TrainingConfig
-from happypose.pose_estimators.megapose.training.utils import cast, cast_images
from happypose.toolbox.visualization.bokeh_plotter import BokehPlotter
@@ -48,15 +48,14 @@ def megapose_forward_loss(
model: PosePredictor,
cfg: TrainingConfig,
data: BatchPoseData,
- meters: Dict[str, torchnet.meter.AverageValueMeter],
+ meters: dict[str, torchnet.meter.AverageValueMeter],
mesh_db: BatchedMeshes,
n_iterations: int,
- debug_dict: Dict[str, Any],
+ debug_dict: dict[str, Any],
make_visualization: bool = False,
train: bool = True,
is_notebook: bool = False,
) -> torch.Tensor:
-
# Normalize RGB dims to be in [0,1] from [0,255]
# Don't tamper with depth
images = cast_images(rgb=data.rgbs, depth=data.depths)
@@ -75,15 +74,21 @@ def megapose_forward_loss(
torch.arange(batch_size, device=device).unsqueeze(1).repeat(1, cfg.n_hypotheses)
)
hypotheses_labels = np.repeat(
- np.expand_dims(np.array(labels_gt, dtype=object), axis=1), cfg.n_hypotheses, axis=1
+ np.expand_dims(np.array(labels_gt, dtype=object), axis=1),
+ cfg.n_hypotheses,
+ axis=1,
).copy()
if cfg.hypotheses_init_method == "coarse_z_up+auto-depth":
assert cfg.n_hypotheses == 1
- points_3d = mesh_db.select(np.ravel(hypotheses_labels).tolist()).sample_points(200)
+ points_3d = mesh_db.select(np.ravel(hypotheses_labels).tolist()).sample_points(
+ 200,
+ )
TCO_init_zup = TCO_init_from_boxes_zup_autodepth(bboxes_gt, points_3d, K)
TCO_init_zup = add_noise(
- TCO_init_zup, euler_deg_std=[0, 0, 0], trans_std=[0.01, 0.01, 0.05]
+ TCO_init_zup,
+ euler_deg_std=[0, 0, 0],
+ trans_std=[0.01, 0.01, 0.05],
)
hypotheses_TCO_init = TCO_init_zup.unsqueeze(1)
is_hypothesis_positive = None
@@ -106,7 +111,9 @@ def megapose_forward_loss(
trans_std=cfg.init_trans_std,
)
tOR = torch.zeros(batch_size, 3, device=device, dtype=dtype)
- tCR = TCO_gt_noise[..., :3, [-1]] + TCO_gt_noise[..., :3, :3] @ tOR.unsqueeze(-1)
+ tCR = TCO_gt_noise[..., :3, [-1]] + TCO_gt_noise[..., :3, :3] @ tOR.unsqueeze(
+ -1,
+ )
tCR = tCR.squeeze(-1)
TCV_O = make_TCO_multiview(
TCO_gt_noise,
@@ -121,7 +128,9 @@ def megapose_forward_loss(
views_permutation = np.empty((2, batch_size, n_hypotheses), dtype=int)
for b in range(batch_size):
views_permutation[0, b, :] = b
- views_permutation[1, b, :] = np.random.permutation(n_candidate_views)[:n_hypotheses]
+ views_permutation[1, b, :] = np.random.permutation(n_candidate_views)[
+ :n_hypotheses
+ ]
positive_idx = np.where(views_permutation[1, b] == 0)[0]
is_hypothesis_positive[b, positive_idx] = 1
if len(positive_idx) == 0:
@@ -152,7 +161,9 @@ def megapose_forward_loss(
meshes = mesh_db.select(labels_gt)
points = meshes.sample_points(cfg.n_points_loss)
TCO_possible_gt = TCO_gt.unsqueeze(1) @ meshes.symmetries
- TCO_possible_gt = TCO_possible_gt.unsqueeze(1).repeat(1, n_hypotheses, 1, 1, 1).flatten(0, 1)
+ TCO_possible_gt = (
+ TCO_possible_gt.unsqueeze(1).repeat(1, n_hypotheses, 1, 1, 1).flatten(0, 1)
+ )
points = points.unsqueeze(1).repeat(1, n_hypotheses, 1, 1).flatten(0, 1)
list_losses_pose = []
@@ -165,7 +176,10 @@ def megapose_forward_loss(
loss_TCO_iter, loss_TCO_iter_data = None, None
if cfg.predict_pose_update:
- loss_TCO_iter, loss_TCO_iter_data = loss_refiner_CO_disentangled_reference_point(
+ (
+ loss_TCO_iter,
+ loss_TCO_iter_data,
+ ) = loss_refiner_CO_disentangled_reference_point(
TCO_possible_gt=TCO_possible_gt,
points=points,
TCO_input=iter_outputs.TCO_input,
@@ -179,7 +193,7 @@ def megapose_forward_loss(
if cfg.predict_rendered_views_logits:
list_rendering_logits.append(
- iter_outputs.renderings_logits.view(batch_size, n_hypotheses, -1)
+ iter_outputs.renderings_logits.view(batch_size, n_hypotheses, -1),
)
time_render += iter_outputs.timing_dict["render"]
@@ -198,7 +212,9 @@ def megapose_forward_loss(
# Batch size x N hypotheses x N iterations
loss_hypotheses = torch.zeros(
- (batch_size, n_hypotheses, n_iterations), device=device, dtype=dtype
+ (batch_size, n_hypotheses, n_iterations),
+ device=device,
+ dtype=dtype,
)
if cfg.predict_pose_update:
losses_pose = torch.stack(list_losses_pose).permute(1, 2, 0)
@@ -216,8 +232,12 @@ def megapose_forward_loss(
rendering_logits.flatten(1, 3),
torch.tensor(is_hypothesis_positive, dtype=torch.float, device=device),
).unsqueeze(-1)
- meters["loss_renderings_confidence"].add(loss_renderings_confidence.mean().item())
- loss_hypotheses += cfg.loss_alpha_renderings_confidence * loss_renderings_confidence
+ meters["loss_renderings_confidence"].add(
+ loss_renderings_confidence.mean().item(),
+ )
+ loss_hypotheses += (
+ cfg.loss_alpha_renderings_confidence * loss_renderings_confidence
+ )
loss = loss_hypotheses.mean()
@@ -226,10 +246,12 @@ def megapose_forward_loss(
if make_visualization:
def add_mask_to_image(
- image: torch.Tensor, mask: torch.Tensor, color: str = "red"
+ image: torch.Tensor,
+ mask: torch.Tensor,
+ color: str = "red",
) -> torch.Tensor:
t_color = torch.zeros_like(image)
- idx = dict(red=0, green=1, blue=2)[color]
+ idx = {"red": 0, "green": 1, "blue": 2}[color]
t_color[idx, mask > 0] = 1.0
output = image * 0.8 + t_color * 0.2
return output
@@ -240,10 +262,18 @@ def add_mask_to_image(
n_views = cfg.n_rendered_views
last_iter_outputs = outputs[f"iteration={n_iterations}"]
images_crop = last_iter_outputs.images_crop
- images_crop = images_crop.view(batch_size, n_hypotheses, *images_crop.shape[-3:])
+ images_crop = images_crop.view(
+ batch_size,
+ n_hypotheses,
+ *images_crop.shape[-3:],
+ )
renders = last_iter_outputs.renders
renders = renders.view(
- batch_size, n_hypotheses, n_views, renders.shape[1] // n_views, *renders.shape[-2:]
+ batch_size,
+ n_hypotheses,
+ n_views,
+ renders.shape[1] // n_views,
+ *renders.shape[-2:],
)
KV_crop = last_iter_outputs.KV_crop
@@ -278,16 +308,27 @@ def add_mask_to_image(
TCO_ = TCV_O[[batch_idx], init_idx, view_idx]
TCR_ = TCV_R[[batch_idx], init_idx, view_idx]
-
image_crop_ = add_mask_to_image(image_crop_[:3], image_crop_[-1])
- image_crop_ = add_mask_to_image(image_crop_[:3], render_[-1], "green")
+ image_crop_ = add_mask_to_image(
+ image_crop_[:3],
+ render_[-1],
+ "green",
+ )
f = plotter.plot_image(image_crop_)
f.title.text = f"init of iteration {n_iterations}"
row.append(f)
n_channels = render_.shape[0]
- ref_point_uv = project_points_robust(points_orig, KV_crop_, TCR_).flatten()
- origin_uv = project_points_robust(points_orig, KV_crop_, TCO_).flatten()
+ ref_point_uv = project_points_robust(
+ points_orig,
+ KV_crop_,
+ TCR_,
+ ).flatten()
+ origin_uv = project_points_robust(
+ points_orig,
+ KV_crop_,
+ TCO_,
+ ).flatten()
f = plotter.plot_image(render_[:3])
f.circle(
[int(ref_point_uv[0])],
@@ -295,14 +336,17 @@ def add_mask_to_image(
color="red",
)
f.circle(
- [int(origin_uv[0])], [int(render_.shape[1] - origin_uv[1])], color="green"
+ [int(origin_uv[0])],
+ [int(render_.shape[1] - origin_uv[1])],
+ color="green",
)
f.title.text = f"idx={batch_idx},view={view_idx},init={init_idx}"
if cfg.predict_rendered_views_logits:
assert is_hypothesis_positive is not None
is_positive = is_hypothesis_positive[batch_idx, init_idx]
f.title.text = (
- f"idx={batch_idx},view={view_idx},init={init_idx},target={is_positive}"
+ f"idx={batch_idx},view={view_idx},init={init_idx},"
+ f"target={is_positive}"
)
row.append(f)
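Editor's note: the permutation logic reformatted in this file samples, per batch element, `n_hypotheses` distinct candidate views and marks as positive any hypothesis that drew view 0, which by the convention of the surrounding code is the view aligned with the ground truth. A minimal self-contained sketch of that sampling with toy sizes; only the sampling and labeling steps are reproduced:

```python
import numpy as np

batch_size, n_hypotheses, n_candidate_views = 4, 2, 5
rng = np.random.default_rng(0)

views_permutation = np.empty((2, batch_size, n_hypotheses), dtype=int)
is_hypothesis_positive = np.zeros((batch_size, n_hypotheses), dtype=int)
for b in range(batch_size):
    views_permutation[0, b, :] = b  # batch index of each hypothesis
    # Draw n_hypotheses distinct candidate views for this element.
    views_permutation[1, b, :] = rng.permutation(n_candidate_views)[:n_hypotheses]
    # View 0 is the ground-truth-aligned view: mark it positive if drawn.
    positive_idx = np.where(views_permutation[1, b] == 0)[0]
    is_hypothesis_positive[b, positive_idx] = 1

print(views_permutation[1])
print(is_hypothesis_positive)
```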
diff --git a/happypose/pose_estimators/megapose/training/pose_models_cfg.py b/happypose/pose_estimators/megapose/training/pose_models_cfg.py
index e7744533..6a6dc77a 100644
--- a/happypose/pose_estimators/megapose/training/pose_models_cfg.py
+++ b/happypose/pose_estimators/megapose/training/pose_models_cfg.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,18 +15,20 @@
# Standard Library
-from typing import Union
# MegaPose
# Backbones
import happypose.pose_estimators.megapose.models.torchvision_resnet as models
-from happypose.toolbox.lib3d.rigid_mesh_database import BatchedMeshes
# Pose models
from happypose.pose_estimators.megapose.models.pose_rigid import PosePredictor
-from happypose.pose_estimators.megapose.models.wide_resnet import WideResNet18, WideResNet34
-from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
+from happypose.pose_estimators.megapose.models.wide_resnet import (
+ WideResNet18,
+ WideResNet34,
+)
from happypose.pose_estimators.megapose.training.training_config import TrainingConfig
+from happypose.toolbox.lib3d.rigid_mesh_database import BatchedMeshes
+from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
from happypose.toolbox.utils.logging import get_logger
logger = get_logger(__name__)
@@ -35,7 +36,6 @@
def check_update_config(cfg: TrainingConfig) -> TrainingConfig:
"""Useful for loading models previously trained with different configurations."""
-
cfg.is_coarse_compat = False
# Detect old coarse model definition
if hasattr(cfg, "input_strategy") and cfg.input_strategy == "input=obs+one_render":
@@ -99,13 +99,17 @@ def create_model_pose(
# Assumes that if you are rendering depth you are also
# inputting it from the model
n_inputs = (n_channels + n_depth_channels) + (
- (n_channels + n_normals_channels + n_rendered_depth_channels) * cfg.n_rendered_views
+ (n_channels + n_normals_channels + n_rendered_depth_channels)
+ * cfg.n_rendered_views
)
backbone_str = cfg.backbone_str
render_size = (240, 320)
if "vanilla_resnet34" == backbone_str:
n_features = 512
- backbone = models.__dict__["resnet34"](num_classes=n_features, n_input_channels=n_inputs)
+ backbone = models.__dict__["resnet34"](
+ num_classes=n_features,
+ n_input_channels=n_inputs,
+ )
backbone.n_features = n_features
elif "resnet34" == backbone_str:
backbone = WideResNet34(n_inputs=n_inputs)
@@ -115,7 +119,8 @@ def create_model_pose(
width = int(backbone_str.split("resnet34_width=")[1])
backbone = WideResNet34(n_inputs=n_inputs, width=width)
else:
- raise ValueError("Unknown backbone", backbone_str)
+ msg = "Unknown backbone"
+ raise ValueError(msg, backbone_str)
logger.debug(f"Backbone: {backbone_str}")
backbone.n_inputs = n_inputs
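Editor's note: the backbone construction reformatted above is a plain name-based dispatch: a config string selects a constructor (via `models.__dict__[...]` or an `elif` chain) and unknown names raise. A minimal sketch of the same dispatch with a hypothetical registry dict in place of the module namespace; the constructor here returns a description string so the sketch stays dependency-free:

```python
from typing import Callable


def make_resnet34(num_classes: int, n_input_channels: int) -> str:
    # Stand-in for models.__dict__["resnet34"](...).
    return f"resnet34(classes={num_classes}, in_ch={n_input_channels})"


BACKBONES: dict[str, Callable[..., str]] = {"vanilla_resnet34": make_resnet34}


def create_backbone(backbone_str: str, n_inputs: int) -> str:
    if backbone_str not in BACKBONES:
        # Same message-variable raise idiom the diff introduces.
        msg = "Unknown backbone"
        raise ValueError(msg, backbone_str)
    return BACKBONES[backbone_str](num_classes=512, n_input_channels=n_inputs)


print(create_backbone("vanilla_resnet34", n_inputs=9))
```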
diff --git a/happypose/pose_estimators/megapose/training/train_megapose.py b/happypose/pose_estimators/megapose/training/train_megapose.py
index 8ec51f4d..72fe673c 100644
--- a/happypose/pose_estimators/megapose/training/train_megapose.py
+++ b/happypose/pose_estimators/megapose/training/train_megapose.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,7 +19,7 @@
import os
import time
from collections import defaultdict
-from typing import Any, Dict, List
+from typing import Any
# Third Party
import numpy as np
@@ -34,8 +33,34 @@
# MegaPose
from happypose.pose_estimators.megapose.config import EXP_DIR
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset, make_scene_dataset
-from happypose.toolbox.datasets.object_dataset import RigidObjectDataset, concat_object_datasets
+from happypose.pose_estimators.megapose.panda3d_renderer.panda3d_batch_renderer import (
+ Panda3dBatchRenderer,
+)
+from happypose.pose_estimators.megapose.training.megapose_forward_loss import (
+ megapose_forward_loss,
+)
+from happypose.pose_estimators.megapose.training.pose_models_cfg import (
+ check_update_config,
+ create_model_pose,
+)
+from happypose.pose_estimators.megapose.training.training_config import (
+ DatasetConfig,
+ TrainingConfig,
+)
+from happypose.pose_estimators.megapose.training.utils import (
+ CudaTimer,
+ make_lr_ratio_function,
+ make_optimizer,
+ write_logs,
+)
+from happypose.toolbox.datasets.datasets_cfg import (
+ make_object_dataset,
+ make_scene_dataset,
+)
+from happypose.toolbox.datasets.object_dataset import (
+ RigidObjectDataset,
+ concat_object_datasets,
+)
from happypose.toolbox.datasets.pose_dataset import PoseDataset
from happypose.toolbox.datasets.scene_dataset import (
IterableMultiSceneDataset,
@@ -43,18 +68,11 @@
RandomIterableSceneDataset,
SceneDataset,
)
-from happypose.toolbox.datasets.web_scene_dataset import IterableWebSceneDataset, WebSceneDataset
-from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.pose_estimators.megapose.panda3d_renderer.panda3d_batch_renderer import Panda3dBatchRenderer
-from happypose.pose_estimators.megapose.training.megapose_forward_loss import megapose_forward_loss
-from happypose.pose_estimators.megapose.training.pose_models_cfg import check_update_config, create_model_pose
-from happypose.pose_estimators.megapose.training.training_config import DatasetConfig, TrainingConfig
-from happypose.pose_estimators.megapose.training.utils import (
- CudaTimer,
- make_lr_ratio_function,
- make_optimizer,
- write_logs,
+from happypose.toolbox.datasets.web_scene_dataset import (
+ IterableWebSceneDataset,
+ WebSceneDataset,
)
+from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
from happypose.toolbox.utils.distributed import (
get_rank,
get_world_size,
@@ -65,7 +83,11 @@
)
from happypose.toolbox.utils.logging import get_logger
from happypose.toolbox.utils.random import get_unique_seed, set_seed, temp_numpy_seed
-from happypose.toolbox.utils.resources import get_cuda_memory, get_gpu_memory, get_total_memory
+from happypose.toolbox.utils.resources import (
+ get_cuda_memory,
+ get_gpu_memory,
+ get_total_memory,
+)
def worker_init_fn(worker_id: int) -> None:
@@ -91,23 +113,27 @@ def train_megapose(cfg: TrainingConfig) -> None:
cfg.global_batch_size = world_size * cfg.batch_size
assert cfg.hardware.n_gpus == world_size
- def split_objects_across_gpus(obj_dataset: RigidObjectDataset) -> RigidObjectDataset:
+ def split_objects_across_gpus(
+ obj_dataset: RigidObjectDataset,
+ ) -> RigidObjectDataset:
rank, world_size = get_rank(), get_world_size()
if cfg.split_objects_across_gpus:
with temp_numpy_seed(0):
this_rank_labels = set(
np.array_split(
- np.random.permutation(np.array([obj.label for obj in obj_dataset.objects])),
+ np.random.permutation(
+ np.array([obj.label for obj in obj_dataset.objects]),
+ ),
world_size,
- )[rank].tolist()
+ )[rank].tolist(),
)
else:
- this_rank_labels = set([obj.label for obj in renderer_obj_dataset.objects])
+ this_rank_labels = {obj.label for obj in renderer_obj_dataset.objects}
if cfg.n_max_objects is not None:
this_rank_labels = set(list(this_rank_labels)[: cfg.n_max_objects])
obj_dataset = RigidObjectDataset(
- [obj for obj in obj_dataset.objects if obj.label in this_rank_labels]
+ [obj for obj in obj_dataset.objects if obj.label in this_rank_labels],
)
return obj_dataset
@@ -116,21 +142,21 @@ def split_objects_across_gpus(obj_dataset: RigidObjectDataset) -> RigidObjectDat
[
split_objects_across_gpus(make_object_dataset(ds_cfg.renderer_obj_ds_name))
for ds_cfg in cfg.train_datasets + cfg.val_datasets
- ]
+ ],
)
mesh_obj_dataset = concat_object_datasets(
[
split_objects_across_gpus(make_object_dataset(ds_cfg.mesh_obj_ds_name))
for ds_cfg in cfg.train_datasets + cfg.val_datasets
- ]
+ ],
)
- this_rank_labels = set([obj.label for obj in renderer_obj_dataset.objects])
+ this_rank_labels = {obj.label for obj in renderer_obj_dataset.objects}
assert len(renderer_obj_dataset) == len(mesh_obj_dataset)
logger.info(f"Number of objects to train on (this rank): {len(mesh_obj_dataset)})")
# Scene dataset
def make_iterable_scene_dataset(
- dataset_configs: List[DatasetConfig],
+ dataset_configs: list[DatasetConfig],
deterministic: bool = False,
) -> IterableMultiSceneDataset:
scene_dataset_iterators = []
@@ -142,7 +168,8 @@ def make_iterable_scene_dataset(
if isinstance(ds, WebSceneDataset):
assert not deterministic
iterator: IterableSceneDataset = IterableWebSceneDataset(
- ds, buffer_size=cfg.sample_buffer_size
+ ds,
+ buffer_size=cfg.sample_buffer_size,
)
else:
assert isinstance(ds, SceneDataset)
@@ -230,7 +257,8 @@ def make_iterable_scene_dataset(
ckpt = torch.load(ckpt_path)
except EOFError:
print(
- "Unable to load checkpoint.pth.tar. Falling back to checkpoint_epoch=last.pth.tar"
+ "Unable to load checkpoint.pth.tar. "
+ "Falling back to checkpoint_epoch=last.pth.tar",
)
ckpt_path = resume_run_dir / "checkpoint_epoch=last.pth.tar"
ckpt = torch.load(ckpt_path)
@@ -244,7 +272,9 @@ def make_iterable_scene_dataset(
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = sync_model(model)
model = torch.nn.parallel.DistributedDataParallel(
- model, device_ids=[torch.cuda.current_device()], output_device=torch.cuda.current_device()
+ model,
+ device_ids=[torch.cuda.current_device()],
+ output_device=torch.cuda.current_device(),
)
optimizer = make_optimizer(model.parameters(), cfg)
@@ -252,7 +282,10 @@ def make_iterable_scene_dataset(
this_rank_epoch_size = cfg.epoch_size // get_world_size()
this_rank_n_batch_per_epoch = this_rank_epoch_size // cfg.batch_size
# NOTE: LR schedulers "epoch" actually correspond to "batch"
- lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, make_lr_ratio_function(cfg))
+ lr_scheduler = torch.optim.lr_scheduler.LambdaLR(
+ optimizer,
+ make_lr_ratio_function(cfg),
+ )
lr_scheduler.last_epoch = ( # type: ignore
start_epoch * this_rank_epoch_size // cfg.batch_size - 1
)
@@ -266,16 +299,27 @@ def make_iterable_scene_dataset(
scaler = torch.cuda.amp.GradScaler()
for epoch in range(start_epoch, cfg.n_epochs + 1):
- meters_train: Dict[str, AverageValueMeter] = defaultdict(lambda: AverageValueMeter())
- meters_val: Dict[str, AverageValueMeter] = defaultdict(lambda: AverageValueMeter())
+ meters_train: dict[str, AverageValueMeter] = defaultdict(
+ lambda: AverageValueMeter(),
+ )
+ meters_val: dict[str, AverageValueMeter] = defaultdict(
+ lambda: AverageValueMeter(),
+ )
if cfg.add_iteration_epoch_interval is None:
n_iterations = cfg.n_iterations
else:
- n_iterations = min(epoch // cfg.add_iteration_epoch_interval + 1, cfg.n_iterations)
+ n_iterations = min(
+ epoch // cfg.add_iteration_epoch_interval + 1,
+ cfg.n_iterations,
+ )
forward_loss_fn = functools.partial(
- megapose_forward_loss, model=model, cfg=cfg, n_iterations=n_iterations, mesh_db=mesh_db
+ megapose_forward_loss,
+ model=model,
+ cfg=cfg,
+ n_iterations=n_iterations,
+ mesh_db=mesh_db,
)
def train() -> None:
@@ -283,7 +327,9 @@ def train() -> None:
set_seed(epoch * get_rank() + get_rank())
model.train()
pbar = tqdm(
- range(this_rank_n_batch_per_epoch), ncols=120, disable=cfg.logging_style != "tqdm"
+ range(this_rank_n_batch_per_epoch),
+ ncols=120,
+ disable=cfg.logging_style != "tqdm",
)
for n in pbar:
start_iter = time.time()
@@ -293,7 +339,7 @@ def train() -> None:
optimizer.zero_grad()
- debug_dict: Dict[str, Any] = dict()
+ debug_dict: dict[str, Any] = {}
timer_forward = CudaTimer(enabled=cfg.cuda_timing)
timer_forward.start()
with torch.cuda.amp.autocast():
@@ -313,7 +359,9 @@ def train() -> None:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
total_grad_norm = torch.nn.utils.clip_grad_norm_(
- model.parameters(), max_norm=cfg.clip_grad_norm, norm_type=2
+ model.parameters(),
+ max_norm=cfg.clip_grad_norm,
+ norm_type=2,
)
meters["grad_norm"].add(torch.as_tensor(total_grad_norm).item())
@@ -327,14 +375,14 @@ def train() -> None:
if n > 0:
meters["time_iter"].add(time_iter)
- infos = dict(
- loss=f"{loss.item():.2e}",
- tf=f"{timer_forward.elapsed():.3f}",
- tb=f"{timer_backward.elapsed():.3f}",
- tr=f"{time_render:.3f}",
- td=f"{time_data:.3f}",
- tt=f"{time_iter:.3f}",
- )
+ infos = {
+ "loss": f"{loss.item():.2e}",
+ "tf": f"{timer_forward.elapsed():.3f}",
+ "tb": f"{timer_backward.elapsed():.3f}",
+ "tr": f"{time_render:.3f}",
+ "td": f"{time_data:.3f}",
+ "tt": f"{time_iter:.3f}",
+ }
infos["it/s"] = f"{1. / time_iter:.2f}"
if not pbar.disable:
pbar.set_postfix(**infos)
@@ -360,7 +408,7 @@ def validation() -> None:
iter_val = iter(ds_iter_val)
n_batch = (cfg.val_size // get_world_size()) // cfg.batch_size
pbar = tqdm(range(n_batch), ncols=120)
- for n in pbar:
+ for _n in pbar:
data = next(iter_val)
loss = forward_loss_fn(
data=data,
@@ -375,7 +423,7 @@ def validation() -> None:
if do_eval and ds_iter_val is not None:
validation()
- log_dict = dict()
+ log_dict = {}
log_dict.update(
{
"grad_norm": meters_train["grad_norm"].mean,
@@ -390,7 +438,7 @@ def validation() -> None:
"time": time.time(),
"n_iterations": epoch * cfg.epoch_size // cfg.batch_size,
"n_datas": epoch * this_rank_n_batch_per_epoch * cfg.batch_size,
- }
+ },
)
for string, meters in zip(("train", "val"), (meters_train, meters_val)):
diff --git a/happypose/pose_estimators/megapose/training/training_config.py b/happypose/pose_estimators/megapose/training/training_config.py
index 7d7a9163..ddc7d9bf 100644
--- a/happypose/pose_estimators/megapose/training/training_config.py
+++ b/happypose/pose_estimators/megapose/training/training_config.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
from dataclasses import dataclass, field
-from typing import List, Optional, Tuple
+from typing import Optional
# Third Party
import numpy as np
@@ -47,15 +46,16 @@ class TrainingConfig(omegaconf.dictconfig.DictConfig):
Two options for creating a training configuration:
1. Create it manually, and set `run_id`.
2. If `run_id` is None, then use `config_id`, `run_comment` and
- `run_postfix` to create a `run_id`
+ `run_postfix` to create a `run_id`.
- In 2., the parameters of the config are set-up using the function `update_cfg_with_config_id`.
+ In 2., the parameters of the config are set up using the function
+ `update_cfg_with_config_id`.
"""
# Datasets
- train_datasets: List[DatasetConfig] = field(default_factory=lambda: [])
+ train_datasets: list[DatasetConfig] = field(default_factory=lambda: [])
input_resize: Resolution = (540, 720)
- val_datasets: List[DatasetConfig] = field(default_factory=lambda: [])
+ val_datasets: list[DatasetConfig] = field(default_factory=lambda: [])
val_epoch_interval: int = 10
split_objects_across_gpus: bool = True
n_max_objects: Optional[int] = None
@@ -106,8 +106,8 @@ class TrainingConfig(omegaconf.dictconfig.DictConfig):
# Hypotheses
hypotheses_init_method: str = "refiner_gt+noise"
n_hypotheses: int = 1
- init_euler_deg_std: Tuple[float, float, float] = (15, 15, 15)
- init_trans_std: Tuple[float, float, float] = (0.01, 0.01, 0.05)
+ init_euler_deg_std: tuple[float, float, float] = (15, 15, 15)
+ init_trans_std: tuple[float, float, float] = (0.01, 0.01, 0.05)
# Optimizer
optimizer: str = "adam"
diff --git a/happypose/pose_estimators/megapose/training/utils.py b/happypose/pose_estimators/megapose/training/utils.py
index 4457d1ac..68753d56 100644
--- a/happypose/pose_estimators/megapose/training/utils.py
+++ b/happypose/pose_estimators/megapose/training/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,15 +16,15 @@
# Standard Library
import time
+from collections.abc import Iterator
from pathlib import Path
-from typing import Callable, Iterator, Optional
+from typing import Callable, Optional
# Third Party
import simplejson as json
import torch
from bokeh import document
-from bokeh.io import export_png, save
-from bokeh.io.export import get_screenshot_as_png
+from bokeh.io import save
from omegaconf import OmegaConf
# MegaPose
@@ -56,10 +55,12 @@ def cast_images(rgb: torch.Tensor, depth: Optional[torch.Tensor]) -> torch.Tenso
"""Convert rgb and depth to a single to cuda FloatTensor.
Arguments:
+ ---------
rgb: (bsz, 3, h, w) uint8 tensor, with values in [0, 255]
depth: (bsz, h, w) float tensor, or None
Returns:
+ -------
images: (bsz, 3, h, w) RGB or (bsz, 4, h, w) RGB-D images.
"""
rgb_tensor = cast(rgb).float() / 255
@@ -71,9 +72,10 @@ def cast_images(rgb: torch.Tensor, depth: Optional[torch.Tensor]) -> torch.Tenso
def cast_tensor_image_to_numpy(images):
- """Convert images to
+ """Convert images to.
Args:
+ ----
images: [B,C,H,W]
"""
images = (images[:, :3] * 255).to(torch.uint8)
@@ -83,23 +85,28 @@ def cast_tensor_image_to_numpy(images):
def cast_raw_numpy_images_to_tensor(images):
- """
- Casts numpy images to tensor.
+ """Casts numpy images to tensor.
Args:
+ ----
images: [B,H,W,C] numpy array, RGB values in [0,255], depth in meters
"""
B, H, W, C = images.shape
- assert C in [
- 3,
- 4,
- ], f"images must have shape [B,H,W,C] with C=3 (rgb) or C=4 (rgbd), encountered C={C}"
+ msg = (
+ f"images must have shape [B,H,W,C] with C=3 (rgb) or C=4 (rgbd), "
+ f"encountered C={C}"
+ )
+ assert C in [3, 4], msg
images = torch.as_tensor(images)
max_rgb = torch.max(images[:, RGB_DIMS])
if max_rgb < 1.5:
- raise Warning("You are about to divide by 255 but the max rgb pixel value is less than 1.5")
+ msg = (
+ "You are about to divide by 255 "
+ "but the max rgb pixel value is less than 1.5"
+ )
+ raise Warning(msg)
# [B,C,H,W]
images = images.permute(0, 3, 1, 2).cuda().float()
@@ -109,16 +116,21 @@ def cast_raw_numpy_images_to_tensor(images):
def make_optimizer(
parameters: Iterator[torch.nn.Parameter],
- cfg: TrainingConfig
+ cfg: TrainingConfig,
) -> torch.optim.Optimizer:
-
optimizer: Optional[torch.optim.Optimizer] = None
if cfg.optimizer == "adam":
optimizer = torch.optim.Adam(
- parameters, lr=cfg.lr, weight_decay=cfg.weight_decay)
+ parameters,
+ lr=cfg.lr,
+ weight_decay=cfg.weight_decay,
+ )
elif cfg.optimizer == "sgd":
optimizer = torch.optim.SGD(
- parameters, lr=cfg.lr, momentum=cfg.sgd_momentum, weight_decay=cfg.weight_decay
+ parameters,
+ lr=cfg.lr,
+ momentum=cfg.sgd_momentum,
+ weight_decay=cfg.weight_decay,
)
else:
raise ValueError(cfg.optimizer)
@@ -126,7 +138,6 @@ def make_optimizer(
def make_lr_ratio_function(cfg: TrainingConfig) -> Callable:
-
def lr_ratio(batch: int) -> float:
this_rank_epoch_size = cfg.epoch_size // get_world_size()
n_batch_per_epoch = this_rank_epoch_size // cfg.batch_size
@@ -179,7 +190,9 @@ def save_checkpoint(model, postfix=None):
if cfg.vis_save_only_last:
bokeh_doc_path = bokeh_doc_dir / f"epoch=last_{bokeh_doc_postfix}.html"
else:
- bokeh_doc_path = bokeh_doc_dir / f"epoch={epoch}_{bokeh_doc_postfix}.html"
+ bokeh_doc_path = (
+ bokeh_doc_dir / f"epoch={epoch}_{bokeh_doc_postfix}.html"
+ )
if bokeh_doc_path.exists():
bokeh_doc_path.unlink()
bokeh_doc = document.Document.from_json(bokeh_doc_json)
@@ -256,9 +269,11 @@ def elapsed(self) -> float:
return 0.0
if not self.start_called:
- raise ValueError("You must call CudaTimer.start() before querying the elapsed time")
+ msg = "You must call CudaTimer.start() before querying the elapsed time"
+ raise ValueError(msg)
if not self.end_called:
- raise ValueError("You must call CudaTimer.end() before querying the elapsed time")
+ msg = "You must call CudaTimer.end() before querying the elapsed time"
+ raise ValueError(msg)
return self.elapsed_sec
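Editor's note: the `CudaTimer` guard clauses reformatted above enforce a strict start/end/elapsed call order and raise with a pre-built message string (the same message-variable idiom used throughout this diff). A minimal stand-in that mirrors the call protocol without CUDA, so the guard behavior can be exercised directly; names and internals are assumptions, not the real class:

```python
import time


class Timer:
    """Stand-in mirroring the CudaTimer call protocol from the diff."""

    def __init__(self, enabled: bool = True) -> None:
        self.enabled = enabled
        self.start_called = False
        self.end_called = False
        self.elapsed_sec = 0.0
        self._t0 = 0.0

    def start(self) -> None:
        self.start_called = True
        self._t0 = time.perf_counter()

    def end(self) -> None:
        self.end_called = True
        self.elapsed_sec = time.perf_counter() - self._t0

    def elapsed(self) -> float:
        if not self.enabled:
            return 0.0
        if not self.start_called:
            msg = "You must call start() before querying the elapsed time"
            raise ValueError(msg)
        if not self.end_called:
            msg = "You must call end() before querying the elapsed time"
            raise ValueError(msg)
        return self.elapsed_sec


t = Timer()
t.start()
sum(range(10_000))  # stand-in workload
t.end()
print(f"{t.elapsed():.6f}s")
```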
diff --git a/happypose/pose_estimators/megapose/utils/__init__.py b/happypose/pose_estimators/megapose/utils/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/pose_estimators/megapose/utils/__init__.py
+++ b/happypose/pose_estimators/megapose/utils/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/toolbox/datasets/augmentations.py b/happypose/toolbox/datasets/augmentations.py
index 35e5db70..bb5fcbe5 100644
--- a/happypose/toolbox/datasets/augmentations.py
+++ b/happypose/toolbox/datasets/augmentations.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,13 +14,12 @@
"""
-
# Standard Library
import dataclasses
import random
from copy import deepcopy
from pathlib import Path
-from typing import Dict, List, Tuple, Union
+from typing import Union
# Third Party
import cv2
@@ -47,7 +45,10 @@ def __call__(self, obs: SceneObservation) -> SceneObservation:
class SceneObservationAugmentation(SceneObservationTransform):
def __init__(
self,
- transform: Union[SceneObservationTransform, List["SceneObservationAugmentation"]],
+ transform: Union[
+ SceneObservationTransform,
+ list["SceneObservationAugmentation"],
+ ],
p: float = 1.0,
):
self.p = p
@@ -65,39 +66,54 @@ def __call__(self, obs: SceneObservation) -> SceneObservation:
class PillowRGBTransform(SceneObservationTransform):
- def __init__(self, pillow_fn: PIL.ImageEnhance._Enhance, factor_interval: Tuple[float, float]):
+ def __init__(
+ self,
+ pillow_fn: PIL.ImageEnhance._Enhance,
+ factor_interval: tuple[float, float],
+ ):
self.pillow_fn = pillow_fn
self.factor_interval = factor_interval
def __call__(self, obs: SceneObservation) -> SceneObservation:
rgb_pil = PIL.Image.fromarray(obs.rgb)
- rgb_pil = self.pillow_fn(rgb_pil).enhance(factor=random.uniform(*self.factor_interval))
+ rgb_pil = self.pillow_fn(rgb_pil).enhance(
+ factor=random.uniform(*self.factor_interval),
+ )
obs = dataclasses.replace(obs, rgb=np.array(rgb_pil))
return obs
class PillowSharpness(PillowRGBTransform):
- def __init__(self, factor_interval: Tuple[float, float] = (0.0, 50.0)):
- super().__init__(pillow_fn=ImageEnhance.Sharpness, factor_interval=factor_interval)
+ def __init__(self, factor_interval: tuple[float, float] = (0.0, 50.0)):
+ super().__init__(
+ pillow_fn=ImageEnhance.Sharpness,
+ factor_interval=factor_interval,
+ )
class PillowContrast(PillowRGBTransform):
- def __init__(self, factor_interval: Tuple[float, float] = (0.2, 50.0)):
- super().__init__(pillow_fn=ImageEnhance.Contrast, factor_interval=factor_interval)
+ def __init__(self, factor_interval: tuple[float, float] = (0.2, 50.0)):
+ super().__init__(
+ pillow_fn=ImageEnhance.Contrast,
+ factor_interval=factor_interval,
+ )
class PillowBrightness(PillowRGBTransform):
- def __init__(self, factor_interval: Tuple[float, float] = (0.1, 6.0)):
- super().__init__(pillow_fn=ImageEnhance.Brightness, factor_interval=factor_interval)
+ def __init__(self, factor_interval: tuple[float, float] = (0.1, 6.0)):
+ super().__init__(
+ pillow_fn=ImageEnhance.Brightness,
+ factor_interval=factor_interval,
+ )
class PillowColor(PillowRGBTransform):
- def __init__(self, factor_interval: Tuple[float, float] = (0, 20.0)):
+ def __init__(self, factor_interval: tuple[float, float] = (0, 20.0)):
super().__init__(pillow_fn=ImageEnhance.Color, factor_interval=factor_interval)
class PillowBlur(SceneObservationTransform):
- def __init__(self, factor_interval: Tuple[int, int] = (1, 3)):
+ def __init__(self, factor_interval: tuple[int, int] = (1, 3)):
self.factor_interval = factor_interval
def __call__(self, obs: SceneObservation) -> SceneObservation:
@@ -156,8 +172,16 @@ def _transform_depth(self, depth: np.ndarray) -> np.ndarray:
)
small_H, small_W = (np.array([H, W]) / rescale_factor).astype(int)
- additive_noise = np.random.normal(loc=0.0, scale=self.std_dev, size=(small_H, small_W))
- additive_noise = cv2.resize(additive_noise, (W, H), interpolation=cv2.INTER_CUBIC)
+ additive_noise = np.random.normal(
+ loc=0.0,
+ scale=self.std_dev,
+ size=(small_H, small_W),
+ )
+ additive_noise = cv2.resize(
+ additive_noise,
+ (W, H),
+ interpolation=cv2.INTER_CUBIC,
+ )
depth[depth > 0] += additive_noise[depth > 0]
depth = np.clip(depth, 0, np.finfo(np.float32).max)
return depth
@@ -178,7 +202,9 @@ def _transform_depth(self, depth: np.ndarray) -> np.ndarray:
else:
missing_fraction = self.max_missing_fraction
dropout_ids = np.random.choice(
- np.arange(len(u_idx)), int(missing_fraction * len(u_idx)), replace=False
+ np.arange(len(u_idx)),
+ int(missing_fraction * len(u_idx)),
+ replace=False,
)
depth[v_idx[dropout_ids], u_idx[dropout_ids]] = 0
return depth
@@ -207,15 +233,21 @@ def __init__(
@staticmethod
def generate_random_ellipses(
- depth_img: np.ndarray, noise_params: Dict[str, float]
- ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
+ depth_img: np.ndarray,
+ noise_params: dict[str, float],
+ ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
# Sample number of ellipses to dropout
- num_ellipses_to_dropout = np.random.poisson(noise_params["ellipse_dropout_mean"])
+ num_ellipses_to_dropout = np.random.poisson(
+ noise_params["ellipse_dropout_mean"],
+ )
# Sample ellipse centers
- nonzero_pixel_indices = np.array(np.where(depth_img > 0)).T # Shape: [#nonzero_pixels x 2]
+ nonzero_pixel_indices = np.array(
+ np.where(depth_img > 0),
+ ).T # Shape: [#nonzero_pixels x 2]
dropout_centers_indices = np.random.choice(
- nonzero_pixel_indices.shape[0], size=num_ellipses_to_dropout
+ nonzero_pixel_indices.shape[0],
+ size=num_ellipses_to_dropout,
)
# Shape: [num_ellipses_to_dropout x 2]
dropout_centers = nonzero_pixel_indices[dropout_centers_indices, :]
@@ -237,7 +269,8 @@ def generate_random_ellipses(
@staticmethod
def dropout_random_ellipses(
- depth_img: np.ndarray, noise_params: Dict[str, float]
+ depth_img: np.ndarray,
+ noise_params: dict[str, float],
) -> np.ndarray:
"""Randomly drop a few ellipses in the image for robustness.
@@ -250,7 +283,6 @@ def dropout_random_ellipses(
@param depth_img: a [H x W] set of depth z values
"""
-
depth_img = depth_img.copy()
(
@@ -259,7 +291,8 @@ def dropout_random_ellipses(
angles,
dropout_centers,
) = DepthEllipseDropoutTransform.generate_random_ellipses(
- depth_img, noise_params=noise_params
+ depth_img,
+ noise_params=noise_params,
)
num_ellipses_to_dropout = x_radii.shape[0]
@@ -314,12 +347,17 @@ def _transform_depth(self, depth: np.ndarray) -> np.ndarray:
angles,
dropout_centers,
) = DepthEllipseDropoutTransform.generate_random_ellipses(
- depth_img, noise_params=self._noise_params
+ depth_img,
+ noise_params=self._noise_params,
)
num_ellipses_to_dropout = x_radii.shape[0]
- additive_noise = np.random.normal(loc=0.0, scale=self.std_dev, size=x_radii.shape)
+ additive_noise = np.random.normal(
+ loc=0.0,
+ scale=self.std_dev,
+ size=x_radii.shape,
+ )
# Dropout ellipses
noise = np.zeros_like(depth)
@@ -347,7 +385,7 @@ def _transform_depth(self, depth: np.ndarray) -> np.ndarray:
class DepthBlurTransform(DepthTransform):
- def __init__(self, factor_interval: Tuple[int, int] = (3, 7)):
+ def __init__(self, factor_interval: tuple[int, int] = (3, 7)):
self.factor_interval = factor_interval
def _transform_depth(self, depth: np.ndarray) -> np.ndarray:
@@ -444,7 +482,12 @@ def __call__(self, obs: SceneObservation) -> SceneObservation:
x0, y0 = w / 2, h / 2
crop_box_size = (crop_h, w)
crop_h, crop_w = min(crop_box_size), max(crop_box_size)
- x1, y1, x2, y2 = x0 - crop_w / 2, y0 - crop_h / 2, x0 + crop_w / 2, y0 + crop_h / 2
+ x1, y1, x2, y2 = (
+ x0 - crop_w / 2,
+ y0 - crop_h / 2,
+ x0 + crop_w / 2,
+ y0 + crop_h / 2,
+ )
box = (x1, y1, x2, y2)
rgb_pil = rgb_pil.crop(box)
segmentation_pil = segmentation_pil.crop(box)
@@ -463,9 +506,15 @@ def __call__(self, obs: SceneObservation) -> SceneObservation:
w, h = rgb_pil.size
w_resize, h_resize = max(self.resize), min(self.resize)
rgb_pil = rgb_pil.resize((w_resize, h_resize), resample=PIL.Image.BILINEAR)
- segmentation_pil = segmentation_pil.resize((w_resize, h_resize), resample=PIL.Image.NEAREST)
+ segmentation_pil = segmentation_pil.resize(
+ (w_resize, h_resize),
+ resample=PIL.Image.NEAREST,
+ )
if depth_pil is not None:
- depth_pil = depth_pil.resize((w_resize, h_resize), resample=PIL.Image.NEAREST)
+ depth_pil = depth_pil.resize(
+ (w_resize, h_resize),
+ resample=PIL.Image.NEAREST,
+ )
box = (0, 0, w, h)
new_K = get_K_crop_resize(
torch.tensor(new_K).unsqueeze(0),
@@ -488,7 +537,10 @@ def __call__(self, obs: SceneObservation) -> SceneObservation:
for obj in obs.object_datas:
if obj.unique_id in dets_gt:
new_obj = dataclasses.replace(
- obj, bbox_modal=dets_gt[obj.unique_id], bbox_amodal=None, visib_fract=None
+ obj,
+ bbox_modal=dets_gt[obj.unique_id],
+ bbox_amodal=None,
+ visib_fract=None,
)
new_object_datas.append(new_obj)
new_obs.object_datas = new_object_datas
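Editor's note: the Pillow-based transforms reformatted in this file all follow one recipe: wrap an `ImageEnhance` class, sample an enhancement factor from an interval, and apply the result with some probability `p` (the role of `SceneObservationAugmentation`). A minimal sketch of that recipe on a bare numpy image, without the happypose dataclasses; the interval matches `PillowSharpness`'s default:

```python
import random

import numpy as np
import PIL.Image
from PIL import ImageEnhance


def augment_rgb(
    rgb: np.ndarray,
    factor_interval: tuple[float, float] = (0.0, 50.0),
    p: float = 0.3,
) -> np.ndarray:
    """Apply a random sharpness enhancement with probability p."""
    if random.random() > p:
        return rgb
    rgb_pil = PIL.Image.fromarray(rgb)
    factor = random.uniform(*factor_interval)
    rgb_pil = ImageEnhance.Sharpness(rgb_pil).enhance(factor=factor)
    return np.array(rgb_pil)


rgb = np.random.randint(0, 255, size=(480, 640, 3), dtype=np.uint8)
out = augment_rgb(rgb)
print(out.shape, out.dtype)
```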
diff --git a/happypose/toolbox/datasets/bop_object_datasets.py b/happypose/toolbox/datasets/bop_object_datasets.py
index d044bc85..ec7df61a 100644
--- a/happypose/toolbox/datasets/bop_object_datasets.py
+++ b/happypose/toolbox/datasets/bop_object_datasets.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import json
from pathlib import Path
@@ -23,12 +21,12 @@
# Third Party
import numpy as np
-# MegaPose
-from happypose.toolbox.lib3d.symmetries import ContinuousSymmetry, DiscreteSymmetry
-
# Local Folder
from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
+# MegaPose
+from happypose.toolbox.lib3d.symmetries import ContinuousSymmetry, DiscreteSymmetry
+
class BOPObjectDataset(RigidObjectDataset):
def __init__(self, ds_dir: Path, label_format: str = "{label}"):
diff --git a/happypose/toolbox/datasets/bop_scene_dataset.py b/happypose/toolbox/datasets/bop_scene_dataset.py
index 44d1e5b5..6008d5ca 100644
--- a/happypose/toolbox/datasets/bop_scene_dataset.py
+++ b/happypose/toolbox/datasets/bop_scene_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -28,10 +27,7 @@
from tqdm import tqdm
# MegaPose
-from happypose.pose_estimators.megapose.config import (
- BOP_TOOLKIT_DIR,
- MEMORY,
-)
+from happypose.pose_estimators.megapose.config import BOP_TOOLKIT_DIR, MEMORY
# Local Folder
from happypose.toolbox.datasets.scene_dataset import (
@@ -67,15 +63,14 @@ def build_index_and_annotations(
save_file_annotations=None,
make_per_view_annotations=True,
):
-
scene_ids, view_ids = [], []
- annotations = dict()
+ annotations = {}
base_dir = ds_dir / split
for scene_dir in tqdm(base_dir.iterdir()):
scene_id = scene_dir.name
- annotations_scene = dict()
+ annotations_scene = {}
for f in ("scene_camera.json", "scene_gt_info.json", "scene_gt.json"):
path = scene_dir / f
if path.exists():
@@ -87,7 +82,7 @@ def build_index_and_annotations(
scene_annotation = annotations_scene
for view_id in scene_annotation["scene_camera"].keys():
if make_per_view_annotations:
- this_annotation = dict()
+ this_annotation = {}
this_annotation["camera"] = scene_annotation["scene_camera"][
str(view_id)
]
@@ -99,7 +94,7 @@ def build_index_and_annotations(
annotation_dir = base_dir / scene_id / "per_view_annotations"
annotation_dir.mkdir(exist_ok=True)
(annotation_dir / f"view={view_id}.json").write_text(
- json.dumps(this_annotation)
+ json.dumps(this_annotation),
)
scene_ids.append(int(scene_id))
view_ids.append(int(view_id))
@@ -192,7 +187,7 @@ def data_from_bop_obs(
class BOPDataset(SceneDataset):
"""Read a dataset in the BOP format.
- See https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md
+ See https://github.com/thodan/bop_toolkit/blob/master/docs/bop_datasets_format.md.
# TODO: Document what's happening with the per-view annotations.
# TODO: Remove per-view annotations, recommend using WebDataset for performance?
@@ -208,7 +203,6 @@ def __init__(
allow_cache: bool = False,
per_view_annotations: bool = False,
):
-
self.ds_dir = ds_dir
assert ds_dir.exists(), "Dataset does not exist."
@@ -232,7 +226,9 @@ def __init__(
self.annotations = pickle.loads(save_file_annotations.read_bytes())
else:
frame_index, self.annotations = build_index_and_annotations(
- ds_dir, split, make_per_view_annotations=per_view_annotations
+ ds_dir,
+ split,
+ make_per_view_annotations=per_view_annotations,
)
self.use_raw_object_id = use_raw_object_id
@@ -245,7 +241,8 @@ def __init__(
)
def _load_scene_observation(
- self, image_infos: ObservationInfos
+ self,
+ image_infos: ObservationInfos,
) -> SceneObservation:
scene_id, view_id = image_infos.scene_id, image_infos.view_id
view_id = int(view_id)
@@ -257,7 +254,7 @@ def _load_scene_observation(
# TODO: Also change the pandas numpy arrays to np.string_ instead of np.object
# See https://github.com/pytorch/pytorch/issues/13246#issuecomment-905703662
this_annotation_path = (
- scene_dir / "per_view_annotations" / f"view={str(view_id)}.json"
+ scene_dir / "per_view_annotations" / f"view={view_id!s}.json"
)
if this_annotation_path.exists():
this_annotation = json.loads(this_annotation_path.read_text())
@@ -357,8 +354,8 @@ def _load_scene_observation(
for n in range(n_objects):
binary_mask_n = np.array(
Image.open(
- scene_dir / "mask_visib" / f"{view_id_str}_{n:06d}.png"
- )
+ scene_dir / "mask_visib" / f"{view_id_str}_{n:06d}.png",
+ ),
)
segmentation[binary_mask_n == 255] = n + 1
diff --git a/happypose/toolbox/datasets/datasets_cfg.py b/happypose/toolbox/datasets/datasets_cfg.py
index 8d79ef8d..452323aa 100644
--- a/happypose/toolbox/datasets/datasets_cfg.py
+++ b/happypose/toolbox/datasets/datasets_cfg.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,19 +14,14 @@
"""
-
# Standard Library
import json
-from typing import List, Optional, Tuple
+from typing import Optional
# Third Party
import numpy as np
import pandas as pd
-# HappyPose
-from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
-from happypose.toolbox.datasets.scene_dataset import SceneDataset
-
# MegaPose
from happypose.pose_estimators.megapose.config import (
BOP_DS_DIR,
@@ -44,6 +38,10 @@
from happypose.toolbox.datasets.deepim_modelnet import DeepImModelNetDataset
from happypose.toolbox.datasets.gso_dataset import GoogleScannedObjectDataset
from happypose.toolbox.datasets.modelnet_object_dataset import ModelNetObjectDataset
+
+# HappyPose
+from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
+from happypose.toolbox.datasets.scene_dataset import SceneDataset
from happypose.toolbox.datasets.shapenet_object_dataset import ShapeNetObjectDataset
from happypose.toolbox.datasets.urdf_dataset import UrdfDataset
from happypose.toolbox.datasets.web_scene_dataset import WebSceneDataset
@@ -58,7 +56,9 @@ def keep_bop19(ds: SceneDataset) -> SceneDataset:
targets = pd.read_json(ds.ds_dir / "test_targets_bop19.json")
targets = remap_bop_targets(targets)
targets = targets.loc[:, ["scene_id", "view_id"]].drop_duplicates()
- index = ds.frame_index.merge(targets, on=["scene_id", "view_id"]).reset_index(drop=True)
+ index = ds.frame_index.merge(targets, on=["scene_id", "view_id"]).reset_index(
+ drop=True,
+ )
assert len(index) == len(targets)
ds.frame_index = index
return ds
@@ -69,11 +69,14 @@ def make_scene_dataset(
load_depth: bool = False,
n_frames: Optional[int] = None,
) -> SceneDataset:
-
# BOP challenge splits
if ds_name == "hb.bop19":
ds_dir = BOP_DS_DIR / "hb"
- ds: SceneDataset = BOPDataset(ds_dir, split="test_primesense", label_format="hb-{label}")
+ ds: SceneDataset = BOPDataset(
+ ds_dir,
+ split="test_primesense",
+ label_format="hb-{label}",
+ )
ds = keep_bop19(ds)
elif ds_name == "icbin.bop19":
ds_dir = BOP_DS_DIR / "icbin"
@@ -126,29 +129,31 @@ def make_scene_dataset(
elif ds_name == "ycbv.train.real":
ds_dir = BOP_DS_DIR / "ycbv"
ds = BOPDataset(ds_dir, split="train_real", label_format="ycbv-{label}")
- elif ds_name == 'ycbv.train.synt':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='train_synt', label_format="ycbv-{label}")
+ elif ds_name == "ycbv.train.synt":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="train_synt", label_format="ycbv-{label}")
elif ds_name == "ycbv.real.train":
ds_dir = BOP_DS_DIR / "ycbv"
ds = BOPDataset(ds_dir, split="train_real", label_format="ycbv-{label}")
- elif ds_name == 'ycbv.synt.train':
- ds_dir = BOP_DS_DIR / 'ycbv'
+ elif ds_name == "ycbv.synt.train":
+ ds_dir = BOP_DS_DIR / "ycbv"
ds = BOPDataset(ds_dir, split="train_synt", label_format="ycbv-{label}")
elif ds_name == "ycbv.test":
ds_dir = BOP_DS_DIR / "ycbv"
ds = BOPDataset(ds_dir, split="test", label_format="ycbv-{label}")
- elif ds_name == 'ycbv.test.keyframes':
- ds_dir = BOP_DS_DIR / 'ycbv'
- ds = BOPDataset(ds_dir, split='test', label_format="ycbv-{label}")
- keyframes_path = ds_dir / 'keyframe.txt'
- ls = keyframes_path.read_text().split('\n')[:-1]
+ elif ds_name == "ycbv.test.keyframes":
+ ds_dir = BOP_DS_DIR / "ycbv"
+ ds = BOPDataset(ds_dir, split="test", label_format="ycbv-{label}")
+ keyframes_path = ds_dir / "keyframe.txt"
+ ls = keyframes_path.read_text().split("\n")[:-1]
frame_index = ds.frame_index
ids = []
for l_n in ls:
- scene_id, view_id = l_n.split('/')
+ scene_id, view_id = l_n.split("/")
scene_id, view_id = int(scene_id), int(view_id)
- mask = (frame_index['scene_id'] == scene_id) & (frame_index['view_id'] == view_id)
+ mask = (frame_index["scene_id"] == scene_id) & (
+ frame_index["view_id"] == view_id
+ )
ids.append(np.where(mask)[0].item())
ds.frame_index = frame_index.iloc[ids].reset_index(drop=True)
elif ds_name == "lmo.test":
@@ -193,7 +198,15 @@ def make_scene_dataset(
n_objects = (
30
if category
- in {"bathtub", "bookshelf", "guitar", "range_hood", "sofa", "wardrobe", "tv_stand"}
+ in {
+ "bathtub",
+ "bookshelf",
+ "guitar",
+ "range_hood",
+ "sofa",
+ "wardrobe",
+ "tv_stand",
+ }
else 50
)
ds = DeepImModelNetDataset(
@@ -210,14 +223,19 @@ def make_scene_dataset(
ds = WebSceneDataset(WDS_DS_DIR / ds_name)
# Synthetic datasets
- elif 'synthetic.' in ds_name:
- from happypose.pose_estimators.cosypose.cosypose.datasets.synthetic_dataset import SyntheticSceneDataset
- assert '.train' in ds_name or '.val' in ds_name
- is_train = 'train' in ds_name.split('.')[-1]
- ds_name = ds_name.split('.')[1]
- print("ds_name synthetic =", ds_name)
- ds = SyntheticSceneDataset(ds_dir=LOCAL_DATA_DIR / 'synt_datasets' / ds_name, train=is_train)
+ elif "synthetic." in ds_name:
+ from happypose.pose_estimators.cosypose.cosypose.datasets.synthetic_dataset import ( # noqa: E501
+ SyntheticSceneDataset,
+ )
+ assert ".train" in ds_name or ".val" in ds_name
+ is_train = "train" in ds_name.split(".")[-1]
+ ds_name = ds_name.split(".")[1]
+ print("ds_name synthetic =", ds_name)
+ ds = SyntheticSceneDataset(
+ ds_dir=LOCAL_DATA_DIR / "synt_datasets" / ds_name,
+ train=is_train,
+ )
else:
raise ValueError(ds_name)
@@ -231,15 +249,22 @@ def make_scene_dataset(
def make_object_dataset(ds_name: str) -> RigidObjectDataset:
# BOP original models
-
+
if ds_name == "tless.cad":
ds: RigidObjectDataset = BOPObjectDataset(
- BOP_DS_DIR / "tless/models_cad", label_format="tless-{label}"
+ BOP_DS_DIR / "tless/models_cad",
+ label_format="tless-{label}",
)
elif ds_name == "tless.eval":
- ds = BOPObjectDataset(BOP_DS_DIR / "tless/models_eval", label_format="tless-{label}")
+ ds = BOPObjectDataset(
+ BOP_DS_DIR / "tless/models_eval",
+ label_format="tless-{label}",
+ )
elif ds_name == "tless.reconst":
- ds = BOPObjectDataset(BOP_DS_DIR / "tless/models_reconst", label_format="tless-{label}")
+ ds = BOPObjectDataset(
+ BOP_DS_DIR / "tless/models_reconst",
+ label_format="tless-{label}",
+ )
elif ds_name == "ycbv":
ds = BOPObjectDataset(BOP_DS_DIR / "ycbv/models", label_format="ycbv-{label}")
elif ds_name == "hb":
@@ -262,25 +287,55 @@ def make_object_dataset(ds_name: str) -> RigidObjectDataset:
# BOP models converted for Panda3D
# TODO: Is this necessary ?
elif ds_name == "hb.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "hb/models", label_format="hb-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "hb/models",
+ label_format="hb-{label}",
+ )
elif ds_name == "icbin.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "icbin/models", label_format="icbin-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "icbin/models",
+ label_format="icbin-{label}",
+ )
elif ds_name == "itodd.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "itodd/models", label_format="itodd-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "itodd/models",
+ label_format="itodd-{label}",
+ )
elif ds_name == "lm.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "lm/models", label_format="lm-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "lm/models",
+ label_format="lm-{label}",
+ )
elif ds_name == "tless.cad.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "tless/models_cad", label_format="tless-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "tless/models_cad",
+ label_format="tless-{label}",
+ )
elif ds_name == "ycbv.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "ycbv/models", label_format="ycbv-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "ycbv/models",
+ label_format="ycbv-{label}",
+ )
elif ds_name == "tudl.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "tudl/models", label_format="tudl-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "tudl/models",
+ label_format="tudl-{label}",
+ )
elif ds_name == "tyol.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "tyol/models", label_format="tyol-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "tyol/models",
+ label_format="tyol-{label}",
+ )
elif ds_name == "ruapc.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "ruapc/models", label_format="ruapc-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "ruapc/models",
+ label_format="ruapc-{label}",
+ )
elif ds_name == "hope.panda3d":
- ds = BOPObjectDataset(BOP_PANDA3D_DS_DIR / "hope/models", label_format="hope-{label}")
+ ds = BOPObjectDataset(
+ BOP_PANDA3D_DS_DIR / "hope/models",
+ label_format="hope-{label}",
+ )
# GSO
elif ds_name == "gso.orig":
@@ -297,7 +352,15 @@ def make_object_dataset(ds_name: str) -> RigidObjectDataset:
n_objects = (
30
if category
- in {"bathtub", "bookshelf", "guitar", "range_hood", "sofa", "wardrobe", "tv_stand"}
+ in {
+ "bathtub",
+ "bookshelf",
+ "guitar",
+ "range_hood",
+ "sofa",
+ "wardrobe",
+ "tv_stand",
+ }
else 50
)
ds = ModelNetObjectDataset(
@@ -313,7 +376,7 @@ def make_object_dataset(ds_name: str) -> RigidObjectDataset:
elif ds_name.startswith("shapenet."):
ds_name = ds_name[len("shapenet.") :]
- filters_list: List[str] = []
+ filters_list: list[str] = []
if ds_name.startswith("filters="):
filter_str = ds_name.split(".")[0]
filters_list = filter_str.split("filters=")[1].split(",")
@@ -324,20 +387,18 @@ def make_object_dataset(ds_name: str) -> RigidObjectDataset:
for filter_str in filters_list:
if filter_str == "remove_modelnet":
- keep_labels = set(
- [
- obj.label
- for obj in ds.objects
- if obj.category not in SHAPENET_MODELNET_CATEGORIES
- ]
- )
+ keep_labels = {
+ obj.label
+ for obj in ds.objects
+ if obj.category not in SHAPENET_MODELNET_CATEGORIES
+ }
else:
keep_labels = set(
json.loads(
(SHAPENET_DIR / "stats" / ("shapenet_" + filter_str))
.with_suffix(".json")
- .read_text()
- )
+ .read_text(),
+ ),
)
ds = ds.filter_objects(keep_labels)
@@ -358,8 +419,10 @@ def make_object_dataset(ds_name: str) -> RigidObjectDataset:
np_random = np.random.RandomState(0)
keep_labels = set(
np_random.choice(
- [obj.label for obj in ds.objects], n_objects_, replace=False
- ).tolist()
+ [obj.label for obj in ds.objects],
+ n_objects_,
+ replace=False,
+ ).tolist(),
)
ds = ds.filter_objects(keep_labels)
@@ -372,7 +435,9 @@ def make_urdf_dataset(ds_name: str) -> RigidObjectDataset:
# BOP
if ds_name == "tless.cad":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "tless.cad", mesh_units="mm", label_format="tless-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "tless.cad",
+ mesh_units="mm",
+ label_format="tless-{label}",
)
elif ds_name == "tless.reconst":
ds = UrdfDataset(
@@ -383,31 +448,45 @@ def make_urdf_dataset(ds_name: str) -> RigidObjectDataset:
elif ds_name == "tless":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "tless.cad", mesh_units="mm", label_format="tless-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "tless.cad",
+ mesh_units="mm",
+ label_format="tless-{label}",
)
elif ds_name == "ycbv":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "ycbv", mesh_units="mm", label_format="ycbv-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "ycbv",
+ mesh_units="mm",
+ label_format="ycbv-{label}",
)
elif ds_name == "hb":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "hb", mesh_units="mm", label_format="hb-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "hb",
+ mesh_units="mm",
+ label_format="hb-{label}",
)
elif ds_name == "icbin":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "icbin", mesh_units="mm", label_format="icbin-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "icbin",
+ mesh_units="mm",
+ label_format="icbin-{label}",
)
elif ds_name == "itodd":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "itodd", mesh_units="mm", label_format="itodd-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "itodd",
+ mesh_units="mm",
+ label_format="itodd-{label}",
)
elif ds_name == "lm":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "lm", mesh_units="mm", label_format="lm-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "lm",
+ mesh_units="mm",
+ label_format="lm-{label}",
)
elif ds_name == "tudl":
ds = UrdfDataset(
- LOCAL_DATA_DIR / "urdfs" / "tudl", mesh_units="mm", label_format="tudl-{label}"
+ LOCAL_DATA_DIR / "urdfs" / "tudl",
+ mesh_units="mm",
+ label_format="tudl-{label}",
)
else:
@@ -415,8 +494,8 @@ def make_urdf_dataset(ds_name: str) -> RigidObjectDataset:
return ds
-def get_obj_ds_info(ds_name: str) -> Tuple[Optional[str], str]:
- urdf_ds_name = None # Only used for bullet compatibility
+def get_obj_ds_info(ds_name: str) -> tuple[Optional[str], str]:
+ urdf_ds_name = None # Only used for bullet compatibility
if ds_name == "ycbv.bop19":
ds_name = "ycbv"
urdf_ds_name = "ycbv"
@@ -442,7 +521,8 @@ def get_obj_ds_info(ds_name: str) -> Tuple[Optional[str], str]:
category = ds_name.split(".")[1]
obj_ds_name = f"modelnet.{category}.test.rescaled"
else:
- raise ValueError("Unknown dataset")
+ msg = "Unknown dataset"
+ raise ValueError(msg)
return urdf_ds_name, obj_ds_name
@@ -452,10 +532,12 @@ def get_object_label(ds_name, description):
if ds_name == "ycbv":
df = YCBV_OBJECT_NAMES
else:
- raise ValueError(f"Unknown dataset {ds_name}")
+ msg = f"Unknown dataset {ds_name}"
+ raise ValueError(msg)
x = df[df.description == description]
if len(x) == 0:
- raise ValueError(f"Couldn't find object '{description}' in ds {ds_name}")
+ msg = f"Couldn't find object '{description}' in ds {ds_name}"
+ raise ValueError(msg)
return x.iloc[0].label
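For orientation, a hedged usage sketch of the two factories reformatted above. It assumes the YCB-V BOP data is present under `BOP_DS_DIR`; the dataset names are branches visible in this file:

from happypose.toolbox.datasets.datasets_cfg import (
    make_object_dataset,
    make_scene_dataset,
)

# "ycbv.test" and "ycbv.panda3d" are handled by the if/elif chains above.
scene_ds = make_scene_dataset("ycbv.test", load_depth=True)
object_ds = make_object_dataset("ycbv.panda3d")
print(len(scene_ds.frame_index), "frames /", len(object_ds), "objects")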
diff --git a/happypose/toolbox/datasets/deepim_modelnet.py b/happypose/toolbox/datasets/deepim_modelnet.py
index 2988deb0..2a9c87ca 100644
--- a/happypose/toolbox/datasets/deepim_modelnet.py
+++ b/happypose/toolbox/datasets/deepim_modelnet.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from pathlib import Path
@@ -25,10 +23,6 @@
import torch
from PIL import Image
-# MegaPose
-from happypose.toolbox.datasets.utils import make_detections_from_segmentation
-from happypose.toolbox.lib3d.transform import Transform
-
# Local Folder
from happypose.toolbox.datasets.scene_dataset import (
CameraData,
@@ -38,6 +32,10 @@
SceneObservation,
)
+# MegaPose
+from happypose.toolbox.datasets.utils import make_detections_from_segmentation
+from happypose.toolbox.lib3d.transform import Transform
+
def parse_pose(pose_str: str) -> np.ndarray:
pose_str_split = pose_str.split("\n")[-3:]
@@ -58,39 +56,31 @@ def __init__(
n_images_per_object: int = 50,
load_depth: bool = False,
):
-
+ data_dir = modelnet_dir / "modelnet_render_v1" / "data"
self.test_template_im = (
- modelnet_dir
- / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"
+ data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"
)
self.test_template_depth = (
- modelnet_dir
- / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-depth.png"
+ data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-depth.png"
)
self.test_template_label = (
- modelnet_dir
- / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-label.png"
+ data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-label.png"
)
self.test_template_pose = (
- modelnet_dir
- / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-pose.txt"
+ data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-pose.txt"
)
self.init_template_im = (
- modelnet_dir
- / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-color.png"
+ data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-color.png"
)
self.init_template_depth = (
- modelnet_dir
- / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-depth.png"
+ data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-depth.png"
)
self.init_template_label = (
- modelnet_dir
- / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-label.png"
+ data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-label.png"
)
self.init_template_pose = (
- modelnet_dir
- / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt"
+ data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt"
)
object_ids = (
@@ -116,29 +106,37 @@ def __init__(
load_depth=load_depth,
)
- def _load_scene_observation(self, image_infos: ObservationInfos) -> SceneObservation:
- infos_dict = dict(
- category=self.category,
- split=self.split,
- obj_id=image_infos.scene_id,
- im_id=image_infos.view_id,
- )
+ def _load_scene_observation(
+ self,
+ image_infos: ObservationInfos,
+ ) -> SceneObservation:
+ infos_dict = {
+ "category": self.category,
+ "split": self.split,
+ "obj_id": image_infos.scene_id,
+ "im_id": image_infos.view_id,
+ }
obj_label = image_infos.scene_id
rgb = np.array(Image.open(str(self.test_template_im).format(**infos_dict)))
if self.load_depth:
- depth = np.array(Image.open(str(self.test_template_depth).format(**infos_dict)))
+ depth = np.array(
+ Image.open(str(self.test_template_depth).format(**infos_dict)),
+ )
depth = torch.as_tensor(depth) / self.depth_im_scale
else:
depth = None
segmentation = np.array(
- Image.open(str(self.test_template_label).format(**infos_dict)), dtype=np.int_
+ Image.open(str(self.test_template_label).format(**infos_dict)),
+ dtype=np.int_,
)
pose_str = Path(str(self.test_template_pose).format(**infos_dict)).read_text()
pose = Transform(parse_pose(pose_str))
- init_pose_str = Path(str(self.init_template_pose).format(**infos_dict)).read_text()
+ init_pose_str = Path(
+ str(self.init_template_pose).format(**infos_dict),
+ ).read_text()
init_pose = Transform(parse_pose(init_pose_str))
obj_label = self.label_format.format(label=obj_label)
@@ -154,7 +152,7 @@ def _load_scene_observation(self, image_infos: ObservationInfos) -> SceneObserva
visib_fract=1.0,
unique_id=1,
bbox_modal=dets[1],
- )
+ ),
]
K = np.array([[572.4114, 0, 325.2611], [0, 573.57043, 242.04899], [0, 0, 1]])
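The refactor above shortens the DeepIM path templates via a shared `data_dir` prefix; each template is an ordinary `str.format` string embedded in a `Path`. A toy demonstration (values are made up):

from pathlib import Path

data_dir = Path("modelnet_render_v1/data")
template = data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"
infos = {"category": "sofa", "split": "test", "obj_id": "sofa_0123", "im_id": 7}
print(str(template).format(**infos))
# -> modelnet_render_v1/data/real/sofa/test/sofa_0123_0007-color.png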
diff --git a/happypose/toolbox/datasets/gso_dataset.py b/happypose/toolbox/datasets/gso_dataset.py
index 03241840..0a8ede67 100644
--- a/happypose/toolbox/datasets/gso_dataset.py
+++ b/happypose/toolbox/datasets/gso_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,7 +17,6 @@
# Standard Library
import json
from pathlib import Path
-from typing import List
# MegaPose
from happypose.pose_estimators.megapose.config import MEMORY
@@ -28,7 +26,7 @@
@MEMORY.cache
-def make_gso_infos(gso_dir: Path, model_name: str = "model.obj") -> List[str]:
+def make_gso_infos(gso_dir: Path, model_name: str = "model.obj") -> list[str]:
gso_dir = Path(gso_dir)
models_dir = gso_dir.iterdir()
invalid_ids = set(json.loads((gso_dir.parent / "invalid_meshes.json").read_text()))
@@ -43,9 +41,9 @@ def make_gso_infos(gso_dir: Path, model_name: str = "model.obj") -> List[str]:
def load_object_infos(models_infos_path):
- with open(models_infos_path, "r") as f:
+ with open(models_infos_path) as f:
infos = json.load(f)
- itos = dict()
+ itos = {}
for info in infos:
k = f"gso_{info['gso_id']}"
itos[info["obj_id"]] = k
diff --git a/happypose/toolbox/datasets/modelnet_object_dataset.py b/happypose/toolbox/datasets/modelnet_object_dataset.py
index 157ab6fe..ef6519e7 100644
--- a/happypose/toolbox/datasets/modelnet_object_dataset.py
+++ b/happypose/toolbox/datasets/modelnet_object_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -31,7 +30,6 @@ def __init__(
rescaled: bool = True,
n_objects: int = 30,
):
-
object_ids = (
Path(modelnet_dir / "model_set" / f"{category}_{split}.txt")
.read_text()
@@ -42,10 +40,16 @@ def __init__(
for object_id in object_ids:
if rescaled:
mesh_path = (
- modelnet_dir / "ModelNet40" / category / split / f"{object_id}_rescaled.obj"
+ modelnet_dir
+ / "ModelNet40"
+ / category
+ / split
+ / f"{object_id}_rescaled.obj"
)
else:
- mesh_path = modelnet_dir / "ModelNet40" / category / split / f"{object_id}.obj"
+ mesh_path = (
+ modelnet_dir / "ModelNet40" / category / split / f"{object_id}.obj"
+ )
obj = RigidObject(
label=object_id,
category=category,
diff --git a/happypose/toolbox/datasets/object_dataset.py b/happypose/toolbox/datasets/object_dataset.py
index 61bc48da..ba591430 100644
--- a/happypose/toolbox/datasets/object_dataset.py
+++ b/happypose/toolbox/datasets/object_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,11 +14,10 @@
"""
-
# Standard Library
import itertools
from pathlib import Path
-from typing import List, Optional, Set, Tuple
+from typing import Optional
# Third Party
import numpy as np
@@ -40,37 +38,42 @@ def __init__(
category: Optional[str] = None,
mesh_diameter: Optional[float] = None,
mesh_units: str = "m",
- symmetries_discrete: List[DiscreteSymmetry] = [],
- symmetries_continuous: List[ContinuousSymmetry] = [],
- ypr_offset_deg: Tuple[float, float, float] = (0., 0., 0.),
+ symmetries_discrete: list[DiscreteSymmetry] = [],
+ symmetries_continuous: list[ContinuousSymmetry] = [],
+ ypr_offset_deg: tuple[float, float, float] = (0.0, 0.0, 0.0),
scaling_factor: float = 1.0,
scaling_factor_mesh_units_to_meters: Optional[float] = None,
):
- """
- Args:
+ """Args:
+ ----
label (str): A unique label to identify an object.
mesh_path (Path): Path to a mesh. Multiple object types are supported.
- Please refer to downstream usage of this class for the supported formats.
- For example, when a `RigidObjectDataset`is passed to a `Panda3dSceneRenderer`,
- the user must ensure that the mesh can be loaded correctly.
+ Please refer to downstream usage of this class for the supported
+ formats.
+                For example, when a `RigidObjectDataset` is passed to a
+ `Panda3dSceneRenderer`, the user must ensure that the mesh can be loaded
+ correctly.
category (Optional[str], optional): Can be used to identify the object
- as one of a known category, e.g. mug or shoes. In the general case, an
- object does not need to belong to a category. The notion of category can also
- ambiguous. In this codebase, this is only used to parse the categories of the
- ShapeNet dataset in order to remove the instances that overlap with the test
- categories of the ModelNet dataset.
+ as one of a known category, e.g. mug or shoes. In the general case, an
+                object does not need to belong to a category. The notion of category
+                can also be ambiguous. In this codebase, this is only used to parse the
+ categories of the ShapeNet dataset in order to remove the instances that
+ overlap with the test categories of the ModelNet dataset.
mesh_diameter (Optional[float], optional): Diameter of the object, expressed
in the unit of the meshes.
- This is useful for computing error some metrics like ADD<0.1d or ADD-S<0.1d.
- mesh_units (str, optional): Units in which the vertex positions are expressed.
- Can be `m`or `mm`, defaults to `m`. In the operations of this codebase,
- all mesh coordinates and poses must be expressed in meters.
- When an object is loaded, a scaling will be applied to the mesh
+                This is useful for computing some error metrics like ADD<0.1d or
+ ADD-S<0.1d.
+ mesh_units (str, optional): Units in which the vertex positions are
+                expressed. Can be `m` or `mm`, defaults to `m`. In the operations of
+ this codebase, all mesh coordinates and poses must be expressed in
+ meters. When an object is loaded, a scaling will be applied to the mesh
to ensure its coordinates are in meters when in memory.
symmetries_discrete (List[ContinuousSymmetry], optional):
- See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py
+ See https://github.com/thodan/bop_toolkit/blob/master/
+ bop_toolkit_lib/misc.py
symmetries_continuous (List[DiscreteSymmetry], optional):
- See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py
+ See https://github.com/thodan/bop_toolkit/blob/master/
+ bop_toolkit_lib/misc.py
ypr_offset_deg (np.ndarray, optional): A rotation offset applied to the mesh
**only when loaded in Panda3D**. This can be useful to correct
some mesh conventions where axes are flipped.
@@ -82,19 +85,21 @@ def __init__(
For example, if you have a mesh with coordinates expressed in `mm`
which you want to resize to 10% of its size,
you should pass `mesh_units=mm` and `scaling_factor=0.1`.
- Note that `mesh_units=m` and `scaling_factor=100` would be strictly equivalent.
+ Note that `mesh_units=m` and `scaling_factor=100` would be strictly
+ equivalent.
scaling_factor_mesh_units_to_meters (float, optional): Can be used
instead of the mesh_units argument. This is the scale that converts
mesh units to meters.
"""
-
self.label = label
self.category = category
self.mesh_path = mesh_path
self.mesh_units = mesh_units
if scaling_factor_mesh_units_to_meters is not None:
- self.scaling_factor_mesh_units_to_meters = scaling_factor_mesh_units_to_meters
+ self.scaling_factor_mesh_units_to_meters = (
+ scaling_factor_mesh_units_to_meters
+ )
else:
self.scaling_factor_mesh_units_to_meters = {
"m": 1.0,
@@ -107,7 +112,9 @@ def __init__(
if self._mesh_diameter is not None:
self.mesh_diameter = mesh_diameter
- self.diameter_meters = mesh_diameter * self.scaling_factor_mesh_units_to_meters
+ self.diameter_meters = (
+ mesh_diameter * self.scaling_factor_mesh_units_to_meters
+ )
self.symmetries_discrete = symmetries_discrete
self.symmetries_continuous = symmetries_continuous
@@ -122,11 +129,11 @@ def scale(self) -> float:
"""Returns the scale factor that converts the mesh to desired units."""
return self.scaling_factor_mesh_units_to_meters * self.scaling_factor
- def make_symmetry_poses(
- self, n_symmetries_continuous: int = 64) -> np.ndarray:
+ def make_symmetry_poses(self, n_symmetries_continuous: int = 64) -> np.ndarray:
"""Generates the set of object symmetries.
- Returns:
+ Returns
+ -------
(num_symmetries, 4, 4) array
"""
return make_symmetries_poses(
@@ -140,12 +147,13 @@ def make_symmetry_poses(
class RigidObjectDataset:
def __init__(
self,
- objects: List[RigidObject],
+ objects: list[RigidObject],
):
self.list_objects = objects
self.label_to_objects = {obj.label: obj for obj in objects}
if len(self.list_objects) != len(self.label_to_objects):
- raise RuntimeError("There are objects with duplicate labels")
+ msg = "There are objects with duplicate labels"
+ raise RuntimeError(msg)
def __getitem__(self, idx: int) -> RigidObject:
return self.list_objects[idx]
@@ -157,23 +165,24 @@ def __len__(self) -> int:
return len(self.list_objects)
@property
- def objects(self) -> List[RigidObject]:
+ def objects(self) -> list[RigidObject]:
"""Returns a list of objects in this dataset."""
return self.list_objects
- def filter_objects(self, keep_labels: Set[str]) -> "RigidObjectDataset":
+ def filter_objects(self, keep_labels: set[str]) -> "RigidObjectDataset":
list_objects = [obj for obj in self.list_objects if obj.label in keep_labels]
return RigidObjectDataset(list_objects)
def append_dataset_name_to_object_labels(
- ds_name: str, object_dataset: RigidObjectDataset
+ ds_name: str,
+ object_dataset: RigidObjectDataset,
) -> RigidObjectDataset:
for obj in object_dataset.list_objects:
obj.label = f"ds_name={ds_name}_{obj.label}"
return object_dataset
-def concat_object_datasets(datasets: List[RigidObjectDataset]) -> RigidObjectDataset:
+def concat_object_datasets(datasets: list[RigidObjectDataset]) -> RigidObjectDataset:
objects = list(itertools.chain.from_iterable([ds.list_objects for ds in datasets]))
return RigidObjectDataset(objects)
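A sketch of the unit bookkeeping documented in the `RigidObject` docstring above: a mesh authored in millimetres, additionally resized to 10%. The mesh path is a placeholder, and the mm -> m factor of 0.001 is assumed from the documented convention:

from pathlib import Path

from happypose.toolbox.datasets.object_dataset import (
    RigidObject,
    RigidObjectDataset,
)

obj = RigidObject(
    label="demo-object",
    mesh_path=Path("meshes/demo.obj"),  # hypothetical mesh
    mesh_units="mm",
    scaling_factor=0.1,
)
assert abs(obj.scale - 1e-4) < 1e-12  # 0.001 (mm -> m) * 0.1 (resize)

ds = RigidObjectDataset([obj])
assert len(ds) == 1 and ds[0] is obj
# Duplicate labels would raise RuntimeError, as enforced in __init__ above.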
diff --git a/happypose/toolbox/datasets/pickle_dataset.py b/happypose/toolbox/datasets/pickle_dataset.py
index c8ca47b9..50e6acfb 100644
--- a/happypose/toolbox/datasets/pickle_dataset.py
+++ b/happypose/toolbox/datasets/pickle_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -45,8 +44,13 @@ def __getitem__(self, idx):
data["scene_id"] = 0
data["im_idx"] = idx
mask = None
- infos = dict()
+ infos = {}
infos["camera"] = {"TWC": data["world_t_camera"], "K": data["intrinsics"]}
- infos["frame_info"] = {"scene_id": 0, "view_id": idx, "cam_name": "cam", "cam_id": "cam"}
+ infos["frame_info"] = {
+ "scene_id": 0,
+ "view_id": idx,
+ "cam_name": "cam",
+ "cam_id": "cam",
+ }
scene_data = SceneData(data["rgb"], data["depth"], mask, infos)
return scene_data
diff --git a/happypose/toolbox/datasets/pose_dataset.py b/happypose/toolbox/datasets/pose_dataset.py
index 1a734984..1b330565 100644
--- a/happypose/toolbox/datasets/pose_dataset.py
+++ b/happypose/toolbox/datasets/pose_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,25 +14,17 @@
"""
-
# Standard Library
import random
import time
+from collections.abc import Iterator
from dataclasses import dataclass
-from typing import Iterator, List, Optional, Set, Union
+from typing import Optional, Union
# Third Party
import numpy as np
import torch
-# HappyPose
-from happypose.toolbox.datasets.scene_dataset import (
- IterableSceneDataset,
- ObjectData,
- SceneDataset,
- SceneObservation,
-)
-
# MegaPose
from happypose.pose_estimators.megapose.config import LOCAL_DATA_DIR
from happypose.toolbox.datasets.augmentations import (
@@ -52,20 +43,29 @@
PillowContrast,
PillowSharpness,
)
-from happypose.toolbox.datasets.augmentations import SceneObservationAugmentation as SceneObsAug
+from happypose.toolbox.datasets.augmentations import (
+ SceneObservationAugmentation as SceneObsAug,
+)
from happypose.toolbox.datasets.augmentations import VOCBackgroundAugmentation
+
+# HappyPose
+from happypose.toolbox.datasets.scene_dataset import (
+ IterableSceneDataset,
+ ObjectData,
+ SceneDataset,
+ SceneObservation,
+)
from happypose.toolbox.datasets.scene_dataset_wrappers import remove_invisible_objects
from happypose.toolbox.utils.types import Resolution
@dataclass
class PoseData:
- """
- rgb: (h, w, 3) uint8
+ """rgb: (h, w, 3) uint8
depth: (bsz, h, w) float32
bbox: (4, ) int
K: (3, 3) float32
- TCO: (4, 4) float32
+ TCO: (4, 4) float32.
"""
rgb: np.ndarray
@@ -78,16 +78,15 @@ class PoseData:
@dataclass
class BatchPoseData:
- """
- rgbs: (bsz, 3, h, w) uint8
+ """rgbs: (bsz, 3, h, w) uint8
depths: (bsz, h, w) float32
bboxes: (bsz, 4) int
TCO: (bsz, 4, 4) float32
- K: (bsz, 3, 3) float32
+ K: (bsz, 3, 3) float32.
"""
rgbs: torch.Tensor
- object_datas: List[ObjectData]
+ object_datas: list[ObjectData]
bboxes: torch.Tensor
TCO: torch.Tensor
K: torch.Tensor
@@ -120,10 +119,9 @@ def __init__(
apply_depth_augmentation: bool = False,
apply_background_augmentation: bool = False,
return_first_object: bool = False,
- keep_labels_set: Optional[Set[str]] = None,
+ keep_labels_set: Optional[set[str]] = None,
depth_augmentation_level: int = 1,
):
-
self.scene_ds = scene_ds
self.resize_transform = CropResizeToAspectTransform(resize=resize)
self.min_area = min_area
@@ -131,7 +129,12 @@ def __init__(
self.background_augmentations = []
if apply_background_augmentation:
self.background_augmentations += [
- (SceneObsAug(VOCBackgroundAugmentation(LOCAL_DATA_DIR / "VOC2012"), p=0.3))
+ (
+ SceneObsAug(
+ VOCBackgroundAugmentation(LOCAL_DATA_DIR / "VOC2012"),
+ p=0.3,
+ )
+ ),
]
self.rgb_augmentations = []
@@ -140,13 +143,19 @@ def __init__(
SceneObsAug(
[
SceneObsAug(PillowBlur(factor_interval=(1, 3)), p=0.4),
- SceneObsAug(PillowSharpness(factor_interval=(0.0, 50.0)), p=0.3),
+ SceneObsAug(
+ PillowSharpness(factor_interval=(0.0, 50.0)),
+ p=0.3,
+ ),
SceneObsAug(PillowContrast(factor_interval=(0.2, 50.0)), p=0.3),
- SceneObsAug(PillowBrightness(factor_interval=(0.1, 6.0)), p=0.5),
+ SceneObsAug(
+ PillowBrightness(factor_interval=(0.1, 6.0)),
+ p=0.5,
+ ),
SceneObsAug(PillowColor(factor_interval=(0.0, 20.0)), p=0.3),
],
p=0.8,
- )
+ ),
]
self.depth_augmentations = []
@@ -167,7 +176,9 @@ def __init__(
SceneObsAug(DepthBlurTransform(), p=0.3),
SceneObsAug(
DepthCorrelatedGaussianNoiseTransform(
- gp_rescale_factor_min=15.0, gp_rescale_factor_max=40.0, std_dev=0.01
+ gp_rescale_factor_min=15.0,
+ gp_rescale_factor_max=40.0,
+ std_dev=0.01,
),
p=0.3,
),
@@ -194,13 +205,18 @@ def __init__(
# Set the depth image to zero occasionally.
if depth_augmentation_level == 2:
- self.depth_augmentations.append(SceneObsAug(DepthDropoutTransform(), p=0.3))
self.depth_augmentations.append(
- SceneObsAug(DepthBackgroundDropoutTransform(), p=0.2)
+ SceneObsAug(DepthDropoutTransform(), p=0.3),
)
- self.depth_augmentations = [SceneObsAug(self.depth_augmentations, p=0.8)]
+ self.depth_augmentations.append(
+ SceneObsAug(DepthBackgroundDropoutTransform(), p=0.2),
+ )
+ self.depth_augmentations = [
+ SceneObsAug(self.depth_augmentations, p=0.8),
+ ]
else:
- raise ValueError(f"Unknown depth augmentation type {depth_augmentation_level}")
+ msg = f"Unknown depth augmentation type {depth_augmentation_level}"
+ raise ValueError(msg)
self.return_first_object = return_first_object
@@ -208,9 +224,14 @@ def __init__(
if keep_labels_set is not None:
self.keep_labels_set = keep_labels_set
- def collate_fn(self, list_data: List[PoseData]) -> BatchPoseData:
+ def collate_fn(self, list_data: list[PoseData]) -> BatchPoseData:
batch_data = BatchPoseData(
- rgbs=torch.from_numpy(np.stack([d.rgb for d in list_data])).permute(0, 3, 1, 2),
+ rgbs=torch.from_numpy(np.stack([d.rgb for d in list_data])).permute(
+ 0,
+ 3,
+ 1,
+ 2,
+ ),
bboxes=torch.from_numpy(np.stack([d.bbox for d in list_data])),
K=torch.from_numpy(np.stack([d.K for d in list_data])),
TCO=torch.from_numpy(np.stack([d.TCO for d in list_data])),
@@ -227,13 +248,13 @@ def make_data_from_obs(self, obs: SceneObservation) -> Union[PoseData, None]:
The object satisfies the constraints:
1. The visible 2D area is superior or equal to min_area
2. if `keep_objects_set` isn't None, the object must belong to this set
- If there are no objects that satisfy this condition in the observation, returns None.
+ If there are no objects that satisfy this condition in the observation,
+ returns None.
"""
-
obs = remove_invisible_objects(obs)
start = time.time()
- timings = dict()
+ timings = {}
s = time.time()
obs = self.resize_transform(obs)
@@ -326,7 +347,8 @@ def find_valid_data(self, iterator: Iterator[SceneObservation]) -> PoseData:
return data
n_attempts += 1
if n_attempts > 200:
- raise ValueError("Cannot find valid image in the dataset")
+ msg = "Cannot find valid image in the dataset"
+ raise ValueError(msg)
def __iter__(self) -> Iterator[PoseData]:
assert isinstance(self.scene_ds, IterableSceneDataset)
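The augmentation stack above nests "apply with probability p" wrappers; here is a generic stand-in for that composition pattern (not the real `SceneObservationAugmentation` class):

import random

class MaybeApply:
    """Toy analogue of SceneObsAug: apply wrapped transform(s) with prob. p."""

    def __init__(self, transforms, p):
        self.transforms = transforms if isinstance(transforms, list) else [transforms]
        self.p = p

    def __call__(self, x):
        if random.random() < self.p:
            for transform in self.transforms:
                x = transform(x)
        return x

# Mirrors the rgb pipeline shape: a p=0.8 group of individually gated steps.
pipeline = MaybeApply(
    [MaybeApply(lambda v: v + 1, p=0.4), MaybeApply(lambda v: v * 2, p=0.3)],
    p=0.8,
)
print(pipeline(10))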
diff --git a/happypose/toolbox/datasets/samplers.py b/happypose/toolbox/datasets/samplers.py
index 4ea6449d..7228ff73 100644
--- a/happypose/toolbox/datasets/samplers.py
+++ b/happypose/toolbox/datasets/samplers.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import numpy as np
import torch
@@ -71,7 +69,9 @@ def __init__(self, ds, num_replicas, rank, epoch_size, seed=0, shuffle=True):
# NOTE: Epoch size is local.
total_epoch_size = epoch_size * num_replicas
n_repeats = 1 + total_epoch_size // len(ds)
- self.all_indices = np.concatenate([np.arange(len(ds)) for _ in range(n_repeats)])
+ self.all_indices = np.concatenate(
+ [np.arange(len(ds)) for _ in range(n_repeats)],
+ )
assert len(self.all_indices) >= total_epoch_size
self.total_epoch_size = total_epoch_size
self.seed = seed
@@ -86,6 +86,10 @@ def __len__(self):
def __iter__(self):
self.epoch += 1
with temp_numpy_seed(self.epoch + self.seed):
- indices_shuffled = np.random.permutation(self.all_indices)[: self.total_epoch_size]
- local_indices = np.array_split(indices_shuffled, self.num_replicas)[self.rank]
+ indices_shuffled = np.random.permutation(self.all_indices)[
+ : self.total_epoch_size
+ ]
+ local_indices = np.array_split(indices_shuffled, self.num_replicas)[
+ self.rank
+ ]
return iter(local_indices)
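A self-contained replay of the shuffling/sharding in `__iter__` above: every replica permutes the same repeated index pool under a shared `seed + epoch` and keeps only its own shard. This is a sketch; the original seeds the global NumPy state via `temp_numpy_seed` rather than a local `RandomState`:

import numpy as np

def local_epoch_indices(n_items, epoch_size, num_replicas, rank, seed, epoch):
    total_epoch_size = epoch_size * num_replicas
    n_repeats = 1 + total_epoch_size // n_items
    all_indices = np.concatenate([np.arange(n_items) for _ in range(n_repeats)])
    rng = np.random.RandomState(seed + epoch)
    shuffled = rng.permutation(all_indices)[:total_epoch_size]
    return np.array_split(shuffled, num_replicas)[rank]

# Both ranks agree on the permutation, so per-epoch shards never overlap.
print(local_epoch_indices(10, epoch_size=4, num_replicas=2, rank=0, seed=0, epoch=1))
print(local_epoch_indices(10, epoch_size=4, num_replicas=2, rank=1, seed=0, epoch=1))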
diff --git a/happypose/toolbox/datasets/scene_dataset.py b/happypose/toolbox/datasets/scene_dataset.py
index 275bab74..a218477c 100644
--- a/happypose/toolbox/datasets/scene_dataset.py
+++ b/happypose/toolbox/datasets/scene_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +13,6 @@
limitations under the License.
"""
-
from __future__ import annotations
# Standard Library
@@ -23,8 +21,9 @@
import os
import random
import time
+from collections.abc import Iterator
from dataclasses import dataclass
-from typing import Any, Dict, Iterator, List, Optional, Union
+from typing import Any, Optional, Union
# Third Party
import numpy as np
@@ -39,8 +38,8 @@
from happypose.toolbox.utils.tensor_collection import PandasTensorCollection
from happypose.toolbox.utils.types import Resolution
-ListBbox = List[int]
-ListPose = List[List[float]]
+ListBbox = list[int]
+ListPose = list[list[float]]
"""
infos: pd.DataFrame with fields
@@ -52,7 +51,8 @@
tensors:
K: [B,3,3] camera intrinsics
poses: [B,4,4] object to camera transform
- poses_init [Optional]: [B,4,4] object to camera transform. Used if the dataset has initial estimates (ModelNet)
+ poses_init [Optional]: [B,4,4] object to camera transform. Used if the dataset has
+ initial estimates (ModelNet)
TCO: same as poses
bboxes: [B,4] bounding boxes for objects
masks: (optional)
@@ -61,7 +61,7 @@
SceneObservationTensorCollection = PandasTensorCollection
SingleDataJsonType = Union[str, float, ListPose, int, ListBbox, Any]
-DataJsonType = Union[Dict[str, SingleDataJsonType], List[SingleDataJsonType]]
+DataJsonType = Union[dict[str, SingleDataJsonType], list[SingleDataJsonType]]
def transform_to_list(T: Transform) -> ListPose:
@@ -70,20 +70,20 @@ def transform_to_list(T: Transform) -> ListPose:
@dataclass
class ObjectData:
- # NOTE (Yann): bbox_amodal, bbox_modal, visib_fract should be moved to SceneObservation
+ # NOTE (Yann): bbox_amodal, bbox_modal, visib_fract should be moved to
+ # SceneObservation
label: str
TWO: Optional[Transform] = None
unique_id: Optional[int] = None
bbox_amodal: Optional[np.ndarray] = None # (4, ) array [xmin, ymin, xmax, ymax]
bbox_modal: Optional[np.ndarray] = None # (4, ) array [xmin, ymin, xmax, ymax]
visib_fract: Optional[float] = None
- TWO_init: Optional[
- Transform
- ] = None # Some pose estimation datasets (ModelNet) provide an initial pose estimate
+ TWO_init: Optional[Transform] = None
+ # Some pose estimation datasets (ModelNet) provide an initial pose estimate
# NOTE: This should be loaded externally
- def to_json(self) -> Dict[str, SingleDataJsonType]:
- d: Dict[str, SingleDataJsonType] = dict(label=self.label)
+ def to_json(self) -> dict[str, SingleDataJsonType]:
+ d: dict[str, SingleDataJsonType] = {"label": self.label}
for k in ("TWO", "TWO_init"):
if getattr(self, k) is not None:
d[k] = transform_to_list(getattr(self, k))
@@ -96,7 +96,7 @@ def to_json(self) -> Dict[str, SingleDataJsonType]:
return d
@staticmethod
- def from_json(d: DataJsonType) -> "ObjectData":
+ def from_json(d: DataJsonType) -> ObjectData:
assert isinstance(d, dict)
label = d["label"]
assert isinstance(label, str)
@@ -126,13 +126,12 @@ class CameraData:
resolution: Optional[Resolution] = None
TWC: Optional[Transform] = None
camera_id: Optional[str] = None
- TWC_init: Optional[
- Transform
- ] = None # Some pose estimation datasets (ModelNet) provide an initial pose estimate
+ TWC_init: Optional[Transform] = None
+ # Some pose estimation datasets (ModelNet) provide an initial pose estimate
# NOTE: This should be loaded externally
def to_json(self) -> str:
- d: Dict[str, SingleDataJsonType] = dict()
+ d: dict[str, SingleDataJsonType] = {}
for k in ("TWC", "TWC_init"):
if getattr(self, k) is not None:
d[k] = transform_to_list(getattr(self, k))
@@ -145,7 +144,7 @@ def to_json(self) -> str:
return json.dumps(d)
@staticmethod
- def from_json(data_str: str) -> "CameraData":
+ def from_json(data_str: str) -> CameraData:
d: DataJsonType = json.loads(data_str)
assert isinstance(d, dict)
data = CameraData()
@@ -183,7 +182,7 @@ def to_json(self) -> str:
return json.dumps(self.__dict__)
@staticmethod
- def from_json(data_str: str) -> "ObservationInfos":
+ def from_json(data_str: str) -> ObservationInfos:
d = json.loads(data_str)
assert "scene_id" in d
assert "view_id" in d
@@ -195,24 +194,27 @@ class SceneObservation:
rgb: Optional[np.ndarray] = None # (h,w,3) uint8 numpy array
depth: Optional[np.ndarray] = None # (h, w), np.float32
segmentation: Optional[np.ndarray] = None # (h, w), np.uint32 (important);
- # contains objects unique ids. int64 are not handled and can be dangerous when used with PIL
+ # contains objects unique ids. int64 are not handled and can be dangerous when used
+ # with PIL
infos: Optional[ObservationInfos] = None
- object_datas: Optional[List[ObjectData]] = None
+ object_datas: Optional[list[ObjectData]] = None
camera_data: Optional[CameraData] = None
- binary_masks: Optional[
- Dict[int, np.ndarray]
- ] = None # dict mapping unique id to (h, w) np.bool_
+ # dict mapping unique id to (h, w) np.bool_
+ binary_masks: Optional[dict[int, np.ndarray]] = None
@staticmethod
def collate_fn(
- batch: List[SceneObservation], object_labels: Optional[List[str]] = None
- ) -> Dict[Any, Any]:
+ batch: list[SceneObservation],
+ object_labels: Optional[list[str]] = None,
+ ) -> dict[Any, Any]:
"""Collate a batch of SceneObservation objects.
Args:
+ ----
object_labels: If passed in parse only those object labels.
Returns:
+ -------
A dict with fields
cameras: PandasTensorCollection
rgb: torch.tensor [B,3,H,W] torch.uint8
@@ -235,21 +237,21 @@ def collate_fn(
rgb_images = []
depth_images = []
- for n, data in enumerate(batch):
+ for _n, data in enumerate(batch):
# data is of type SceneObservation
batch_im_id += 1
- im_info = dict(
- scene_id=data.infos.scene_id,
- view_id=data.infos.view_id,
- batch_im_id=batch_im_id,
- )
+ im_info = {
+ "scene_id": data.infos.scene_id,
+ "view_id": data.infos.view_id,
+ "batch_im_id": batch_im_id,
+ }
im_infos.append(im_info)
K.append(data.camera_data.K)
- cam_info = dict(
- TWC=data.camera_data.TWC,
- resolution=data.camera_data.resolution,
- )
+ cam_info = {
+ "TWC": data.camera_data.TWC,
+ "resolution": data.camera_data.resolution,
+ }
cam_infos.append(cam_info)
# [3,H,W]
@@ -288,19 +290,19 @@ def collate_fn(
infos=pd.DataFrame(cam_infos),
K=torch.as_tensor(np.stack(K)),
)
- return dict(
- cameras=cameras,
- rgb=torch.stack(rgb_images), # [B,3,H,W]
- depth=torch.as_tensor(np.stack(depth_images)), # [B,1,H,W] or [B,0]
- im_infos=im_infos,
- gt_detections=gt_detections,
- gt_data=gt_data,
- initial_data=initial_data,
- )
+ return {
+ "cameras": cameras,
+ "rgb": torch.stack(rgb_images), # [B,3,H,W]
+ "depth": torch.as_tensor(np.stack(depth_images)), # [B,1,H,W] or [B,0]
+ "im_infos": im_infos,
+ "gt_detections": gt_detections,
+ "gt_data": gt_data,
+ "initial_data": initial_data,
+ }
def as_pandas_tensor_collection(
self,
- object_labels: Optional[List[str]] = None,
+ object_labels: Optional[list[str]] = None,
) -> SceneObservationTensorCollection:
"""Convert SceneData to a PandasTensorCollection representation."""
obs = self
@@ -319,15 +321,15 @@ def as_pandas_tensor_collection(
if obs.camera_data.TWC_init is not None:
TWC_init = torch.as_tensor(obs.camera_data.TWC_init.matrix).float()
- for n, obj_data in enumerate(obs.object_datas):
+ for _n, obj_data in enumerate(obs.object_datas):
if object_labels is not None and obj_data.label not in object_labels:
continue
- info = dict(
- label=obj_data.label,
- scene_id=obs.infos.scene_id,
- view_id=obs.infos.view_id,
- visib_fract=getattr(obj_data, "visib_fract", 1),
- )
+ info = {
+ "label": obj_data.label,
+ "scene_id": obs.infos.scene_id,
+ "view_id": obs.infos.view_id,
+ "visib_fract": getattr(obj_data, "visib_fract", 1),
+ }
infos.append(info)
TWO.append(torch.tensor(obj_data.TWO.matrix).float())
bboxes.append(torch.tensor(obj_data.bbox_modal).float())
@@ -399,18 +401,23 @@ def __init__(
Can be an IterableDataset or a map-style Dataset.
Args:
- frame_index (pd.DataFrame): Must contain the following columns: scene_id, view_id
- load_depth (bool, optional): Whether to load depth images. Defaults to False.
+ ----
+ frame_index (pd.DataFrame): Must contain the following columns:
+ scene_id, view_id
+ load_depth (bool, optional): Whether to load depth images.
+ Defaults to False.
load_segmentation (bool, optional): Whether to load image segmentation.
Defaults to True.
Defaults to f'{label}'.
"""
-
self.frame_index = frame_index
self.load_depth = load_depth
self.load_segmentation = load_segmentation
- def _load_scene_observation(self, image_infos: ObservationInfos) -> SceneObservation:
+ def _load_scene_observation(
+ self,
+ image_infos: ObservationInfos,
+ ) -> SceneObservation:
raise NotImplementedError
def __getitem__(self, idx: int) -> SceneObservation:
@@ -468,7 +475,7 @@ def __iter__(self) -> Iterator[SceneObservation]:
class IterableMultiSceneDataset(IterableSceneDataset):
def __init__(
self,
- list_iterable_scene_ds: List[IterableSceneDataset],
+ list_iterable_scene_ds: list[IterableSceneDataset],
deterministic: bool = False,
):
self.list_iterable_scene_ds = list_iterable_scene_ds
@@ -490,4 +497,3 @@ def __iter__(self) -> Iterator[SceneObservation]:
while True:
idx = self.rng.randint(0, len(self.iterators) - 1)
yield next(self.iterators[idx])
-
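A round-trip sketch for the `ObjectData` (de)serialization above. It assumes, as the `parse_pose` usage earlier in this diff suggests, that `Transform` accepts a 4x4 homogeneous matrix:

import numpy as np

from happypose.toolbox.datasets.scene_dataset import ObjectData
from happypose.toolbox.lib3d.transform import Transform

data = ObjectData(label="ycbv-obj_000001", TWO=Transform(np.eye(4)))
# to_json returns a plain dict, which from_json consumes directly.
restored = ObjectData.from_json(data.to_json())
assert restored.label == data.label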
diff --git a/happypose/toolbox/datasets/scene_dataset_wrappers.py b/happypose/toolbox/datasets/scene_dataset_wrappers.py
index ecc55e23..1352d70e 100644
--- a/happypose/toolbox/datasets/scene_dataset_wrappers.py
+++ b/happypose/toolbox/datasets/scene_dataset_wrappers.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/happypose/toolbox/datasets/shapenet_object_dataset.py b/happypose/toolbox/datasets/shapenet_object_dataset.py
index db429b88..2e626a9c 100644
--- a/happypose/toolbox/datasets/shapenet_object_dataset.py
+++ b/happypose/toolbox/datasets/shapenet_object_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -43,9 +42,9 @@ def __init__(self, synset_id, source_id):
def load_object_infos(models_infos_path):
- with open(models_infos_path, "r") as f:
+ with open(models_infos_path) as f:
infos = json.load(f)
- itos = dict()
+ itos = {}
for info in infos:
k = f"shapenet_{info['shapenet_synset_id']}_{info['shapenet_source_id']}"
itos[info["obj_id"]] = k
@@ -59,7 +58,7 @@ def make_shapenet_infos(shapenet_dir, model_name):
taxonomy_path = shapenet_dir / "taxonomy.json"
taxonomy = json.loads(taxonomy_path.read_text())
- synset_id_to_synset = dict()
+ synset_id_to_synset = {}
def get_synset(synset_id):
if synset_id not in synset_id_to_synset:
@@ -95,7 +94,7 @@ def get_descendants(synset):
if len(synset.children) == 0:
return synset.models
else:
- return sum([get_descendants(child) for child in children])
+ return sum([get_descendants(child) for child in synset.children])
for synset in synset_id_to_synset.values():
synset.models_descendants = get_descendants(synset)
@@ -120,7 +119,8 @@ def __init__(
model_name = "model_normalized_pointcloud.obj"
ypr_offset_deg = (0.0, 0.0, 0.0)
else:
- raise ValueError("split")
+ msg = "split"
+ raise ValueError(msg)
synsets = make_shapenet_infos(self.shapenet_dir, model_name)
main_synsets = [
@@ -131,7 +131,6 @@ def __init__(
objects = []
for synset in main_synsets:
-
for source_id in synset.models_descendants:
model_path = (
self.shapenet_dir
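The `@@ -95,7` hunk above fixes a `NameError` in `get_descendants` (the undefined name `children` becomes `synset.children`). A toy reconstruction of the corrected recursion, with an explicit `[]` start so the list concatenation in `sum` is well defined:

class Synset:
    # Minimal stand-in for the synset nodes built by make_shapenet_infos.
    def __init__(self, models=(), children=()):
        self.models = list(models)
        self.children = list(children)

def get_descendants(synset):
    if len(synset.children) == 0:
        return synset.models
    return sum([get_descendants(child) for child in synset.children], [])

leaf_a, leaf_b = Synset(models=["m1"]), Synset(models=["m2", "m3"])
root = Synset(children=[leaf_a, leaf_b])
assert get_descendants(root) == ["m1", "m2", "m3"]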
diff --git a/happypose/toolbox/datasets/urdf_dataset.py b/happypose/toolbox/datasets/urdf_dataset.py
index 80534f3c..c650eeb4 100644
--- a/happypose/toolbox/datasets/urdf_dataset.py
+++ b/happypose/toolbox/datasets/urdf_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from pathlib import Path
@@ -24,7 +22,12 @@
class UrdfDataset(RigidObjectDataset):
- def __init__(self, ds_dir: Path, mesh_units: str = "m", label_format: str = "{label}"):
+ def __init__(
+ self,
+ ds_dir: Path,
+ mesh_units: str = "m",
+ label_format: str = "{label}",
+ ):
objects = []
for urdf_dir in ds_dir.iterdir():
urdf_paths = list(urdf_dir.glob("*.urdf"))
@@ -33,6 +36,10 @@ def __init__(self, ds_dir: Path, mesh_units: str = "m", label_format: str = "{la
label = urdf_dir.name
label = label_format.format(label=label)
objects.append(
- RigidObject(label=label, mesh_path=urdf_path, mesh_units=mesh_units)
+ RigidObject(
+ label=label,
+ mesh_path=urdf_path,
+ mesh_units=mesh_units,
+ ),
)
super().__init__(objects)
diff --git a/happypose/toolbox/datasets/utils.py b/happypose/toolbox/datasets/utils.py
index 418d854a..4e671bfa 100644
--- a/happypose/toolbox/datasets/utils.py
+++ b/happypose/toolbox/datasets/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,9 +14,7 @@
"""
-
# Standard Library
-from typing import Dict, List
# Third Party
import numpy as np
@@ -25,18 +22,20 @@
def make_detections_from_segmentation(
segmentations: np.ndarray,
-) -> List[Dict[int, np.ndarray]]:
- """
- segmentations: (n, h, w) int np.ndarray
- """
+) -> list[dict[int, np.ndarray]]:
+ """segmentations: (n, h, w) int np.ndarray."""
assert segmentations.ndim == 3
detections = []
for segmentation_n in segmentations:
- dets_n = dict()
+ dets_n = {}
for unique_id in np.unique(segmentation_n):
ids = np.where(segmentation_n == unique_id)
- x1, y1, x2, y2 = np.min(ids[1]), np.min(ids[0]), np.max(ids[1]), np.max(ids[0])
+ x1, y1, x2, y2 = (
+ np.min(ids[1]),
+ np.min(ids[0]),
+ np.max(ids[1]),
+ np.max(ids[0]),
+ )
dets_n[int(unique_id)] = np.array([x1, y1, x2, y2])
detections.append(dets_n)
return detections
-
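A quick check of `make_detections_from_segmentation` above on a toy label map (note the background id 0 also gets a box, since the loop visits every unique id):

import numpy as np

from happypose.toolbox.datasets.utils import make_detections_from_segmentation

seg = np.zeros((1, 4, 6), dtype=np.int64)
seg[0, 1:3, 2:5] = 7  # one object with unique id 7
dets = make_detections_from_segmentation(seg)
print(dets[0][7])  # -> [2 1 4 2], i.e. [x1, y1, x2, y2]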
diff --git a/happypose/toolbox/datasets/web_scene_dataset.py b/happypose/toolbox/datasets/web_scene_dataset.py
index efea72b7..01e6dff9 100644
--- a/happypose/toolbox/datasets/web_scene_dataset.py
+++ b/happypose/toolbox/datasets/web_scene_dataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -18,12 +17,12 @@
# Standard Library
import io
import json
-import tarfile
from collections import defaultdict
+from collections.abc import Iterator
from functools import partial
from hashlib import sha1
from pathlib import Path
-from typing import Any, Dict, Iterator, List, Optional, Set, Union
+from typing import Any, Optional, Union
# Third Party
import imageio
@@ -60,18 +59,19 @@ def write_scene_ds_as_wds(
n_reading_workers: int = 8,
maxcount: int = 1000,
shard_format: str = "shard-%08d.tar",
- keep_labels_set: Optional[Set] = None,
+ keep_labels_set: Optional[set] = None,
n_max_frames: Optional[int] = None,
- frame_ids: Optional[List[int]] = None,
+ frame_ids: Optional[list[int]] = None,
depth_scale: int = 1000,
) -> None:
-
assert scene_ds.frame_index is not None
wds_dir.mkdir(exist_ok=True, parents=True)
frame_index = scene_ds.frame_index.copy()
shard_writer = wds.ShardWriter(
- str(wds_dir / shard_format), maxcount=maxcount, start_shard=0
+ str(wds_dir / shard_format),
+ maxcount=maxcount,
+ start_shard=0,
)
sampler = None
@@ -96,13 +96,13 @@ def write_scene_ds_as_wds(
if keep_labels_set is not None:
assert obs.object_datas is not None
- object_labels = set([obj.label for obj in obs.object_datas])
+ object_labels = {obj.label for obj in obs.object_datas}
n_objects_valid = len(object_labels.intersection(keep_labels_set))
if n_objects_valid == 0:
continue
key = sha1(obs.rgb.data).hexdigest()
- sample: Dict[str, Any] = {
+ sample: dict[str, Any] = {
"__key__": key,
}
if obs.rgb is not None:
@@ -127,20 +127,19 @@ def write_scene_ds_as_wds(
frame_index = frame_index.loc[:, ["scene_id", "view_id", "key", "shard_fname"]]
shard_writer.close()
frame_index.to_feather(wds_dir / "frame_index.feather")
- ds_infos = dict(
- depth_scale=depth_scale,
- )
+ ds_infos = {
+ "depth_scale": depth_scale,
+ }
(wds_dir / "infos.json").write_text(json.dumps(ds_infos))
return
def load_scene_ds_obs(
- sample: Dict[str, Union[bytes, str]],
+ sample: dict[str, Union[bytes, str]],
depth_scale: float = 1000.0,
load_depth: bool = False,
label_format: str = "{label}",
) -> SceneObservation:
-
assert isinstance(sample["rgb.png"], bytes)
assert isinstance(sample["segmentation.png"], bytes)
assert isinstance(sample["depth.png"], bytes)
@@ -156,7 +155,7 @@ def load_scene_ds_obs(
depth = np.asarray(depth, dtype=np.float32)
depth /= depth_scale
- object_datas_json: List[DataJsonType] = json.loads(sample["object_datas.json"])
+ object_datas_json: list[DataJsonType] = json.loads(sample["object_datas.json"])
object_datas = [ObjectData.from_json(d) for d in object_datas_json]
for obj in object_datas:
obj.label = label_format.format(label=obj.label)
@@ -204,7 +203,7 @@ def __init__(
load_segmentation=load_segmentation,
)
- def get_tar_list(self) -> List[str]:
+ def get_tar_list(self) -> list[str]:
tar_files = [str(x) for x in self.wds_dir.iterdir() if x.suffix == ".tar"]
tar_files.sort()
return tar_files
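The shard writer above keys each WebDataset sample by the SHA-1 of its raw RGB buffer, so identical frames always map to the same key; in isolation:

from hashlib import sha1

import numpy as np

rgb = np.zeros((4, 4, 3), dtype=np.uint8)
key = sha1(rgb.data).hexdigest()
print(key)  # deterministic for identical pixel buffers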
diff --git a/happypose/toolbox/inference/detector.py b/happypose/toolbox/inference/detector.py
index b9587d15..fb10d6c8 100644
--- a/happypose/toolbox/inference/detector.py
+++ b/happypose/toolbox/inference/detector.py
@@ -1,4 +1,3 @@
-
# Standard Library
from abc import ABCMeta, abstractmethod
@@ -10,9 +9,8 @@
class DetectorModule(torch.nn.Module, metaclass=ABCMeta):
-
@abstractmethod
def get_detections(
- self
+ self,
) -> DetectionsType:
pass
diff --git a/happypose/toolbox/inference/pose_estimator.py b/happypose/toolbox/inference/pose_estimator.py
index 0792b7b4..06ab1c07 100644
--- a/happypose/toolbox/inference/pose_estimator.py
+++ b/happypose/toolbox/inference/pose_estimator.py
@@ -1,6 +1,5 @@
# Standard Library
from abc import ABCMeta, abstractmethod
-from typing import Tuple
# Third Party
import torch
@@ -10,21 +9,20 @@
class PoseEstimationModule(torch.nn.Module, metaclass=ABCMeta):
-
@abstractmethod
def forward_coarse_model(
- self
- ) -> Tuple[PoseEstimatesType, dict]:
+ self,
+ ) -> tuple[PoseEstimatesType, dict]:
pass
@abstractmethod
def forward_refiner(
- self
- ) -> Tuple[dict, dict]:
+ self,
+ ) -> tuple[dict, dict]:
pass
@abstractmethod
def run_inference_pipeline(
- self
- ) -> Tuple[PoseEstimatesType, dict]:
+ self,
+ ) -> tuple[PoseEstimatesType, dict]:
pass
diff --git a/happypose/toolbox/inference/types.py b/happypose/toolbox/inference/types.py
index 448ee63a..fd43dccc 100644
--- a/happypose/toolbox/inference/types.py
+++ b/happypose/toolbox/inference/types.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,12 +13,11 @@
limitations under the License.
"""
-
from __future__ import annotations
# Standard Library
from dataclasses import dataclass
-from typing import Optional, Tuple
+from typing import Optional
# Third Party
import numpy as np
@@ -102,10 +100,8 @@ class InferenceConfig:
@dataclass
class ObservationTensor:
- """
-
- images: [B,C,H,W] with C=3 (rgb) or C=4 (rgbd). RGB dimensions should already
- be normalized to be in [0,1] by diving the uint8 values by 255
+ """images: [B,C,H,W] with C=3 (rgb) or C=4 (rgbd). RGB dimensions should already
+ be normalized to be in [0,1] by dividing the uint8 values by 255.
K: [B,3,3] camera intrinsics
"""
@@ -122,14 +118,14 @@ def cuda(self) -> ObservationTensor:
@property
def batch_size(self) -> int:
"""Returns the batch size."""
-
return self.images.shape[0]
@property
def depth(self) -> torch.tensor:
"""Returns depth tensor.
- Returns:
+ Returns
+ -------
torch.tensor with shape [B,H,W]
"""
assert self.channel_dim == 4
@@ -141,7 +137,6 @@ def channel_dim(self) -> int:
return self.images.shape[1]
def is_valid(self) -> bool:
-
if not self.images.ndim == 4:
return False
@@ -175,12 +170,12 @@ def from_numpy(
"""Create an ObservationData type from numpy data.
Args:
+ ----
rgb: [H,W,3] np.uint8
depth: [H,W] np.float
K: [3,3] np.float
"""
-
assert rgb.dtype == np.uint8
rgb_tensor = torch.as_tensor(rgb).float() / 255
@@ -201,17 +196,17 @@ def from_numpy(
@staticmethod
def from_torch_batched(
- rgb: torch.Tensor, depth: torch.Tensor, K: torch.Tensor
+ rgb: torch.Tensor,
+ depth: torch.Tensor,
+ K: torch.Tensor,
) -> ObservationTensor:
- """
-
- Args:
+ """Args:
+ ----
rgb: [B,3,H,W] torch.uint8
depth: [B,1,H,W] torch.float
- K: [B,3,3] torch.float
+ K: [B,3,3] torch.float.
"""
-
assert rgb.dtype == torch.uint8
# [B,3,H,W]
@@ -221,7 +216,6 @@ def from_torch_batched(
# [C,H,W]
if depth is not None:
-
if depth.ndim == 3:
depth.unsqueeze(1)
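
As context for the reworded `ObservationTensor` docstring, this editor-added sketch shows the `[B,C,H,W]` layout and `[0,1]` normalization it describes, using plain torch; `from_numpy` performs equivalent steps internally, and all shapes here are illustrative.

```python
import numpy as np
import torch

rgb = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # [H,W,3]
depth = np.random.rand(480, 640).astype(np.float32)             # [H,W]
K = np.array([[600.0, 0, 320], [0, 600.0, 240], [0, 0, 1]], dtype=np.float32)

rgb_t = torch.as_tensor(rgb).float().permute(2, 0, 1) / 255  # [3,H,W], in [0,1]
depth_t = torch.as_tensor(depth).unsqueeze(0)                # [1,H,W]
images = torch.cat((rgb_t, depth_t), dim=0).unsqueeze(0)     # [1,4,H,W] -> rgbd
K_t = torch.as_tensor(K).unsqueeze(0)                        # [1,3,3]
assert images.shape[1] == 4  # channel_dim == 4 means depth is available
```
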
diff --git a/happypose/toolbox/inference/utils.py b/happypose/toolbox/inference/utils.py
index 6fceb2ed..d61a0154 100644
--- a/happypose/toolbox/inference/utils.py
+++ b/happypose/toolbox/inference/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
from pathlib import Path
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import Optional, Union
# Third Party
import numpy as np
@@ -26,36 +25,40 @@
import yaml
from omegaconf import OmegaConf
-# HappyPose
-from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
-from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
-
# MegaPose
import happypose.pose_estimators.megapose
import happypose.toolbox.utils.tensor_collection as tc
from happypose.pose_estimators.megapose.config import EXP_DIR
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
from happypose.pose_estimators.megapose.inference.detector import Detector
-from happypose.toolbox.inference.types import DetectionsType, PoseEstimatesType
-from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
from happypose.pose_estimators.megapose.models.pose_rigid import PosePredictor
-from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
from happypose.pose_estimators.megapose.training.detector_models_cfg import (
check_update_config as check_update_config_detector,
)
-from happypose.pose_estimators.megapose.training.detector_models_cfg import create_model_detector
+from happypose.pose_estimators.megapose.training.detector_models_cfg import (
+ create_model_detector,
+)
from happypose.pose_estimators.megapose.training.pose_models_cfg import (
check_update_config as check_update_config_pose,
)
-from happypose.pose_estimators.megapose.training.pose_models_cfg import create_model_pose
+from happypose.pose_estimators.megapose.training.pose_models_cfg import (
+ create_model_pose,
+)
from happypose.pose_estimators.megapose.training.training_config import TrainingConfig
+
+# HappyPose
+from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
+from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
+from happypose.toolbox.inference.types import DetectionsType, PoseEstimatesType
+from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
+from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
from happypose.toolbox.utils.logging import get_logger
from happypose.toolbox.utils.models_compat import change_keys_of_older_models
from happypose.toolbox.utils.tensor_collection import PandasTensorCollection
logger = get_logger(__name__)
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
def load_detector(run_id: str) -> torch.nn.Module:
run_dir = EXP_DIR / run_id
@@ -63,7 +66,7 @@ def load_detector(run_id: str) -> torch.nn.Module:
cfg = check_update_config_detector(cfg)
label_to_category_id = cfg.label_to_category_id
model = create_model_detector(cfg, len(label_to_category_id))
- ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=torch.device('cpu'))
+ ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=torch.device("cpu"))
ckpt = ckpt["state_dict"]
model.load_state_dict(ckpt)
model = model.to(device).eval()
@@ -87,8 +90,11 @@ def load_pose_models(
force_panda3d_renderer: bool = False,
renderer_kwargs: Optional[dict] = None,
models_root: Path = EXP_DIR,
-) -> Tuple[torch.nn.Module, torch.nn.Module, happypose.toolbox.lib3d.rigid_mesh_database.BatchedMeshes]:
-
+) -> tuple[
+ torch.nn.Module,
+ torch.nn.Module,
+ happypose.toolbox.lib3d.rigid_mesh_database.BatchedMeshes,
+]:
coarse_run_dir = models_root / coarse_run_id
coarse_cfg: TrainingConfig = load_cfg(coarse_run_dir / "config.yaml")
coarse_cfg = check_update_config_pose(coarse_cfg)
@@ -107,7 +113,7 @@ def load_pose_models(
def make_renderer(renderer_type: str) -> Panda3dBatchRenderer:
logger.debug("renderer_kwargs", renderer_kwargs)
if renderer_kwargs is None:
- renderer_kwargs_ = dict()
+ renderer_kwargs_ = {}
else:
renderer_kwargs_ = renderer_kwargs
@@ -116,7 +122,10 @@ def make_renderer(renderer_type: str) -> Panda3dBatchRenderer:
renderer_kwargs_.setdefault("n_workers", 4)
if renderer_type == "panda3d" or force_panda3d_renderer:
- renderer = Panda3dBatchRenderer(object_dataset=object_dataset, **renderer_kwargs_)
+ renderer = Panda3dBatchRenderer(
+ object_dataset=object_dataset,
+ **renderer_kwargs_,
+ )
else:
raise ValueError(renderer_type)
return renderer
@@ -131,12 +140,15 @@ def make_renderer(renderer_type: str) -> Panda3dBatchRenderer:
def load_model(run_id: str, renderer: Panda3dBatchRenderer) -> PosePredictor:
if run_id is None:
- return
+ return None
run_dir = models_root / run_id
cfg: TrainingConfig = load_cfg(run_dir / "config.yaml")
cfg = check_update_config_pose(cfg)
model = create_model_pose(cfg, renderer=renderer, mesh_db=mesh_db_batched)
- ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=torch.device('cpu'))
+ ckpt = torch.load(
+ run_dir / "checkpoint.pth.tar",
+ map_location=torch.device("cpu"),
+ )
ckpt = ckpt["state_dict"]
ckpt = change_keys_of_older_models(ckpt)
model.load_state_dict(ckpt)
@@ -152,7 +164,7 @@ def load_model(run_id: str, renderer: Panda3dBatchRenderer) -> PosePredictor:
def add_instance_id(
- inputs: Union[PoseEstimatesType, DetectionsType]
+ inputs: Union[PoseEstimatesType, DetectionsType],
) -> Union[PoseEstimatesType, DetectionsType]:
"""Adds a column with instance_id to the provided detections.
@@ -168,7 +180,7 @@ def create_instance_id(df: pd.DataFrame) -> pd.DataFrame:
df = inputs.infos
df = df.groupby(["batch_im_id", "label"], group_keys=False).apply(
- lambda df: create_instance_id(df)
+ lambda df: create_instance_id(df),
)
inputs.infos = df
return inputs
@@ -176,11 +188,10 @@ def create_instance_id(df: pd.DataFrame) -> pd.DataFrame:
def filter_detections(
detections: DetectionsType,
- labels: Optional[List[str]] = None,
+ labels: Optional[list[str]] = None,
one_instance_per_class: bool = False,
) -> DetectionsType:
"""Filter detections based on kwargs."""
-
if labels is not None:
df = detections.infos
df = df[df.label.isin(labels)]
@@ -197,10 +208,11 @@ def filter_detections(
return detections
-def make_cameras(camera_data: List[CameraData]) -> PandasTensorCollection:
+def make_cameras(camera_data: list[CameraData]) -> PandasTensorCollection:
"""Creates a PandasTensorCollection from list of camera data.
- Returns:
+ Returns
+ -------
PandasTensorCollection.
infos: pd.DataFrame with columns ['batch_im_id', 'resolution']
tensor: K with shape [B,3,3] of camera intrinsics matrices.
@@ -209,18 +221,18 @@ def make_cameras(camera_data: List[CameraData]) -> PandasTensorCollection:
K = []
for n, cam_data in enumerate(camera_data):
K.append(torch.tensor(cam_data.K))
- infos.append(dict(batch_im_id=n, resolution=cam_data.resolution))
+ infos.append({"batch_im_id": n, "resolution": cam_data.resolution})
return tc.PandasTensorCollection(infos=pd.DataFrame(infos), K=torch.stack(K))
-def make_detections_from_object_data(object_data: List[ObjectData]) -> DetectionsType:
+def make_detections_from_object_data(object_data: list[ObjectData]) -> DetectionsType:
infos = pd.DataFrame(
- dict(
- label=[data.label for data in object_data],
- batch_im_id=0,
- instance_id=np.arange(len(object_data)),
- )
+ {
+ "label": [data.label for data in object_data],
+ "batch_im_id": 0,
+ "instance_id": np.arange(len(object_data)),
+ },
)
bboxes = torch.as_tensor(
np.stack([data.bbox_modal for data in object_data]),
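
The rewritten `make_cameras` and `make_detections_from_object_data` follow the same pattern: an `infos` DataFrame built from plain dicts, paired with a stacked tensor. A minimal editor-added sketch of that layout without the `PandasTensorCollection` wrapper (all values are stand-ins):

```python
import pandas as pd
import torch

camera_Ks = [torch.eye(3), torch.eye(3)]  # stand-ins for CameraData.K
resolutions = [(480, 640), (480, 640)]

infos = pd.DataFrame(
    [{"batch_im_id": n, "resolution": res} for n, res in enumerate(resolutions)],
)
K = torch.stack(camera_Ks)  # [B,3,3], kept aligned with the infos rows
```
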
diff --git a/happypose/toolbox/lib3d/camera_geometry.py b/happypose/toolbox/lib3d/camera_geometry.py
index bc1c831d..7664c489 100644
--- a/happypose/toolbox/lib3d/camera_geometry.py
+++ b/happypose/toolbox/lib3d/camera_geometry.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,9 +14,7 @@
"""
-
# Standard Library
-from typing import Tuple
# Third Party
import torch
@@ -30,7 +27,10 @@ def project_points(points_3d, K, TCO):
n_points = points_3d.shape[1]
device = points_3d.device
if points_3d.shape[-1] == 3:
- points_3d = torch.cat((points_3d, torch.ones(batch_size, n_points, 1).to(device)), dim=-1)
+ points_3d = torch.cat(
+ (points_3d, torch.ones(batch_size, n_points, 1).to(device)),
+ dim=-1,
+ )
P = K @ TCO[:, :3]
suv = (P.unsqueeze(1) @ points_3d.unsqueeze(-1)).squeeze(-1)
suv = suv / suv[..., [-1]]
@@ -44,7 +44,10 @@ def project_points_robust(points_3d, K, TCO, z_min=0.1):
n_points = points_3d.shape[1]
device = points_3d.device
if points_3d.shape[-1] == 3:
- points_3d = torch.cat((points_3d, torch.ones(batch_size, n_points, 1).to(device)), dim=-1)
+ points_3d = torch.cat(
+ (points_3d, torch.ones(batch_size, n_points, 1).to(device)),
+ dim=-1,
+ )
P = K @ TCO[:, :3]
suv = (P.unsqueeze(1) @ points_3d.unsqueeze(-1)).squeeze(-1)
z = suv[..., -1]
@@ -65,14 +68,18 @@ def boxes_from_uv(uv):
def get_K_crop_resize(
- K: torch.Tensor, boxes: torch.Tensor, orig_size: Tuple[int, int], crop_resize: Tuple[int, int]
+ K: torch.Tensor,
+ boxes: torch.Tensor,
+ orig_size: tuple[int, int],
+ crop_resize: tuple[int, int],
) -> torch.Tensor:
- """
- Adapted from https://github.com/BerkeleyAutomation/perception/blob/master/perception/camera_intrinsics.py
+ """Adapted from https://github.com/BerkeleyAutomation/perception/blob/master/perception/camera_intrinsics.py
Skew is not handled.
+
Args:
+ ----
K: (bsz, 3, 3) float
- boxes: (bsz, 4) float
+ boxes: (bsz, 4) float.
"""
assert K.dim() == 3
assert K.shape[1:] == (3, 3)
@@ -115,12 +122,19 @@ def get_K_crop_resize(
return new_K
-def cropresize_backtransform_points2d(input_wh, boxes_2d_crop, output_wh, points_2d_in_output):
+def cropresize_backtransform_points2d(
+ input_wh,
+ boxes_2d_crop,
+ output_wh,
+ points_2d_in_output,
+):
bsz = input_wh.shape[0]
assert output_wh.shape == (bsz, 2)
assert input_wh.shape == (bsz, 2)
assert points_2d_in_output.dim() == 3
points_2d_normalized = points_2d_in_output / output_wh.unsqueeze(1)
- points_2d = boxes_2d_crop[:, [0, 1]].unsqueeze(1) + points_2d_normalized * input_wh.unsqueeze(1)
+ points_2d = boxes_2d_crop[:, [0, 1]].unsqueeze(
+ 1,
+ ) + points_2d_normalized * input_wh.unsqueeze(1)
return points_2d
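
`project_points`, reformatted above, is standard pinhole projection. The same arithmetic in a self-contained editor-added sketch, with assumed shapes and an identity camera pose:

```python
import torch

B, N = 1, 4
points_3d = torch.rand(B, N, 3) + torch.tensor([0.0, 0.0, 1.0])    # keep z > 0
K = torch.tensor([[[600.0, 0, 320], [0, 600.0, 240], [0, 0, 1]]])  # [B,3,3]
TCO = torch.eye(4).unsqueeze(0)                                    # [B,4,4]

pts_h = torch.cat((points_3d, torch.ones(B, N, 1)), dim=-1)  # homogenize: [B,N,4]
P = K @ TCO[:, :3]                                           # projection: [B,3,4]
suv = (P.unsqueeze(1) @ pts_h.unsqueeze(-1)).squeeze(-1)     # [B,N,3]
uv = (suv / suv[..., [-1]])[..., :2]                         # divide by depth
```
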
diff --git a/happypose/toolbox/lib3d/cosypose_ops.py b/happypose/toolbox/lib3d/cosypose_ops.py
index aa14a463..9eb1c82b 100644
--- a/happypose/toolbox/lib3d/cosypose_ops.py
+++ b/happypose/toolbox/lib3d/cosypose_ops.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,19 +14,21 @@
"""
-
# Third Party
import torch
# Local Folder
-from .rotations import (
- compute_rotation_matrix_from_ortho6d,
- compute_rotation_matrix_from_quaternions,
-)
+from .camera_geometry import project_points
+from .rotations import compute_rotation_matrix_from_ortho6d
from .transform_ops import invert_transform_matrices, transform_pts
-l1 = lambda diff: diff.abs()
-l2 = lambda diff: diff**2
+
+def l1(diff):
+ return diff.abs()
+
+
+def l2(diff):
+ return diff**2
def pose_update_with_reference_point(TCO, K, vxvyvz, dRCO, tCR):
@@ -48,7 +49,10 @@ def pose_update_with_reference_point(TCO, K, vxvyvz, dRCO, tCR):
xsrcysrc = tCR[:, :2]
tCR_out = tCR.clone()
tCR_out[:, 2] = ztgt.flatten()
- tCR_out[:, :2] = ((vxvy / fxfy) + (xsrcysrc / zsrc.repeat(1, 2))) * ztgt.repeat(1, 2)
+ tCR_out[:, :2] = ((vxvy / fxfy) + (xsrcysrc / zsrc.repeat(1, 2))) * ztgt.repeat(
+ 1,
+ 2,
+ )
tCO_out = dRCO @ (TCO[:, :3, 3] - tCR).unsqueeze(-1) + tCR_out.unsqueeze(-1)
tCO_out = tCO_out.squeeze(-1)
@@ -68,7 +72,7 @@ def loss_CO_symmetric(TCO_possible_gt, TCO_pred, points, l1_or_l2=l1):
TCO_points_possible_gt = transform_pts(TCO_possible_gt, points)
TCO_pred_points = transform_pts(TCO_pred, points)
losses_possible = l1_or_l2(
- (TCO_pred_points.unsqueeze(1) - TCO_points_possible_gt).flatten(-2, -1)
+ (TCO_pred_points.unsqueeze(1) - TCO_points_possible_gt).flatten(-2, -1),
).mean(-1)
loss, min_id = losses_possible.min(dim=1)
TCO_assign = TCO_possible_gt[torch.arange(bsz), min_id]
@@ -84,7 +88,6 @@ def loss_refiner_CO_disentangled_reference_point(
tCR,
):
# MegaPose
- from happypose.toolbox.lib3d.transform_ops import invert_transform_matrices
bsz = TCO_possible_gt.shape[0]
assert TCO_possible_gt.shape[0] == bsz
@@ -112,19 +115,31 @@ def loss_refiner_CO_disentangled_reference_point(
# First term
TCO_pred_orn = TCO_gt.clone()
TCO_pred_orn[:, :3, :3] = pose_update_with_reference_point(
- TCO_input, K_crop, torch.cat((vxvy_gt, vz_gt), dim=-1), dR, tCR
+ TCO_input,
+ K_crop,
+ torch.cat((vxvy_gt, vz_gt), dim=-1),
+ dR,
+ tCR,
)[:, :3, :3].to(TCO_pred_orn.dtype)
# Second term: influence of vxvy
TCO_pred_xy = TCO_gt.clone()
TCO_pred_xy[:, :2, [3]] = pose_update_with_reference_point(
- TCO_input, K_crop, torch.cat((vxvy, vz_gt), dim=-1), dR_gt, tCR
+ TCO_input,
+ K_crop,
+ torch.cat((vxvy, vz_gt), dim=-1),
+ dR_gt,
+ tCR,
)[:, :2, [3]].to(TCO_pred_xy.dtype)
# Third term: influence of vz
TCO_pred_z = TCO_gt.clone()
TCO_pred_z[:, [2], [3]] = pose_update_with_reference_point(
- TCO_input, K_crop, torch.cat((vxvy_gt, vz.unsqueeze(-1)), dim=-1), dR_gt, tCR
+ TCO_input,
+ K_crop,
+ torch.cat((vxvy_gt, vz.unsqueeze(-1)), dim=-1),
+ dR_gt,
+ tCR,
)[:, [2], [3]].to(TCO_pred_z.dtype)
loss_orn, _ = loss_CO_symmetric(TCO_possible_gt, TCO_pred_orn, points, l1_or_l2=l1)
@@ -167,14 +182,15 @@ def TCO_init_from_boxes(z_range, boxes, K):
def TCO_init_from_boxes_autodepth_with_R(boxes_2d, model_points_3d, K, R):
- """
- Args:
+ """Args:
+ ----
boxes_2d: [B,4], in (xmin, ymin, xmax, ymax) convention
model_points_3d: [B,N,3]
K: [B,3,3]
- R: [B,3,3]
+ R: [B,3,3].
- Returns:
+ Returns
+ -------
TCO: [B,4,4]
"""
# Used in BOP20 challenge
@@ -198,8 +214,12 @@ def TCO_init_from_boxes_autodepth_with_R(boxes_2d, model_points_3d, K, R):
C_pts_3d = transform_pts(TCO, model_points_3d)
if bsz > 0:
- deltax_3d = C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
- deltay_3d = C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ deltax_3d = (
+ C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
+ )
+ deltay_3d = (
+ C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ )
else:
deltax_3d = C_pts_3d[:, 0, 0]
deltay_3d = C_pts_3d[:, 0, 1]
@@ -239,8 +259,12 @@ def TCO_init_from_boxes_zup_autodepth(boxes_2d, model_points_3d, K):
C_pts_3d = transform_pts(TCO, model_points_3d)
if bsz > 0:
- deltax_3d = C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
- deltay_3d = C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ deltax_3d = (
+ C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
+ )
+ deltay_3d = (
+ C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ )
else:
deltax_3d = C_pts_3d[:, 0, 0]
deltay_3d = C_pts_3d[:, 0, 1]
@@ -261,6 +285,7 @@ def TCO_init_from_boxes_zup_autodepth(boxes_2d, model_points_3d, K):
def TCO_init_from_boxes_v3(layer, boxes, K):
# TODO: Clean these 2 functions
+ # TODO: F821 Undefined name `_TCO_init_from_boxes_v2`
# MegaPose
from happypose.pose_estimators.megapose.math_utils.meshes import get_T_offset
@@ -278,7 +303,7 @@ def TCO_init_from_boxes_v3(layer, boxes, K):
.to(boxes.device)
.to(boxes.dtype)
)
- TCO = _TCO_init_from_boxes_v2(z, boxes, K)
+ TCO = _TCO_init_from_boxes_v2(z, boxes, K) # noqa: F821
pts2d = project_points(pts, K, TCO)
deltax = pts2d[..., 0].max() - pts2d[..., 0].min()
deltay = pts2d[..., 1].max() - pts2d[..., 1].min()
@@ -290,7 +315,7 @@ def TCO_init_from_boxes_v3(layer, boxes, K):
ratio_y = deltay / bb_deltay
z2 = z * (ratio_y.unsqueeze(1) + ratio_x.unsqueeze(1)) / 2
- TCO = _TCO_init_from_boxes_v2(z2, boxes, K)
+ TCO = _TCO_init_from_boxes_v2(z2, boxes, K) # noqa: F821
return TCO
@@ -301,18 +326,29 @@ def init_K_TCO_from_boxes(boxes_2d, model_points_3d, z_guess, resolution):
H, W = min(resolution), max(resolution)
bsz = boxes_2d.shape[0]
- z = torch.as_tensor(z_guess).unsqueeze(0).unsqueeze(0).repeat(bsz, 1).to(device).float()
+ z = (
+ torch.as_tensor(z_guess)
+ .unsqueeze(0)
+ .unsqueeze(0)
+ .repeat(bsz, 1)
+ .to(device)
+ .float()
+ )
TCO = torch.eye(4).unsqueeze(0).to(torch.float).to(device).repeat(bsz, 1, 1)
TCO[:, 2, 3] = z.flatten()
C_pts_3d = transform_pts(TCO, model_points_3d)
- deltax_3d = C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
- deltay_3d = C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ deltax_3d = (
+ C_pts_3d[:, :, 0].max(dim=1).values - C_pts_3d[:, :, 0].min(dim=1).values
+ )
+ deltay_3d = (
+ C_pts_3d[:, :, 1].max(dim=1).values - C_pts_3d[:, :, 1].min(dim=1).values
+ )
bb_deltax = boxes_2d[:, 2] - boxes_2d[:, 0]
bb_deltay = boxes_2d[:, 3] - boxes_2d[:, 1]
- f_from_dx = bb_deltax * z_guess / deltax_3d
f_from_dy = bb_deltay * z_guess / deltay_3d
f = f_from_dy
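
The `TCO_init_from_boxes_*` functions above share one auto-depth idea: project the model at a guessed depth, compare its 2D extent with the detected box, and rescale the depth by the ratio. A toy editor-added sketch of that single step (the numbers are made up):

```python
import torch

z_guess = torch.tensor([1.0])       # initial depth guess, meters
bb_delta = torch.tensor([100.0])    # detected 2D box width, pixels
proj_delta = torch.tensor([200.0])  # projected model width at z_guess, pixels

# The model looks twice as large as the box, so it must sit ~twice as far.
z = z_guess * proj_delta / bb_delta  # -> 2.0
```
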
diff --git a/happypose/toolbox/lib3d/cropping.py b/happypose/toolbox/lib3d/cropping.py
index 00621e5b..3345a710 100644
--- a/happypose/toolbox/lib3d/cropping.py
+++ b/happypose/toolbox/lib3d/cropping.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import torch
import torchvision
@@ -27,10 +25,16 @@
from .camera_geometry import boxes_from_uv, project_points, project_points_robust
-def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240, 320), clamp=False):
- """
- gt_boxes: N x 4
- crop_boxes: N x 4
+def deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ lamb=1.4,
+ im_size=(240, 320),
+ clamp=False,
+):
+ """gt_boxes: N x 4
+ crop_boxes: N x 4.
"""
lobs, robs, uobs, dobs = obs_boxes[:, [0, 2, 1, 3]].t()
lrend, rrend, urend, drend = rend_boxes[:, [0, 2, 1, 3]].t()
@@ -47,10 +51,12 @@ def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240,
r = w / h
xdists = torch.cat(
- ((lobs - xc).abs(), (lrend - xc).abs(), (robs - xc).abs(), (rrend - xc).abs()), dim=1
+ ((lobs - xc).abs(), (lrend - xc).abs(), (robs - xc).abs(), (rrend - xc).abs()),
+ dim=1,
)
ydists = torch.cat(
- ((uobs - yc).abs(), (urend - yc).abs(), (dobs - yc).abs(), (drend - yc).abs()), dim=1
+ ((uobs - yc).abs(), (urend - yc).abs(), (dobs - yc).abs(), (drend - yc).abs()),
+ dim=1,
)
xdist = xdists.max(dim=1)[0]
ydist = ydists.max(dim=1)[0]
@@ -59,7 +65,10 @@ def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240,
xc, yc = xc.squeeze(-1), yc.squeeze(-1)
x1, y1, x2, y2 = xc - width / 2, yc - height / 2, xc + width / 2, yc + height / 2
- boxes = torch.cat((x1.unsqueeze(1), y1.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)), dim=1)
+ boxes = torch.cat(
+ (x1.unsqueeze(1), y1.unsqueeze(1), x2.unsqueeze(1), y2.unsqueeze(1)),
+ dim=1,
+ )
assert not clamp
if clamp:
boxes[:, [0, 2]] = torch.clamp(boxes[:, [0, 2]], 0, w - 1)
@@ -67,16 +76,37 @@ def deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, lamb=1.4, im_size=(240,
return boxes
-def deepim_crops(images, obs_boxes, K, TCO_pred, O_vertices, output_size=None, lamb=1.4):
+def deepim_crops(
+ images,
+ obs_boxes,
+ K,
+ TCO_pred,
+ O_vertices,
+ output_size=None,
+ lamb=1.4,
+):
batch_size, _, h, w = images.shape
device = images.device
if output_size is None:
output_size = (h, w)
uv = project_points(O_vertices, K, TCO_pred)
rend_boxes = boxes_from_uv(uv)
- rend_center_uv = project_points(torch.zeros(batch_size, 1, 3).to(device), K, TCO_pred)
- boxes = deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, im_size=(h, w), lamb=lamb)
- boxes = torch.cat((torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes), dim=1)
+ rend_center_uv = project_points(
+ torch.zeros(batch_size, 1, 3).to(device),
+ K,
+ TCO_pred,
+ )
+ boxes = deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ im_size=(h, w),
+ lamb=lamb,
+ )
+ boxes = torch.cat(
+ (torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes),
+ dim=1,
+ )
crops = crop_images(images, boxes, output_size=output_size, sampling_ratio=4)
return boxes[:, 1:], crops
@@ -101,9 +131,22 @@ def deepim_crops_robust(
rend_boxes = boxes_from_uv(uv)
TCR = TCO_pred.clone()
TCR[:, :3, -1] = tCR_in
- rend_center_uv = project_points_robust(torch.zeros(batch_size, 1, 3).to(device), K, TCR)
- boxes = deepim_boxes(rend_center_uv, obs_boxes, rend_boxes, im_size=(h, w), lamb=lamb)
- boxes = torch.cat((torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes), dim=1)
+ rend_center_uv = project_points_robust(
+ torch.zeros(batch_size, 1, 3).to(device),
+ K,
+ TCR,
+ )
+ boxes = deepim_boxes(
+ rend_center_uv,
+ obs_boxes,
+ rend_boxes,
+ im_size=(h, w),
+ lamb=lamb,
+ )
+ boxes = torch.cat(
+ (torch.arange(batch_size).unsqueeze(1).to(device).float(), boxes),
+ dim=1,
+ )
crops = None
if return_crops:
crops = crop_images(images, boxes, output_size=output_size, sampling_ratio=4)
@@ -123,10 +166,18 @@ def crop_images(images, boxes, output_size, sampling_ratio):
if not has_depth:
crops = torchvision.ops.roi_align(
- images, boxes, output_size=output_size, sampling_ratio=sampling_ratio
+ images,
+ boxes,
+ output_size=output_size,
+ sampling_ratio=sampling_ratio,
)
else:
- crops = torchvision.ops.roi_align(images, boxes, output_size=output_size, sampling_ratio=4)
+ crops = torchvision.ops.roi_align(
+ images,
+ boxes,
+ output_size=output_size,
+ sampling_ratio=4,
+ )
# roi_align can result in invalid depth measurements
# since it does interpolation. Simply set those to zero
@@ -135,7 +186,10 @@ def crop_images(images, boxes, output_size, sampling_ratio):
depth_valid = torch.zeros_like(images[:, DEPTH_DIMS])
depth_valid[depth > 0] = 1
depth_valid_crops = torchvision.ops.roi_align(
- depth_valid, boxes, output_size=output_size, sampling_ratio=4
+ depth_valid,
+ boxes,
+ output_size=output_size,
+ sampling_ratio=4,
)
depth_mask = torch.ones_like(depth_valid_crops)
depth_mask[depth_valid_crops < 0.99] = 0
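
`crop_images` relies on `torchvision.ops.roi_align` with boxes given as a `[K,5]` tensor whose first column is the batch image index, which is why the `deepim_crops*` functions above prepend `torch.arange(batch_size)`. An editor-added standalone sketch of that format:

```python
import torch
import torchvision

images = torch.rand(2, 3, 240, 320)  # [B,C,H,W]
boxes = torch.tensor(
    [
        [0.0, 10, 20, 110, 120],  # take this crop from image 0 (x1,y1,x2,y2)
        [1.0, 30, 40, 130, 140],  # take this crop from image 1
    ],
)
crops = torchvision.ops.roi_align(images, boxes, output_size=(64, 64), sampling_ratio=4)
assert crops.shape == (2, 3, 64, 64)
```
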
diff --git a/happypose/toolbox/lib3d/distances.py b/happypose/toolbox/lib3d/distances.py
index 871d5dd4..4894447d 100644
--- a/happypose/toolbox/lib3d/distances.py
+++ b/happypose/toolbox/lib3d/distances.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import torch
diff --git a/happypose/toolbox/lib3d/mesh_losses.py b/happypose/toolbox/lib3d/mesh_losses.py
index bde4a9c1..907fd50c 100644
--- a/happypose/toolbox/lib3d/mesh_losses.py
+++ b/happypose/toolbox/lib3d/mesh_losses.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import torch
@@ -44,7 +42,9 @@ def compute_ADD_L1_loss(TCO_gt, TCO_pred, points):
assert TCO_pred.shape == (bsz, 4, 4) and TCO_gt.shape == (bsz, 4, 4)
assert points.dim() == 3 and points.shape[-1] == 3
dists = (
- (transform_pts(TCO_gt, points) - transform_pts(TCO_pred, points)).abs().mean(dim=(-1, -2))
+ (transform_pts(TCO_gt, points) - transform_pts(TCO_pred, points))
+ .abs()
+ .mean(dim=(-1, -2))
)
return dists
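
`compute_ADD_L1_loss` reduces to the mean absolute coordinate difference of the model points under the ground-truth and predicted poses. An editor-added numeric check with a minimal stand-in for `transform_pts`:

```python
import torch

def transform_pts(T, pts):
    # Minimal stand-in for lib3d.transform_ops.transform_pts: [B,4,4] x [B,N,3].
    return (T[:, :3, :3] @ pts.transpose(1, 2)).transpose(1, 2) + T[:, :3, 3].unsqueeze(1)

points = torch.rand(1, 100, 3)
TCO_gt = torch.eye(4).unsqueeze(0)
TCO_pred = TCO_gt.clone()
TCO_pred[:, 0, 3] += 0.01  # 1 cm translation error along x

dists = (
    (transform_pts(TCO_gt, points) - transform_pts(TCO_pred, points))
    .abs()
    .mean(dim=(-1, -2))
)
# -> ~0.0033: the 1 cm error averaged over the three coordinates
```
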
diff --git a/happypose/toolbox/lib3d/mesh_ops.py b/happypose/toolbox/lib3d/mesh_ops.py
index c29b06f5..e9a3eca0 100644
--- a/happypose/toolbox/lib3d/mesh_ops.py
+++ b/happypose/toolbox/lib3d/mesh_ops.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import numpy as np
import torch
diff --git a/happypose/toolbox/lib3d/multiview.py b/happypose/toolbox/lib3d/multiview.py
index ceb08f15..f9d3f40e 100644
--- a/happypose/toolbox/lib3d/multiview.py
+++ b/happypose/toolbox/lib3d/multiview.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import numpy as np
import torch
@@ -37,7 +35,10 @@ def _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0):
obj.reparentTo(root)
obj.setPos(0, 0, 0)
- TCCGL = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=float)
+ TCCGL = np.array(
+ [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
+ dtype=float,
+ )
tCR = np.array(tCR.tolist())
TOC = Transform(np.array(TCO.tolist())).inverse().toHomogeneousMatrix()
@@ -96,7 +97,7 @@ def get_1_view_TCO_pos_front(TCO, tCR):
cam_positions_wrt_cam0 = np.array(
[
[0, 0, 0],
- ]
+ ],
)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
@@ -107,7 +108,7 @@ def get_3_views_TCO_pos_front(TCO, tCR):
[0, 0, 0],
[1, 0, 0],
[-1, 0, 0],
- ]
+ ],
)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
@@ -120,7 +121,7 @@ def get_5_views_TCO_pos_front(TCO, tCR):
[-1, 0, 0],
[0, 0, 1],
[0, 0, -1],
- ]
+ ],
)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
@@ -131,7 +132,7 @@ def get_3_views_TCO_pos_sphere(TCO, tCR):
[0, 0, 0],
[1, 0, 0],
[-1, 0, 0],
- ]
+ ],
)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
@@ -145,7 +146,7 @@ def get_6_views_TCO_pos_sphere(TCO, tCR):
[0, 1, 1],
[-1, 1, 0],
[0, 1, -1],
- ]
+ ],
)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
@@ -162,6 +163,7 @@ def get_26_views_TCO_pos_sphere(TCO, tCR):
cam_positions_wrt_cam0 = np.array(cam_positions_wrt_cam0, dtype=float)
return _get_views_TCO_pos_sphere(TCO, tCR, cam_positions_wrt_cam0)
+
def make_TCO_multiview(
TCO: torch.Tensor,
tCR: torch.Tensor,
@@ -170,14 +172,16 @@ def make_TCO_multiview(
remove_TCO_rendering: bool = False,
views_inplane_rotations: bool = False,
):
- """_summary_
+ """_summary_.
Args:
+ ----
TCO (torch.Tensor): (bsz, 4, 4)
tCR (torch.Tensor): (bsz, 3)
Returns:
+ -------
_type_: _description_
"""
bsz = TCO.shape[0]
@@ -188,7 +192,7 @@ def make_TCO_multiview(
if n_views == 1:
TC0_CV = []
- for b in range(bsz):
+ for _b in range(bsz):
TC0_CV_ = [np.eye(4)]
TC0_CV.append(TC0_CV_)
TC0_CV = torch.as_tensor(np.stack(TC0_CV), device=device, dtype=dtype)
@@ -239,7 +243,9 @@ def make_TCO_multiview(
for idx, angle in enumerate([np.pi / 2, np.pi, 3 * np.pi / 2]):
idx = idx + 1
dR = torch.as_tensor(
- transforms3d.euler.euler2mat(0, 0, angle), device=device, dtype=dtype
+ transforms3d.euler.euler2mat(0, 0, angle),
+ device=device,
+ dtype=dtype,
)
TCV_O[:, :, idx, :3, :3] = dR @ TCV_O[:, :, idx, :3, :3]
TCV_O = TCV_O.flatten(1, 2)
diff --git a/happypose/toolbox/lib3d/rigid_mesh_database.py b/happypose/toolbox/lib3d/rigid_mesh_database.py
index 6e4f5d9a..ed26cc3b 100644
--- a/happypose/toolbox/lib3d/rigid_mesh_database.py
+++ b/happypose/toolbox/lib3d/rigid_mesh_database.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +14,8 @@
"""
-
# Standard Library
from copy import deepcopy
-from typing import List
# Third Party
import numpy as np
@@ -27,16 +24,12 @@
# MegaPose
from happypose.toolbox.datasets.object_dataset import RigidObject
-
-
from happypose.toolbox.lib3d.mesh_ops import get_meshes_bounding_boxes, sample_points
-from happypose.toolbox.lib3d.symmetries import make_symmetries_poses
from happypose.toolbox.utils.tensor_collection import TensorCollection
def as_mesh(scene_or_mesh):
- """
- Convert a possible scene to a mesh.
+ """Convert a possible scene to a mesh.
If conversion occurs, the returned mesh has only vertex and face data.
"""
@@ -49,7 +42,7 @@ def as_mesh(scene_or_mesh):
tuple(
trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
for g in scene_or_mesh.geometry.values()
- )
+ ),
)
else:
mesh = scene_or_mesh
@@ -57,26 +50,25 @@ def as_mesh(scene_or_mesh):
class MeshDataBase:
- def __init__(self, obj_list: List[RigidObject]):
+ def __init__(self, obj_list: list[RigidObject]):
self.obj_dict = {obj.label: obj for obj in obj_list}
self.obj_list = obj_list
- self.infos = {obj.label: dict() for obj in obj_list}
+ self.infos = {obj.label: {} for obj in obj_list}
self.meshes = {
- l: as_mesh(
+ label: as_mesh(
trimesh.load(
obj.mesh_path,
group_material=False,
process=False,
skip_materials=True,
maintain_order=True,
- )
+ ),
)
- for l, obj in self.obj_dict.items()
+ for label, obj in self.obj_dict.items()
}
for label, obj in self.obj_dict.items():
if obj.diameter_meters is None:
-
mesh = self.meshes[label]
points = np.array(mesh.vertices) * obj.scale
extent = points.max(0) - points.min(0)
@@ -97,7 +89,9 @@ def batched(self, aabb=False, resample_n_points=None, n_sym=64):
new_infos = deepcopy(self.infos)
for label, mesh in self.meshes.items():
if aabb:
- points_n = get_meshes_bounding_boxes(torch.as_tensor(mesh.vertices).unsqueeze(0))[0]
+ points_n = get_meshes_bounding_boxes(
+ torch.as_tensor(mesh.vertices).unsqueeze(0),
+ )[0]
elif resample_n_points:
if isinstance(mesh, trimesh.PointCloud):
points_n = sample_points(
@@ -107,7 +101,7 @@ def batched(self, aabb=False, resample_n_points=None, n_sym=64):
)[0]
else:
points_n = torch.tensor(
- trimesh.sample.sample_surface(mesh, resample_n_points)[0]
+ trimesh.sample.sample_surface(mesh, resample_n_points)[0],
)
else:
points_n = torch.tensor(mesh.vertices)
@@ -128,7 +122,11 @@ def batched(self, aabb=False, resample_n_points=None, n_sym=64):
labels = np.array(labels)
points = pad_stack_tensors(points, fill="select_random", deterministic=True)
- symmetries = pad_stack_tensors(symmetries, fill=torch.eye(4), deterministic=True)
+ symmetries = pad_stack_tensors(
+ symmetries,
+ fill=torch.eye(4),
+ deterministic=True,
+ )
return BatchedMeshes(new_infos, labels, points, symmetries).float()
@@ -147,9 +145,9 @@ def n_sym_mapping(self):
return {label: obj["n_sym"] for label, obj in self.infos.items()}
def select(self, labels):
- ids = [self.label_to_id[l] for l in labels]
+ ids = [self.label_to_id[label] for label in labels]
return Meshes(
- infos=[self.infos[l] for l in labels],
+ infos=[self.infos[label] for label in labels],
labels=self.labels[ids],
points=self.points[ids],
symmetries=self.symmetries[ids],
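
`as_mesh` flattens a multi-geometry `trimesh.Scene` into a single vertex/face mesh. One way to get the same result in isolation (an editor-added sketch; the `model.obj` path is hypothetical, and the truncated call above presumably wraps a concatenation like this one):

```python
import trimesh

loaded = trimesh.load("model.obj")  # may return a Trimesh or a Scene
if isinstance(loaded, trimesh.Scene):
    mesh = trimesh.util.concatenate(
        tuple(
            trimesh.Trimesh(vertices=g.vertices, faces=g.faces)
            for g in loaded.geometry.values()
        ),
    )
else:
    mesh = loaded
```
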
diff --git a/happypose/toolbox/lib3d/rotations.py b/happypose/toolbox/lib3d/rotations.py
index 326abac0..2458dfb3 100644
--- a/happypose/toolbox/lib3d/rotations.py
+++ b/happypose/toolbox/lib3d/rotations.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import numpy as np
import torch
@@ -23,11 +21,10 @@
def compute_rotation_matrix_from_ortho6d(poses):
- """
- Code from https://github.com/papagina/RotationContinuity
+ """Code from https://github.com/papagina/RotationContinuity
On the Continuity of Rotation Representations in Neural Networks
Zhou et al. CVPR19
- https://zhouyisjtu.github.io/project_rotation/rotation.html
+ https://zhouyisjtu.github.io/project_rotation/rotation.html.
"""
assert poses.shape[-1] == 6
x_raw = poses[..., 0:3]
@@ -41,9 +38,8 @@ def compute_rotation_matrix_from_ortho6d(poses):
def euler2quat(xyz, axes="sxyz"):
- """
- euler: sxyz
- quaternion: xyzw
+ """euler: sxyz
+ quaternion: xyzw.
"""
wxyz = transforms3d.euler.euler2quat(*xyz, axes=axes)
xyzw = [*wxyz[1:], wxyz[0]]
@@ -51,12 +47,14 @@ def euler2quat(xyz, axes="sxyz"):
def angle_axis_to_rotation_matrix(angle_axis):
- """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix
+ """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix.
Args:
+ ----
angle_axis (Tensor): tensor of 3d vector of axis-angle rotations.
Returns:
+ -------
Tensor: tensor of 4x4 rotation matrices.
Shape:
@@ -64,6 +62,7 @@ def angle_axis_to_rotation_matrix(angle_axis):
- Output: :math:`(N, 4, 4)`
Example:
+ -------
>>> input = torch.rand(1, 3) # Nx3
>>> output = tgm.angle_axis_to_rotation_matrix(input) # Nx4x4
"""
@@ -88,13 +87,19 @@ def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
r22 = cos_theta + wz * wz * (k_one - cos_theta)
- rotation_matrix = torch.cat([r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
+ rotation_matrix = torch.cat(
+ [r00, r01, r02, r10, r11, r12, r20, r21, r22],
+ dim=1,
+ )
return rotation_matrix.view(-1, 3, 3)
def _compute_rotation_matrix_taylor(angle_axis):
rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
k_one = torch.ones_like(rx)
- rotation_matrix = torch.cat([k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
+ rotation_matrix = torch.cat(
+ [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one],
+ dim=1,
+ )
return rotation_matrix.view(-1, 3, 3)
# stolen from ceres/rotation.h
@@ -130,9 +135,11 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
Args:
+ ----
quaternion (torch.Tensor): tensor with quaternions.
Return:
+ ------
torch.Tensor: tensor with angle axis of rotation.
Shape:
@@ -140,15 +147,18 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
- Output: :math:`(*, 3)`
Example:
+ -------
>>> quaternion = torch.rand(2, 4) # Nx4
>>> angle_axis = tgm.quaternion_to_angle_axis(quaternion) # Nx3
"""
if not torch.is_tensor(quaternion):
- raise TypeError("Input type is not a torch.Tensor. Got {}".format(type(quaternion)))
+ msg = f"Input type is not a torch.Tensor. Got {type(quaternion)}"
+ raise TypeError(msg)
if not quaternion.shape[-1] == 4:
+ msg = f"Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}"
raise ValueError(
- "Input must be a tensor of shape Nx4 or 4. Got {}".format(quaternion.shape)
+ msg,
)
# unpack input and compute conversion
q1: torch.Tensor = quaternion[..., 1]
@@ -159,7 +169,9 @@ def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
cos_theta: torch.Tensor = quaternion[..., 0]
two_theta: torch.Tensor = 2.0 * torch.where(
- cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)
+ cos_theta < 0.0,
+ torch.atan2(-sin_theta, -cos_theta),
+ torch.atan2(sin_theta, cos_theta),
)
k_pos: torch.Tensor = two_theta / sin_theta
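
`compute_rotation_matrix_from_ortho6d` recovers a rotation from two 3-vectors by Gram-Schmidt, following Zhou et al. (CVPR'19). A minimal editor-added sketch of that construction; the column-stacking convention below is illustrative and may differ from the repo's exact layout:

```python
import torch
import torch.nn.functional as F

def ortho6d_to_matrix(poses: torch.Tensor) -> torch.Tensor:
    x_raw, y_raw = poses[..., 0:3], poses[..., 3:6]
    x = F.normalize(x_raw, dim=-1)                          # first basis vector
    z = F.normalize(torch.cross(x, y_raw, dim=-1), dim=-1)  # orthogonal to x, y_raw
    y = torch.cross(z, x, dim=-1)                           # completes the frame
    return torch.stack((x, y, z), dim=-1)                   # columns x, y, z

R = ortho6d_to_matrix(torch.rand(4, 6))
assert torch.allclose(R @ R.transpose(-1, -2), torch.eye(3).expand(4, -1, -1), atol=1e-5)
```
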
diff --git a/happypose/toolbox/lib3d/symmetries.py b/happypose/toolbox/lib3d/symmetries.py
index 79e3d9d0..891f14ae 100644
--- a/happypose/toolbox/lib3d/symmetries.py
+++ b/happypose/toolbox/lib3d/symmetries.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +14,9 @@
"""
-
# Standard Library
from dataclasses import dataclass
-from typing import List, Optional
+from typing import Optional
# Third Party
import numpy as np
@@ -42,23 +40,22 @@ class ContinuousSymmetry:
@dataclass
class DiscreteSymmetry:
- """
- pose: (4, 4) homogeneous matrix
- """
+ """pose: (4, 4) homogeneous matrix."""
pose: npt.NDArray[np.float_]
def make_symmetries_poses(
- symmetries_discrete: List[DiscreteSymmetry] = [],
- symmetries_continuous: List[ContinuousSymmetry] = [],
+ symmetries_discrete: list[DiscreteSymmetry] = [],
+ symmetries_continuous: list[ContinuousSymmetry] = [],
n_symmetries_continuous: int = 8,
units: str = "mm",
scale: Optional[float] = None,
) -> np.ndarray:
"""Generates the set of object symmetries.
- Returns:
+ Returns
+ -------
(num_symmetries, 4, 4) array
"""
# Note: See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py
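
`make_symmetries_poses` expands each `ContinuousSymmetry` into `n_symmetries_continuous` discrete poses spaced around its axis. An editor-added numpy sketch for the z-axis case (a simplification: the real function also handles offsets, units/scale, and discrete symmetries):

```python
import numpy as np

def z_symmetry_poses(n: int = 8) -> np.ndarray:
    poses = np.tile(np.eye(4), (n, 1, 1))
    for k, theta in enumerate(np.linspace(0, 2 * np.pi, n, endpoint=False)):
        c, s = np.cos(theta), np.sin(theta)
        poses[k, :3, :3] = [[c, -s, 0], [s, c, 0], [0, 0, 1]]  # rotation about z
    return poses  # (n, 4, 4)

assert z_symmetry_poses().shape == (8, 4, 4)
```
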
diff --git a/happypose/toolbox/lib3d/transform.py b/happypose/toolbox/lib3d/transform.py
index 71ad8392..048c57da 100644
--- a/happypose/toolbox/lib3d/transform.py
+++ b/happypose/toolbox/lib3d/transform.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
-from typing import Tuple, Union
+from typing import Union
# Third Party
import numpy as np
@@ -35,17 +34,16 @@ def __init__(
pin.Quaternion,
np.ndarray,
torch.Tensor,
- Tuple[float, float, float, float],
+ tuple[float, float, float, float],
], # rotation
- Union[np.ndarray, torch.Tensor, Tuple[float, float, float]], # translation
- ]
+ Union[np.ndarray, torch.Tensor, tuple[float, float, float]], # translation
+ ],
):
- """
- - Transform(T): SE3 or (4, 4) array
+ """- Transform(T): SE3 or (4, 4) array
- Transform(quaternion, translation), where
quaternion: pin.Quaternion, 4-array representing a xyzw quaternion,
or a 3x3 rotation matrix
- translation: 3-array
+ translation: 3-array.
"""
if len(args) == 1:
arg_T = args[0]
@@ -118,5 +116,5 @@ def quaternion(self) -> pin.Quaternion:
@property
def matrix(self) -> np.ndarray:
- """Returns 4x4 homogeneous matrix representations"""
+ """Returns 4x4 homogeneous matrix representations."""
return self._T.homogeneous
diff --git a/happypose/toolbox/lib3d/transform_ops.py b/happypose/toolbox/lib3d/transform_ops.py
index f9d20b75..66f4ac2c 100644
--- a/happypose/toolbox/lib3d/transform_ops.py
+++ b/happypose/toolbox/lib3d/transform_ops.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,9 +14,7 @@
"""
-
# Standard Library
-from typing import Tuple
# Third Party
import numpy as np
@@ -29,16 +26,17 @@
def transform_pts(T: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
- """
-
- Args:
+ """Args:
+ ----
T (torch.Tensor): (bsz, 4, 4) or (bsz, dim2, 4, 4)
- pts (torch.Tensor): (bsz, n_pts, 3)
+ pts (torch.Tensor): (bsz, n_pts, 3).
- Raises:
+ Raises
+ ------
ValueError: _description_
- Returns:
+ Returns
+ -------
torch.Tensor: _description_
"""
bsz = T.shape[0]
@@ -50,7 +48,8 @@ def transform_pts(T: torch.Tensor, pts: torch.Tensor) -> torch.Tensor:
elif T.dim() == 3:
assert T.shape == (bsz, 4, 4)
else:
- raise ValueError("Unsupported shape for T", T.shape)
+ msg = "Unsupported shape for T"
+ raise ValueError(msg, T.shape)
pts = pts.unsqueeze(-1)
T = T.unsqueeze(-3)
pts_transformed = T[..., :3, :3] @ pts + T[..., :3, [-1]]
@@ -70,8 +69,8 @@ def invert_transform_matrices(T: torch.Tensor) -> torch.Tensor:
def add_noise(
TCO: torch.Tensor,
- euler_deg_std: Tuple[float, float, float] = (15, 15, 15),
- trans_std: Tuple[float, float, float] = (0.01, 0.01, 0.05),
+ euler_deg_std: tuple[float, float, float] = (15, 15, 15),
+ trans_std: tuple[float, float, float] = (0.01, 0.01, 0.05),
) -> torch.Tensor:
TCO_out = TCO.clone()
device = TCO_out.device
@@ -85,7 +84,9 @@ def add_noise(
)
euler_noise_rad = euler_noise_deg * np.pi / 180
R_noise = (
- torch.tensor(np.stack([transforms3d.euler.euler2mat(*xyz) for xyz in euler_noise_rad]))
+ torch.tensor(
+ np.stack([transforms3d.euler.euler2mat(*xyz) for xyz in euler_noise_rad]),
+ )
.float()
.to(device)
)
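
The `add_noise` hunk above builds its rotation perturbations by sampling per-axis Euler angles and converting each one with `transforms3d`. The same construction in isolation (editor-added; the standard deviations are copied from the defaults above):

```python
import numpy as np
import torch
import transforms3d

bsz, euler_deg_std = 4, (15.0, 15.0, 15.0)
euler_noise_deg = np.random.normal(scale=euler_deg_std, size=(bsz, 3))
euler_noise_rad = euler_noise_deg * np.pi / 180
R_noise = torch.tensor(
    np.stack([transforms3d.euler.euler2mat(*xyz) for xyz in euler_noise_rad]),
).float()  # [bsz,3,3]; add_noise applies this to the rotation block of TCO
```
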
diff --git a/happypose/toolbox/renderer/geometry.py b/happypose/toolbox/renderer/geometry.py
index 188d8f03..7135c8e2 100644
--- a/happypose/toolbox/renderer/geometry.py
+++ b/happypose/toolbox/renderer/geometry.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,10 +11,7 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-"""
-
-"""
https://github.com/ikalevatykh/panda3d_viewer/blob/master/panda3d_viewer/geometry.py
"""
@@ -59,7 +55,8 @@ class ViewerClosedError(ViewerError):
def make_axes():
"""Make an axes geometry.
- Returns:
+ Returns
+ -------
Geom -- p3d geometry
"""
vformat = GeomVertexFormat.get_v3c4()
@@ -87,10 +84,12 @@ def make_grid(num_ticks=10, step=1.0):
"""Make a grid geometry.
Keyword Arguments:
+ -----------------
step {float} -- step in meters (default: {1.0})
num_ticks {int} -- ticks number per axis (default: {5})
Returns:
+ -------
Geom -- p3d geometry
"""
ticks = np.arange(-num_ticks // 2, num_ticks // 2 + 1) * step
@@ -119,14 +118,17 @@ def make_capsule(radius, length, num_segments=16, num_rings=16):
"""Make capsule geometry.
Arguments:
+ ---------
radius {float} -- capsule radius
length {float} -- capsule length
Keyword Arguments:
+ -----------------
num_segments {int} -- segments number (default: {16})
num_rings {int} -- rings number (default: {16})
Returns:
+ -------
Geom -- p3d geometry
"""
vformat = GeomVertexFormat.get_v3n3t2()
@@ -164,10 +166,12 @@ def make_cylinder(num_segments=16, closed=True):
"""Make a uniform cylinder geometry.
Keyword Arguments:
+ -----------------
num_segments {int} -- segments number (default: {16})
closed {bool} -- add caps (default: {True})
Returns:
+ -------
Geom -- p3d geometry
"""
vformat = GeomVertexFormat.get_v3n3t2()
@@ -222,7 +226,8 @@ def make_cylinder(num_segments=16, closed=True):
def make_box():
"""Make a uniform box geometry.
- Returns:
+ Returns
+ -------
Geom -- p3d geometry
"""
vformat = GeomVertexFormat.get_v3n3t2()
@@ -257,9 +262,11 @@ def make_plane(size=(1.0, 1.0)):
"""Make a plane geometry.
Arguments:
+ ---------
size {tuple} -- plane size x,y
Returns:
+ -------
Geom -- p3d geometry
"""
vformat = GeomVertexFormat.get_v3n3t2()
@@ -290,10 +297,12 @@ def make_sphere(num_segments=16, num_rings=16):
"""Make a uniform UV sphere geometry.
Keyword Arguments:
+ -----------------
num_segments {int} -- segments number (default: {16})
num_rings {int} -- rings number (default: {16})
Returns:
+ -------
Geom -- p3d geometry
"""
return make_capsule(1.0, 0.0, num_segments, num_rings)
@@ -303,16 +312,19 @@ def make_points(vertices, colors=None, texture_coords=None, geom=None):
"""Make or update existing points set geometry.
Arguments:
+ ---------
root_path {str} -- path to the group's root node
name {str} -- node name within a group
vertices {list} -- point coordinates (and other data in a point cloud format)
Keyword Arguments:
+ -----------------
colors {list} -- colors (default: {None})
texture_coords {list} -- texture coordinates (default: {None})
geom {Geom} -- geometry to update (default: {None})
Returns:
+ -------
Geom -- p3d geometry
"""
if not isinstance(vertices, np.ndarray):
@@ -324,7 +336,10 @@ def make_points(vertices, colors=None, texture_coords=None, geom=None):
if colors.dtype != np.uint8:
colors = np.uint8(colors * 255)
vertices = np.column_stack(
- (vertices.view(dtype=np.uint32).reshape(-1, 3), colors.view(dtype=np.uint32))
+ (
+ vertices.view(dtype=np.uint32).reshape(-1, 3),
+ colors.view(dtype=np.uint32),
+ ),
)
if texture_coords is not None:
@@ -334,7 +349,7 @@ def make_points(vertices, colors=None, texture_coords=None, geom=None):
(
vertices.view(dtype=np.uint32).reshape(-1, 3),
texture_coords.view(dtype=np.uint32).reshape(-1, 2),
- )
+ ),
)
data = vertices.tostring()
@@ -347,8 +362,9 @@ def make_points(vertices, colors=None, texture_coords=None, geom=None):
elif vertices.strides[0] == 20:
vformat = GeomVertexFormat.get_v3t2()
else:
+ msg = f"Incompatible point clout format: {vertices.dtype},{vertices.shape}"
raise ViewerError(
- "Incompatible point clout format: {},{}".format(vertices.dtype, vertices.shape)
+ msg,
)
vdata = GeomVertexData("vdata", vformat, Geom.UHDynamic)
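
`make_points` interleaves float32 positions with packed RGBA colors by reinterpreting both as `uint32` words before `np.column_stack`; the resulting 16-byte row stride is what the `get_v3c4` branch above keys on. An editor-added standalone demo of that trick (`tobytes` is the modern spelling of the `tostring` call used in the file):

```python
import numpy as np

vertices = np.random.rand(5, 3).astype(np.float32)      # xyz positions
colors = (np.random.rand(5, 4) * 255).astype(np.uint8)  # RGBA, one byte each

packed = np.column_stack(
    (
        vertices.view(dtype=np.uint32).reshape(-1, 3),  # 3 words: x, y, z bit patterns
        colors.view(dtype=np.uint32),                   # 1 word: packed RGBA
    ),
)
assert packed.strides[0] == 16  # 16 bytes per vertex -> GeomVertexFormat.get_v3c4
data = packed.tobytes()
```
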
diff --git a/happypose/toolbox/renderer/panda3d_batch_renderer.py b/happypose/toolbox/renderer/panda3d_batch_renderer.py
index 7e3e7698..c1e6ee34 100644
--- a/happypose/toolbox/renderer/panda3d_batch_renderer.py
+++ b/happypose/toolbox/renderer/panda3d_batch_renderer.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,10 +14,9 @@
"""
-
# Standard Library
from dataclasses import dataclass
-from typing import List, Optional, Set, Union
+from typing import Optional, Union
# Third Party
import numpy as np
@@ -28,7 +26,6 @@
# HappyPose
from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
-
# MegaPose
from happypose.toolbox.lib3d.transform import Transform
from happypose.toolbox.lib3d.transform_ops import invert_transform_matrices
@@ -49,10 +46,9 @@
@dataclass
class RenderOutput:
- """
- rgb: (h, w, 3) uint8
+ """rgb: (h, w, 3) uint8
normals: (h, w, 3) uint8
- depth: (h, w, 1) float32
+ depth: (h, w, 1) float32.
"""
data_id: int
@@ -63,8 +59,7 @@ class RenderOutput:
@dataclass
class BatchRenderOutput:
- """
- rgb: (bsz, 3, h, w) float, values in [0, 1]
+ """rgb: (bsz, 3, h, w) float, values in [0, 1]
normals: (bsz, 3, h, w) float, values in [0, 1]
depth: (bsz, 1, h, w) float, in meters.
"""
@@ -77,8 +72,8 @@ class BatchRenderOutput:
@dataclass
class SceneData:
camera_data: Panda3dCameraData
- light_datas: List[Panda3dLightData]
- object_datas: List[Panda3dObjectData]
+ light_datas: list[Panda3dLightData]
+ object_datas: list[Panda3dObjectData]
@dataclass
@@ -94,9 +89,8 @@ def worker_loop(
in_queue: torch.multiprocessing.Queue,
out_queue: torch.multiprocessing.Queue,
object_dataset: RigidObjectDataset,
- preload_labels: Set[str] = set(),
+ preload_labels: set[str] = set(),
) -> None:
-
logger.debug(f"Init worker: {worker_id}")
renderer = Panda3dSceneRenderer(
asset_dataset=object_dataset,
@@ -139,13 +133,9 @@ def worker_loop(
output = RenderOutput(
data_id=render_args.data_id,
- rgb=torch.tensor(renderings_.rgb).share_memory_(),
- normals=torch.tensor(renderings_.normals).share_memory_()
- if render_args.render_normals
- else None,
- depth=torch.tensor(renderings_.depth).share_memory_()
- if render_args.render_depth
- else None,
+ rgb=renderings_.rgb,
+ normals=renderings_.normals if render_args.render_normals else None,
+ depth=renderings_.depth if render_args.render_depth else None,
)
del render_args
out_queue.put(output)
@@ -161,7 +151,6 @@ def __init__(
preload_cache: bool = True,
split_objects: bool = False,
):
-
assert n_workers >= 1
self._object_dataset = object_dataset
self._n_workers = n_workers
@@ -172,15 +161,16 @@ def __init__(
def make_scene_data(
self,
- labels: List[str],
+ labels: list[str],
TCO: torch.Tensor,
K: torch.Tensor,
- light_datas: List[List[Panda3dLightData]],
+ light_datas: list[list[Panda3dLightData]],
resolution: Resolution,
- ) -> List[SceneData]:
- """_summary_
+ ) -> list[SceneData]:
+ """_summary_.
Args:
+ ----
labels (List[str]): _description_
TCO (torch.Tensor): (bsz, 4, 4) float
K (torch.Tensor): (bsz, 3, 3) float
@@ -188,6 +178,7 @@ def make_scene_data(
resolution (Resolution): _description_
Returns:
+ -------
List[SceneData]: _description_
"""
bsz = TCO.shape[0]
@@ -210,7 +201,7 @@ def make_scene_data(
Panda3dObjectData(
label=label_n,
TWO=TWO,
- )
+ ),
],
light_datas=lights_n,
)
@@ -219,16 +210,15 @@ def make_scene_data(
def render(
self,
- labels: List[str],
+ labels: list[str],
TCO: torch.Tensor,
K: torch.Tensor,
- light_datas: List[List[Panda3dLightData]],
+ light_datas: list[list[Panda3dLightData]],
resolution: Resolution,
render_depth: bool = False,
render_mask: bool = False,
render_normals: bool = False,
) -> BatchRenderOutput:
-
if render_mask:
raise NotImplementedError
@@ -253,11 +243,11 @@ def render(
for n in np.arange(bsz):
renders = self._out_queue.get()
data_id = renders.data_id
- list_rgbs[data_id] = renders.rgb
+ list_rgbs[data_id] = torch.tensor(renders.rgb)
if render_depth:
- list_depths[data_id] = renders.depth
+ list_depths[data_id] = torch.tensor(renders.depth)
if render_normals:
- list_normals[data_id] = renders.normals
+ list_normals[data_id] = torch.tensor(renders.normals)
del renders
assert list_rgbs[0] is not None
@@ -265,7 +255,7 @@ def render(
rgbs = torch.stack(list_rgbs).pin_memory().cuda(non_blocking=True)
else:
rgbs = torch.stack(list_rgbs)
-
+
rgbs = rgbs.float().permute(0, 3, 1, 2) / 255
if render_depth:
@@ -294,18 +284,20 @@ def render(
depths=depths,
normals=normals,
)
-
+
def _init_renderers(self, preload_cache: bool) -> None:
object_labels = [obj.label for obj in self._object_dataset.list_objects]
- self._renderers: List[torch.multiprocessing.Process] = []
+ self._renderers: list[torch.multiprocessing.Process] = []
if self._split_objects:
- self._in_queues: List[torch.multiprocessing.Queue] = [
+ self._in_queues: list[torch.multiprocessing.Queue] = [
torch.multiprocessing.Queue() for _ in range(self._n_workers)
]
- self._worker_id_to_queue = {n: self._in_queues[n] for n in range(self._n_workers)}
+ self._worker_id_to_queue = {
+ n: self._in_queues[n] for n in range(self._n_workers)
+ }
object_labels_split = np.array_split(object_labels, self._n_workers)
- self._object_label_to_queue = dict()
+ self._object_label_to_queue = {}
for n, split in enumerate(object_labels_split):
for label in split:
self._object_label_to_queue[label] = self._in_queues[n]
@@ -313,7 +305,9 @@ def _init_renderers(self, preload_cache: bool) -> None:
object_labels_split = [object_labels for _ in range(self._n_workers)]
self._in_queues = [torch.multiprocessing.Queue()]
self._object_label_to_queue = {k: self._in_queues[0] for k in object_labels}
- self._worker_id_to_queue = {n: self._in_queues[0] for n in range(self._n_workers)}
+ self._worker_id_to_queue = {
+ n: self._in_queues[0] for n in range(self._n_workers)
+ }
self._out_queue: torch.multiprocessing.Queue = torch.multiprocessing.Queue()
@@ -324,13 +318,13 @@ def _init_renderers(self, preload_cache: bool) -> None:
preload_labels = set()
renderer_process = torch.multiprocessing.Process(
target=worker_loop,
- kwargs=dict(
- worker_id=n,
- in_queue=self._worker_id_to_queue[n],
- out_queue=self._out_queue,
- object_dataset=self._object_dataset,
- preload_labels=preload_labels,
- ),
+ kwargs={
+ "worker_id": n,
+ "in_queue": self._worker_id_to_queue[n],
+ "out_queue": self._out_queue,
+ "object_dataset": self._object_dataset,
+ "preload_labels": preload_labels,
+ },
)
renderer_process.start()
self._renderers.append(renderer_process)
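
The batch-renderer hunks change what crosses the queues: workers previously put `share_memory_()` torch tensors, and now return the renderer's plain numpy arrays, with the `torch.tensor(...)` conversion moved into the parent's `render` (see the `list_rgbs[data_id] = torch.tensor(renders.rgb)` hunk above). An editor-added minimal sketch of that producer/consumer shape, with a stand-in for the actual rendering:

```python
import numpy as np
import torch
import torch.multiprocessing as mp

def worker_loop(in_queue, out_queue):
    while True:
        data_id = in_queue.get()
        if data_id is None:  # poison pill: shut the worker down
            break
        rgb = np.zeros((240, 320, 3), dtype=np.uint8)  # stand-in for a render
        out_queue.put((data_id, rgb))  # numpy crosses the queue, not tensors

if __name__ == "__main__":
    in_q, out_q = mp.Queue(), mp.Queue()
    p = mp.Process(target=worker_loop, args=(in_q, out_q))
    p.start()
    in_q.put(0)
    data_id, rgb = out_q.get()
    rgbs = torch.stack([torch.tensor(rgb)])  # conversion happens in the parent
    in_q.put(None)
    p.join()
```
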
diff --git a/happypose/toolbox/renderer/panda3d_scene_renderer.py b/happypose/toolbox/renderer/panda3d_scene_renderer.py
index 1631899d..0933dcd3 100644
--- a/happypose/toolbox/renderer/panda3d_scene_renderer.py
+++ b/happypose/toolbox/renderer/panda3d_scene_renderer.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import builtins
import os
@@ -23,15 +21,14 @@
import time
import xml.etree.ElementTree as ET
from collections import defaultdict
-from typing import Dict, List, Set
from dataclasses import dataclass
from functools import partial
-from typing import Dict, List, Optional, Set
-# Third Party
-import torch
import numpy as np
import panda3d as p3d
+
+# Third Party
+import torch
from direct.showbase.ShowBase import ShowBase
from tqdm import tqdm
@@ -53,7 +50,7 @@
@dataclass
class Panda3dDebugData:
- timings: Dict[str, float]
+ timings: dict[str, float]
class App(ShowBase):
@@ -90,7 +87,7 @@ def __init__(self) -> None:
assert len(devices) == 1
if "EGL_VISIBLE_DEVICES" not in os.environ:
out = subprocess.check_output(
- ["nvidia-smi", "--id=" + str(devices[0]), "-q", "--xml-format"]
+ ["nvidia-smi", "--id=" + str(devices[0]), "-q", "--xml-format"],
)
tree = ET.fromstring(out)
gpu = tree.findall("gpu")[0]
@@ -109,7 +106,7 @@ def __init__(self) -> None:
def make_scene_lights(
ambient_light_color: RgbaColor = (0.1, 0.1, 0.1, 1.0),
point_lights_color: RgbaColor = (0.4, 0.4, 0.4, 1.0),
-) -> List[Panda3dLightData]:
+) -> list[Panda3dLightData]:
"""Creates 1 ambient light + 6 point lights to illuminate a panda3d scene."""
pos = np.array(
[
@@ -119,11 +116,13 @@ def make_scene_lights(
[0, -1, 0],
[0, 0, 1],
[0, 0, -1],
- ]
+ ],
)
def pos_fn(
- root_node: p3d.core.NodePath, light_node: p3d.core.NodePath, pos: np.ndarray
+ root_node: p3d.core.NodePath,
+ light_node: p3d.core.NodePath,
+ pos: np.ndarray,
) -> None:
radius = root_node.getBounds().radius
xyz_ = pos * radius * 10
@@ -135,8 +134,10 @@ def pos_fn(
pos_fn_ = partial(pos_fn, pos=pos_n)
light_datas.append(
Panda3dLightData(
- light_type="point", color=point_lights_color, positioning_function=pos_fn_
- )
+ light_type="point",
+ color=point_lights_color,
+ positioning_function=pos_fn_,
+ ),
)
return light_datas
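The six point lights sit on the ±x/±y/±z unit directions; `pos_fn` rescales each direction by ten times the scene's bounding radius when the light is attached, so the lights always land well outside the scene. The geometry in isolation (the radius value here is invented):

```python
import numpy as np

directions = np.array(
    [[1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0], [0, 0, 1], [0, 0, -1]]
)
radius = 0.35  # hypothetically, the radius reported by root_node.getBounds()
positions = directions * radius * 10  # mirrors xyz_ = pos * radius * 10 in pos_fn
```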
@@ -150,18 +151,17 @@ class Panda3dSceneRenderer:
def __init__(
self,
asset_dataset: RigidObjectDataset,
- preload_labels: Set[str] = set(),
+ preload_labels: set[str] = set(),
debug: bool = False,
verbose: bool = False,
):
-
self._asset_dataset = asset_dataset
- self._label_to_node: Dict[str, p3d.core.NodePath] = dict()
+ self._label_to_node: dict[str, p3d.core.NodePath] = {}
self.verbose = verbose
self.debug = debug
- self.debug_data = Panda3dDebugData(timings=dict())
+ self.debug_data = Panda3dDebugData(timings={})
- self._cameras_pool: Dict[Resolution, List[Panda3dCamera]] = defaultdict(list)
+ self._cameras_pool: dict[Resolution, list[Panda3dCamera]] = defaultdict(list)
if hasattr(builtins, "base"):
self._app = builtins.base # type: ignore
else:
@@ -176,12 +176,19 @@ def __init__(
def create_new_camera(self, resolution: Resolution) -> Panda3dCamera:
idx = sum([len(x) for x in self._cameras_pool.values()])
- cam = Panda3dCamera.create(f"camera={idx}", resolution=resolution, app=self._app)
+ cam = Panda3dCamera.create(
+ f"camera={idx}",
+ resolution=resolution,
+ app=self._app,
+ )
self._cameras_pool[resolution].append(cam)
return cam
- def get_cameras(self, data_cameras: List[Panda3dCameraData]) -> List[Panda3dCamera]:
- resolution_to_data_cameras: Dict[Resolution, List[Panda3dCameraData]] = defaultdict(list)
+ def get_cameras(self, data_cameras: list[Panda3dCameraData]) -> list[Panda3dCamera]:
+ resolution_to_data_cameras: dict[
+ Resolution,
+ list[Panda3dCameraData],
+ ] = defaultdict(list)
for data_camera in data_cameras:
resolution_to_data_cameras[data_camera.resolution].append(data_camera)
@@ -216,13 +223,18 @@ def use_normals_texture(self, obj_node: p3d.core.NodePath) -> p3d.core.NodePath:
obj_node.setMaterialOff(1)
obj_node.set_color(p3d.core.Vec4((1.0, 1.0, 1.0, 1.0)))
obj_node.setTextureOff(1)
- obj_node.setTexGen(p3d.core.TextureStage.getDefault(), p3d.core.TexGenAttrib.MEyeNormal)
+ obj_node.setTexGen(
+ p3d.core.TextureStage.getDefault(),
+ p3d.core.TexGenAttrib.MEyeNormal,
+ )
obj_node.setTexture(self._rgb_texture)
return obj_node
def setup_scene(
- self, root_node: p3d.core.NodePath, data_objects: List[Panda3dObjectData]
- ) -> List[p3d.core.NodePath]:
+ self,
+ root_node: p3d.core.NodePath,
+ data_objects: list[Panda3dObjectData],
+ ) -> list[p3d.core.NodePath]:
obj_nodes = []
for n, data_obj in enumerate(data_objects):
label = data_obj.label
@@ -241,8 +253,10 @@ def setup_scene(
return obj_nodes
def setup_cameras(
- self, root_node: p3d.core.NodePath, data_cameras: List[Panda3dCameraData]
- ) -> List[Panda3dCamera]:
+ self,
+ root_node: p3d.core.NodePath,
+ data_cameras: list[Panda3dCameraData],
+ ) -> list[Panda3dCamera]:
cameras = self.get_cameras(data_cameras)
for data_camera, camera in zip(data_cameras, cameras):
@@ -258,9 +272,11 @@ def setup_cameras(
return cameras
def render_images(
- self, cameras: List[Panda3dCamera], copy_arrays: bool = True, render_depth: bool = False
- ) -> List[CameraRenderingData]:
-
+ self,
+ cameras: list[Panda3dCamera],
+ copy_arrays: bool = True,
+ render_depth: bool = False,
+ ) -> list[CameraRenderingData]:
self._app.graphicsEngine.renderFrame()
self._app.graphicsEngine.syncFrame()
@@ -277,8 +293,10 @@ def render_images(
return renderings
def setup_lights(
- self, root_node: p3d.core, light_datas: List[Panda3dLightData]
- ) -> List[p3d.core.NodePath]:
+ self,
+ root_node: p3d.core,
+ light_datas: list[Panda3dLightData],
+ ) -> list[p3d.core.NodePath]:
light_node_paths = []
for n, light_data in enumerate(light_datas):
if light_data.light_type == "point":
@@ -302,16 +320,15 @@ def setup_lights(
def render_scene(
self,
- object_datas: List[Panda3dObjectData],
- camera_datas: List[Panda3dCameraData],
- light_datas: List[Panda3dLightData],
+ object_datas: list[Panda3dObjectData],
+ camera_datas: list[Panda3dCameraData],
+ light_datas: list[Panda3dLightData],
render_depth: bool = False,
copy_arrays: bool = True,
render_binary_mask: bool = False,
render_normals: bool = False,
clear: bool = True,
- ) -> List[CameraRenderingData]:
-
+ ) -> list[CameraRenderingData]:
start = time.time()
root_node = self._app.render.attachNewNode("world")
object_nodes = self.setup_scene(root_node, object_datas)
@@ -320,12 +337,19 @@ def render_scene(
setup_time = time.time() - start
start = time.time()
- renderings = self.render_images(cameras, copy_arrays=copy_arrays, render_depth=render_depth)
+ renderings = self.render_images(
+ cameras,
+ copy_arrays=copy_arrays,
+ render_depth=render_depth,
+ )
if render_normals:
for object_node in object_nodes:
self.use_normals_texture(object_node)
root_node.clear_light()
- light_data = Panda3dLightData(light_type="ambient", color=(1.0, 1.0, 1.0, 1.0))
+ light_data = Panda3dLightData(
+ light_type="ambient",
+ color=(1.0, 1.0, 1.0, 1.0),
+ )
light_nodes += self.setup_lights(root_node, [light_data])
normals_renderings = self.render_images(cameras, copy_arrays=copy_arrays)
for n, rendering in enumerate(renderings):
diff --git a/happypose/toolbox/renderer/types.py b/happypose/toolbox/renderer/types.py
index 5e92f69d..516162b9 100644
--- a/happypose/toolbox/renderer/types.py
+++ b/happypose/toolbox/renderer/types.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@
# Standard Library
from dataclasses import dataclass
-from typing import Callable, Optional, Tuple
+from typing import Callable, Optional
# Third Party
import numpy as np
@@ -31,22 +30,24 @@
# Local Folder
from .utils import depth_image_from_depth_buffer
-RgbaColor = Tuple[float, float, float, float]
+RgbaColor = tuple[float, float, float, float]
NodeFunction = Callable[
- [p3d.core.NodePath, p3d.core.NodePath], None
+ [p3d.core.NodePath, p3d.core.NodePath],
+ None,
] # (root_node_path, object_node_path)
-Resolution = Tuple[int, int]
+Resolution = tuple[int, int]
-TCCGL = Transform(np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=float))
+TCCGL = Transform(
+ np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=float),
+)
@dataclass
class CameraRenderingData:
- """
- rgb: (h, w, 3) uint8
+ """rgb: (h, w, 3) uint8
normals: (h, w, 3) uint8
depth: (h, w, 1) float32
- binary_mask: (h, w, 1) np.bool_
+ binary_mask: (h, w, 1) np.bool_.
"""
rgb: np.ndarray
@@ -58,7 +59,7 @@ class CameraRenderingData:
@dataclass
class Panda3dCameraData:
K: np.ndarray
- resolution: Tuple[int, int]
+ resolution: tuple[int, int]
TWC: Transform = Transform((0.0, 0.0, 0.0, 1.0), (0.0, 0.0, 0.0))
z_near: float = 0.1
z_far: float = 10
@@ -92,7 +93,7 @@ def set_lens_parameters(self, lens: p3d.core.Lens) -> p3d.core.Lens:
[0, 0, A, 1],
[0, fy, 0, 0],
[0, 0, B, 0],
- ]
+ ],
)
lens.setFilmSize(w, h)
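The matrix fragment above is the usual pinhole-to-projection mapping. A hedged sketch of the quantities it needs, assuming the standard OpenGL-style depth remapping (the exact sign and layout conventions are whatever `set_lens_parameters` uses):

```python
def projection_coefficients(K, z_near, z_far):
    # Focal lengths and principal point from the 3x3 intrinsics matrix K.
    fx, fy = K[0][0], K[1][1]
    cx, cy = K[0][2], K[1][2]
    # GL-style depth coefficients: map camera-space z in [z_near, z_far]
    # to normalized device depth.
    A = (z_far + z_near) / (z_far - z_near)
    B = 2 * z_far * z_near / (z_near - z_far)
    return fx, fy, cx, cy, A, B
```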
@@ -106,7 +107,7 @@ class Panda3dLightData:
"""Data used to to define a light in a panda3d scene.
light_type: ambient, point, or directional
NOTE: Alpha is largely irrelevant
- https://docs.panda3d.org/1.10/python/programming/render-attributes/lighting#colored-lights
+ https://docs.panda3d.org/1.10/python/programming/render-attributes/lighting#colored-lights.
"""
light_type: str
@@ -185,7 +186,9 @@ def create(
depth_texture = p3d.core.Texture()
depth_texture.setFormat(p3d.core.Texture.FDepthComponent)
graphics_buffer.addRenderTexture(
- depth_texture, p3d.core.GraphicsOutput.RTMCopyRam, p3d.core.GraphicsOutput.RTPDepth
+ depth_texture,
+ p3d.core.GraphicsOutput.RTMCopyRam,
+ p3d.core.GraphicsOutput.RTPDepth,
)
cam_node = p3d.core.Camera(f"Camera [{name}]")
@@ -210,9 +213,10 @@ def create(
)
def get_rgb_image(self) -> np.ndarray:
- """_summary_
+ """_summary_.
- Returns:
+ Returns
+ -------
np.ndarray: (h, w, 3) uint8 array
"""
# TODO : Extract data from the rgb texture ?
@@ -234,7 +238,8 @@ def _get_depth_buffer(self) -> np.ndarray:
https://developer.nvidia.com/content/depth-precision-visualized#:~:text=GPU%20hardware%20depth%20buffers%20don,reciprocal%20of%20world%2Dspace%20depth.
- Returns:
+ Returns
+ -------
depth_buffer: [H,W,1] numpy array with values in [0,1]
"""
diff --git a/happypose/toolbox/renderer/utils.py b/happypose/toolbox/renderer/utils.py
index 204c9785..99cc846f 100644
--- a/happypose/toolbox/renderer/utils.py
+++ b/happypose/toolbox/renderer/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,21 +14,24 @@
"""
-
# Standard Library
-from typing import List
# Third Party
import numpy as np
import numpy.typing as npt
import panda3d as p3d
+from panda3d.core import AntialiasAttrib, GeomNode, Material, NodePath
# MegaPose
from happypose.toolbox.lib3d.transform import Transform
from happypose.toolbox.renderer.geometry import make_axes, make_box, make_sphere
+
def compute_view_mat(TWC):
- TCCGL = np.array([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], dtype=float)
+ TCCGL = np.array(
+ [[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]],
+ dtype=float,
+ )
TCCGL = Transform(TCCGL)
TWC = Transform(TWC)
TWCGL = TWC * TCCGL
@@ -37,12 +39,16 @@ def compute_view_mat(TWC):
view_mat = p3d.core.LMatrix4f(*view_mat.transpose().flatten().tolist())
return view_mat
+
def np_to_lmatrix4(np_array: npt.NDArray) -> p3d.core.LMatrix4f:
return p3d.core.LMatrix4f(*np_array.transpose().flatten().tolist())
def depth_image_from_depth_buffer(
- depth_buffer: npt.NDArray[np.float32], z_near: float, z_far: float, eps: float = 0.001
+ depth_buffer: npt.NDArray[np.float32],
+ z_near: float,
+ z_far: float,
+ eps: float = 0.001,
) -> npt.NDArray[np.float32]:
"""Convert depth image to depth buffer.
@@ -57,7 +63,13 @@ def depth_image_from_depth_buffer(
def make_rgb_texture_normal_map(size: int = 32) -> p3d.core.Texture:
tex = p3d.core.Texture()
- tex.setup3dTexture(size, size, size, p3d.core.Texture.T_unsigned_byte, p3d.core.Texture.F_rgb8)
+ tex.setup3dTexture(
+ size,
+ size,
+ size,
+ p3d.core.Texture.T_unsigned_byte,
+ p3d.core.Texture.F_rgb8,
+ )
im = np.ones((size, size, size, 3), dtype=np.uint8) * 255
for x in range(size):
for y in range(size):
@@ -79,7 +91,7 @@ def make_cube_node(scale, color=(1, 0, 0, 1)):
node.set_color(color)
node.set_render_mode_thickness(4)
node.set_antialias(AntialiasAttrib.MLine)
- node.set_material(Material(), 1)
+ node.setMaterial(Material(), 1)
return node
@@ -118,7 +130,7 @@ def show_node_center(node, radius=None):
else:
radius = bounds.get_radius()
sphere_node.set_scale(radius * 0.1)
-    set_material(sphere_node, (1, 1, 1, 1))
+    sphere_node.set_color((1, 1, 1, 1))
+    sphere_node.set_material(Material(), 1)
sphere_node.reparentTo(node)
sphere_node.setPos(0, 0, 0)
diff --git a/happypose/toolbox/utils/conversion.py b/happypose/toolbox/utils/conversion.py
index ad3b7eaf..0bcba699 100644
--- a/happypose/toolbox/utils/conversion.py
+++ b/happypose/toolbox/utils/conversion.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,6 @@
# Standard Library
-from typing import List, Tuple
# MegaPose
from happypose.toolbox.datasets.scene_dataset import CameraData, ObjectData
@@ -24,9 +22,9 @@
def convert_scene_observation_to_panda3d(
- camera_data: CameraData, object_datas: List[ObjectData]
-) -> Tuple[Panda3dCameraData, List[Panda3dObjectData]]:
-
+ camera_data: CameraData,
+ object_datas: list[ObjectData],
+) -> tuple[Panda3dCameraData, list[Panda3dObjectData]]:
assert camera_data.TWC is not None
assert camera_data.K is not None
assert camera_data.resolution is not None
@@ -44,6 +42,6 @@ def convert_scene_observation_to_panda3d(
Panda3dObjectData(
label=object_data.label,
TWO=object_data.TWO,
- )
+ ),
)
return panda3d_camera_data, panda3d_object_datas
diff --git a/happypose/toolbox/utils/distributed.py b/happypose/toolbox/utils/distributed.py
index 72750675..dd8f7288 100644
--- a/happypose/toolbox/utils/distributed.py
+++ b/happypose/toolbox/utils/distributed.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,19 +14,19 @@
"""
-
# Standard Library
import datetime
import os
import sys
from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any
+
+import omegaconf
# Third Party
import torch
import torch.distributed as dist
from omegaconf import OmegaConf
-import omegaconf
# MegaPose
from happypose.toolbox.utils.logging import get_logger
@@ -46,7 +45,8 @@ def get_tmp_dir() -> Path:
def sync_config(
- cfg: omegaconf.dictconfig.DictConfig, local_fields: List[str] = []
+ cfg: omegaconf.dictconfig.DictConfig,
+ local_fields: list[str] = [],
) -> omegaconf.dictconfig.DictConfig:
cfg_path = get_tmp_dir() / "config.yaml"
if get_rank() == 0:
@@ -104,11 +104,10 @@ def get_world_size() -> int:
def reduce_dict(
- input_dict: Dict[str, Any],
- average: bool = True
-) -> Dict[str, Any]:
- """
- https://github.com/pytorch/vision/blob/master/references/detection/utils.py
+ input_dict: dict[str, Any],
+ average: bool = True,
+) -> dict[str, Any]:
+ """https://github.com/pytorch/vision/blob/master/references/detection/utils.py
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
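For context, the torchvision helper referenced in the docstring reduces by stacking the values, all-reducing once, and unstacking; a sketch of that pattern (assumes scalar tensors and identical keys on every rank):

```python
import torch
import torch.distributed as dist


def reduce_dict_sketch(input_dict, average=True):
    # No-op outside a distributed run.
    world_size = dist.get_world_size() if dist.is_initialized() else 1
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = sorted(input_dict)  # identical ordering on all ranks
        values = torch.stack([input_dict[k] for k in names])
        dist.all_reduce(values)  # in-place sum across ranks
        if average:
            values /= world_size
        return dict(zip(names, values))
```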
diff --git a/happypose/toolbox/utils/download.py b/happypose/toolbox/utils/download.py
old mode 100644
new mode 100755
index bd0e1848..4d5ae21c
--- a/happypose/toolbox/utils/download.py
+++ b/happypose/toolbox/utils/download.py
@@ -1,58 +1,55 @@
+#!/usr/bin/env python
import argparse
+import asyncio
import logging
import os
-import subprocess
+import re
import zipfile
from pathlib import Path
-import wget
+import httpx
+from bs4 import BeautifulSoup
+from tqdm import tqdm
from happypose.pose_estimators.cosypose.cosypose.config import (
BOP_DS_DIR,
LOCAL_DATA_DIR,
- PROJECT_DIR,
)
from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger
logger = get_logger(__name__)
-RCLONE_CFG_PATH = (PROJECT_DIR / 'rclone.conf')
-RCLONE_ROOT = 'happypose:'
-DOWNLOAD_DIR = LOCAL_DATA_DIR / 'downloads'
+DOWNLOAD_URL = "https://www.paris.inria.fr/archive_ylabbeprojectsdata"
+LAAS_DOWNLOAD_URL = "https://gepettoweb.laas.fr/data/happypose/"
+
+DOWNLOAD_DIR = LOCAL_DATA_DIR / "downloads"
DOWNLOAD_DIR.mkdir(exist_ok=True)
-BOP_SRC = 'https://bop.felk.cvut.cz/media/data/bop_datasets/'
+BOP_SRC = "https://bop.felk.cvut.cz/media/data/bop_datasets/"
BOP_DATASETS = {
- 'ycbv': {
- 'splits': ['train_real', 'train_synt', 'test_all']
+ "ycbv": {
+ "splits": ["train_real", "train_synt", "test_all"],
},
-
- 'tless': {
- 'splits': ['test_primesense_all', 'train_primesense'],
+ "tless": {
+ "splits": ["test_primesense_all", "train_primesense"],
},
-
- 'hb': {
- 'splits': ['test_primesense_all', 'val_primesense'],
+ "hb": {
+ "splits": ["test_primesense_all", "val_primesense"],
},
-
- 'icbin': {
- 'splits': ['test_all'],
+ "icbin": {
+ "splits": ["test_all"],
},
-
- 'itodd': {
- 'splits': ['val', 'test_all'],
+ "itodd": {
+ "splits": ["val", "test_all"],
},
-
- 'lm': {
- 'splits': ['test_all'],
+ "lm": {
+ "splits": ["test_all"],
},
-
- 'lmo': {
- 'splits': ['test_all'],
- 'has_pbr': False,
+ "lmo": {
+ "splits": ["test_all"],
+ "has_pbr": False,
},
-
- 'tudl': {
- 'splits': ['test_all', 'train_real']
+ "tudl": {
+ "splits": ["test_all", "train_real"],
},
}
@@ -60,94 +57,185 @@
def main():
- parser = argparse.ArgumentParser('CosyPose download utility')
- parser.add_argument('--bop_dataset', default='', type=str, choices=BOP_DS_NAMES)
- parser.add_argument('--bop_src', default='bop', type=str, choices=['bop', 'gdrive'])
- parser.add_argument('--bop_extra_files', default='', type=str, choices=['ycbv', 'tless'])
- parser.add_argument('--cosypose_models', default='', type=str)
+ parser = argparse.ArgumentParser("CosyPose download utility")
+ parser.add_argument("--bop_dataset", nargs="*", choices=BOP_DS_NAMES)
+ parser.add_argument("--bop_extra_files", nargs="*", choices=["ycbv", "tless"])
+ parser.add_argument("--cosypose_models", nargs="*")
parser.add_argument("--megapose_models", action="store_true")
- parser.add_argument('--urdf_models', default='', type=str)
- parser.add_argument('--ycbv_compat_models', action='store_true')
- parser.add_argument('--texture_dataset', action='store_true')
- parser.add_argument('--result_id', default='', type=str)
- parser.add_argument('--bop_result_id', default='', type=str)
- parser.add_argument('--synt_dataset', default='', type=str)
- parser.add_argument('--detections', default='', type=str)
- parser.add_argument('--example_scenario', action='store_true')
- parser.add_argument('--pbr_training_images', action='store_true')
- parser.add_argument('--all_bop20_results', action='store_true')
- parser.add_argument('--all_bop20_models', action='store_true')
-
- parser.add_argument('--debug', action='store_true')
+ parser.add_argument("--urdf_models", nargs="*")
+ parser.add_argument("--ycbv_compat_models", action="store_true")
+ parser.add_argument("--texture_dataset", action="store_true")
+ parser.add_argument("--result_id", nargs="*")
+ parser.add_argument("--bop_result_id", nargs="*")
+ parser.add_argument("--synt_dataset", nargs="*")
+ parser.add_argument("--detections", nargs="*")
+ parser.add_argument("--examples", nargs="*")
+ parser.add_argument("--example_scenario", action="store_true")
+ parser.add_argument("--pbr_training_images", action="store_true")
+ parser.add_argument("--all_bop20_results", action="store_true")
+ parser.add_argument("--all_bop20_models", action="store_true")
+
+    parser.add_argument("--debug", action="store_true")
+
+    to_dl = []
+    to_symlink = []
+    to_unzip = []
args = parser.parse_args()
if args.debug:
logger.setLevel(logging.DEBUG)
if args.bop_dataset:
- if args.bop_src == 'bop':
- download_bop_original(args.bop_dataset, args.pbr_training_images and BOP_DATASETS[args.bop_dataset].get('has_pbr', True))
- elif args.bop_src == 'gdrive':
- download_bop_gdrive(args.bop_dataset)
+ for dataset in args.bop_dataset:
+ to_dl.append((BOP_SRC + f"{dataset}_base.zip", BOP_DS_DIR / dataset))
+ download_pbr = args.pbr_training_images and BOP_DATASETS[dataset].get(
+ "has_pbr", True
+ )
+ suffixes = ["models"] + BOP_DATASETS[dataset]["splits"]
+ if download_pbr:
+ suffixes += ["train_pbr"]
+ for suffix in suffixes:
+ to_dl.append(
+ (
+ BOP_SRC + f"{dataset}_{suffix}.zip",
+ BOP_DS_DIR / dataset,
+ )
+ )
if args.bop_extra_files:
- if args.bop_extra_files == 'tless':
- # https://github.com/kirumang/Pix2Pose#download-pre-trained-weights
- download(f'cosypose/bop_datasets/tless/all_target_tless.json', BOP_DS_DIR / 'tless')
- os.symlink(BOP_DS_DIR / 'tless/models_eval', BOP_DS_DIR / 'tless/models')
- elif args.bop_extra_files == 'ycbv':
- # Friendly names used with YCB-Video
- download(f'cosypose/bop_datasets/ycbv/ycbv_friendly_names.txt', BOP_DS_DIR / 'ycbv')
- # Offsets between YCB-Video and BOP (extracted from BOP readme)
- download(f'cosypose/bop_datasets/ycbv/offsets.txt', BOP_DS_DIR / 'ycbv')
- # Evaluation models for YCB-Video (used by other works)
- download(f'cosypose/bop_datasets/ycbv/models_original', BOP_DS_DIR / 'ycbv')
- # Keyframe definition
- download(f'cosypose/bop_datasets/ycbv/keyframe.txt', BOP_DS_DIR / 'ycbv')
+ for extra in args.bop_extra_files:
+ if extra == "tless":
+ # https://github.com/kirumang/Pix2Pose#download-pre-trained-weights
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/tless/all_target_tless.json",
+ BOP_DS_DIR / "tless",
+ )
+ )
+ to_symlink.append(
+ (BOP_DS_DIR / "tless/models_eval", BOP_DS_DIR / "tless/models")
+ )
+ elif extra == "ycbv":
+ # Friendly names used with YCB-Video
+ to_dl += [
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/ycbv_friendly_names.txt",
+ BOP_DS_DIR / "ycbv",
+ ),
+ # Offsets between YCB-Video and BOP (extracted from BOP readme)
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/offsets.txt",
+ BOP_DS_DIR / "ycbv",
+ ),
+ # Evaluation models for YCB-Video (used by other works)
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/models_original",
+ BOP_DS_DIR / "ycbv",
+ ),
+ # Keyframe definition
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/keyframe.txt",
+ BOP_DS_DIR / "ycbv",
+ ),
+ ]
if args.urdf_models:
- download(f'cosypose/urdfs/{args.urdf_models}', LOCAL_DATA_DIR / 'urdfs')
+ for model in args.urdf_models:
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/urdfs/{model}",
+ LOCAL_DATA_DIR / "urdfs",
+ )
+ )
if args.ycbv_compat_models:
- download(f'cosypose/bop_datasets/ycbv/models_bop-compat', BOP_DS_DIR / 'ycbv')
- download(f'cosypose/bop_datasets/ycbv/models_bop-compat_eval', BOP_DS_DIR / 'ycbv')
+ to_dl += [
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/models_bop-compat",
+ BOP_DS_DIR / "ycbv",
+ ),
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_datasets/ycbv/models_bop-compat_eval",
+ BOP_DS_DIR / "ycbv",
+ ),
+ ]
if args.cosypose_models:
- download(f'cosypose/experiments/{args.cosypose_models}', LOCAL_DATA_DIR / 'experiments')
-
+ for model in args.cosypose_models:
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/experiments/{model}",
+ LOCAL_DATA_DIR / "experiments",
+ )
+ )
+
if args.megapose_models:
# rclone copyto inria_data:megapose-models/ megapose-models/
# --exclude="**epoch**" --config $MEGAPOSE_DIR/rclone.conf -P
- download(
- f"megapose/megapose-models/",
- LOCAL_DATA_DIR / "megapose-models/",
- flags=["--exclude", "*epoch*"],
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/megapose/megapose-models/",
+ LOCAL_DATA_DIR / "megapose-models/",
+ ["--exclude", ".*epoch.*"],
+ )
)
if args.detections:
- download(f'cosypose/saved_detections/{args.detections}.pkl', LOCAL_DATA_DIR / 'saved_detections')
+ for detection in args.detections:
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/saved_detections/{detection}.pkl",
+ LOCAL_DATA_DIR / "saved_detections",
+ )
+ )
if args.result_id:
- download(f'cosypose/results/{args.result_id}', LOCAL_DATA_DIR / 'results')
+ for result in args.result_id:
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/results/{result}",
+ LOCAL_DATA_DIR / "results",
+ )
+ )
if args.bop_result_id:
- csv_name = args.bop_result_id + '.csv'
- download(f'cosypose/bop_predictions/{csv_name}', LOCAL_DATA_DIR / 'bop_predictions')
- download(f'cosypose/bop_eval_outputs/{args.bop_result_id}', LOCAL_DATA_DIR / 'bop_predictions')
+ for result in args.bop_result_id:
+ to_dl += [
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_predictions/{result}.csv",
+ LOCAL_DATA_DIR / "bop_predictions",
+ ),
+ (
+ f"{DOWNLOAD_URL}/cosypose/bop_eval_outputs/{result}",
+ LOCAL_DATA_DIR / "bop_predictions",
+ ),
+ ]
if args.texture_dataset:
- download('cosypose/zip_files/textures.zip', DOWNLOAD_DIR)
- logger.info('Extracting textures ...')
- zipfile.ZipFile(DOWNLOAD_DIR / 'textures.zip').extractall(LOCAL_DATA_DIR / 'texture_datasets')
+ to_dl.append((f"{DOWNLOAD_URL}/cosypose/zip_files/textures.zip", DOWNLOAD_DIR))
+ to_unzip.append(
+ (DOWNLOAD_DIR / "textures.zip", LOCAL_DATA_DIR / "texture_datasets")
+ )
if args.synt_dataset:
- zip_name = f'{args.synt_dataset}.zip'
- download(f'cosypose/zip_files/{zip_name}', DOWNLOAD_DIR)
- logger.info('Extracting textures ...')
- zipfile.ZipFile(DOWNLOAD_DIR / zip_name).extractall(LOCAL_DATA_DIR / 'synt_datasets')
+ for dataset in args.synt_dataset:
+ to_dl.append(
+ (f"{DOWNLOAD_URL}/cosypose/zip_files/{dataset}.zip", DOWNLOAD_DIR)
+ )
+ to_unzip.append(
+ (DOWNLOAD_DIR / f"{dataset}.zip", LOCAL_DATA_DIR / "synt_datasets")
+ )
if args.example_scenario:
- download(f'cosypose/custom_scenarios/example/candidates.csv', LOCAL_DATA_DIR / 'custom_scenarios/example')
- download(f'cosypose/custom_scenarios/example/scene_camera.json', LOCAL_DATA_DIR / 'custom_scenarios/example')
+ to_dl += [
+ (
+ f"{DOWNLOAD_URL}/cosypose/custom_scenarios/example/candidates.csv",
+ LOCAL_DATA_DIR / "custom_scenarios/example",
+ ),
+ (
+ f"{DOWNLOAD_URL}/cosypose/custom_scenarios/example/scene_camera.json",
+ LOCAL_DATA_DIR / "custom_scenarios/example",
+ ),
+ ]
if args.all_bop20_models:
from happypose.pose_estimators.cosypose.cosypose.bop_config import (
@@ -158,10 +246,22 @@ def main():
SYNT_REAL_DETECTORS,
SYNT_REAL_REFINER,
)
- for model_dict in (PBR_DETECTORS, PBR_COARSE, PBR_REFINER,
- SYNT_REAL_DETECTORS, SYNT_REAL_COARSE, SYNT_REAL_REFINER):
+
+ for model_dict in (
+ PBR_DETECTORS,
+ PBR_COARSE,
+ PBR_REFINER,
+ SYNT_REAL_DETECTORS,
+ SYNT_REAL_COARSE,
+ SYNT_REAL_REFINER,
+ ):
for model in model_dict.values():
- download(f'cosypose/experiments/{model}', LOCAL_DATA_DIR / 'experiments')
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/experiments/{model}",
+ LOCAL_DATA_DIR / "experiments",
+ )
+ )
if args.all_bop20_results:
from happypose.pose_estimators.cosypose.cosypose.bop_config import (
@@ -171,57 +271,172 @@ def main():
SYNT_REAL_ICP_INFERENCE_ID,
SYNT_REAL_INFERENCE_ID,
)
- for result_id in (PBR_INFERENCE_ID, SYNT_REAL_INFERENCE_ID, SYNT_REAL_ICP_INFERENCE_ID,
- SYNT_REAL_4VIEWS_INFERENCE_ID, SYNT_REAL_8VIEWS_INFERENCE_ID):
- download(f'cosypose/results/{result_id}', LOCAL_DATA_DIR / 'results')
-
-def run_rclone(cmd, args, flags):
- rclone_cmd = ['rclone', cmd] + args + flags + ['--config', str(RCLONE_CFG_PATH)]
- logger.debug(' '.join(rclone_cmd))
- print(rclone_cmd)
- subprocess.run(rclone_cmd)
-
-
-def download(download_path, local_path, flags=[]):
- download_path = Path(download_path)
- if download_path.name != local_path.name:
- local_path = local_path / download_path.name
- if '.' in str(download_path):
- rclone_path = RCLONE_ROOT + str(download_path)
- else:
- rclone_path = RCLONE_ROOT + str(download_path) + "/"
- local_path = str(local_path)
- logger.info(f"Copying {rclone_path} to {local_path}")
- run_rclone("copyto", [rclone_path, local_path], flags=flags + ["-P"])
-
-
-def download_bop_original(ds_name, download_pbr):
- filename = f'{ds_name}_base.zip'
- wget_download_and_extract(BOP_SRC + filename, BOP_DS_DIR)
-
- suffixes = ['models'] + BOP_DATASETS[ds_name]['splits']
- if download_pbr:
- suffixes += ['train_pbr']
- for suffix in suffixes:
- wget_download_and_extract(BOP_SRC + f'{ds_name}_{suffix}.zip', BOP_DS_DIR / ds_name)
-
-
-def download_bop_gdrive(ds_name):
- download(f'bop_datasets/{ds_name}', BOP_DS_DIR / ds_name)
-
-
-def wget_download_and_extract(url, out):
- tmp_path = DOWNLOAD_DIR / url.split('/')[-1]
- if tmp_path.exists():
- logger.info(f'{url} already downloaded: {tmp_path}...')
- else:
- logger.info(f'Download {url} at {tmp_path}...')
- wget.download(url, out=tmp_path.as_posix())
- logger.info(f'Extracting {tmp_path} at {out}.')
- zipfile.ZipFile(tmp_path).extractall(out)
-
-
-if __name__ == '__main__':
+ for result_id in (
+ PBR_INFERENCE_ID,
+ SYNT_REAL_INFERENCE_ID,
+ SYNT_REAL_ICP_INFERENCE_ID,
+ SYNT_REAL_4VIEWS_INFERENCE_ID,
+ SYNT_REAL_8VIEWS_INFERENCE_ID,
+ ):
+ to_dl.append(
+ (
+ f"{DOWNLOAD_URL}/cosypose/results/{result_id}",
+ LOCAL_DATA_DIR / "results",
+ )
+ )
+ if args.examples:
+ for example in args.examples:
+ to_dl.append(
+ (
+ f"{LAAS_DOWNLOAD_URL}/examples/{example}",
+ LOCAL_DATA_DIR / "examples",
+ )
+ )
+
+ # logger.info(f"{to_dl=}")
+ asyncio.run(adownloads(*to_dl))
+
+ for src, dst in to_symlink:
+ os.symlink(src, dst)
+
+ for src, dst in to_unzip:
+ zipfile.ZipFile(src).extractall(dst)
+
+
+async def adownloads(*args):
+ async with DownloadClient() as dl_client:
+ for arg in args:
+ dl_client.create_task(dl_client.adownload(*arg))
+
+
+class DownloadClient:
+ def __init__(self):
+ self.client = httpx.AsyncClient()
+ self.task_set = set()
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, *args):
+ await self.aclose()
+
+ async def aclose(self):
+ while len(self.task_set) > 0:
+ await asyncio.gather(*list(self.task_set))
+ await self.client.aclose()
+
+ def create_task(self, awaitable):
+ task = asyncio.create_task(awaitable)
+ self.task_set.add(task)
+ task.add_done_callback(self.task_set.discard)
+
+ async def adownload(self, download_path, local_path, flags=None):
+ if flags is None:
+ flags = []
+ flags = Flags(flags)
+
+        dl_path = Path(download_path)
+        if dl_path.name != local_path.name:
+            local_path = local_path / dl_path.name
+
+        if not flags.flags_managing(dl_path.name):  # skip paths matched by --exclude
+ return
+
+ if (
+ not download_path.endswith("/")
+ and not httpx.head(download_path).is_redirect
+ ): # file
+ await self.download_file(download_path, local_path)
+ else:
+ if not download_path.endswith("/"):
+ download_path += "/"
+ await self.download_dir(download_path, local_path, flags)
+
+ async def download_dir(self, download_path, local_path, flags):
+ try:
+ r = await self.client.get(download_path)
+ except httpx.PoolTimeout:
+ logger.error(f"Failed {download_path} with timeout")
+ return
+ if r.status_code != 200:
+ logger.error(f"Failed {download_path} with code {r.status_code}")
+ return
+ Path(local_path).mkdir(parents=True, exist_ok=True)
+ soup = BeautifulSoup(r.content, "html.parser")
+ logger.info(f"Copying {download_path} to {local_path}")
+
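+        # NOTE (assumption): the first few anchors on the server-generated
+        # index page are header links (parent directory, sort columns),
+        # hence the [5:] slice.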
+ for link in soup.find_all("a")[5:]:
+ href: str = link.get("href")
+ if not flags.flags_managing(href):
+ continue
+ if href.endswith("/"):
+ self.create_task(
+ self.download_dir(download_path + href, local_path / href, flags),
+ )
+ else:
+ self.create_task(
+ self.download_file(download_path + href, local_path / href),
+ )
+
+ async def download_file(self, download_path, local_path):
+ local_path = Path(local_path)
+ if local_path.exists():
+ # logger.info(f"Existing {download_path=}")
+ local_size = local_path.stat().st_size
+ head = await self.client.head(download_path)
+ if "content-length" in head.headers:
+ if local_size == int(head.headers["content-length"]):
+ logger.info(f"Skipping {download_path} already fully downloaded")
+ return
+ else:
+ logger.info(f"Retrying incomplete {download_path}")
+ logger.info(f"Copying {download_path} to {local_path}")
+ local_path.parent.mkdir(parents=True, exist_ok=True)
+ with local_path.open("wb") as f:
+ async with self.client.stream("GET", download_path) as r:
+ total = None
+ if "Content-Length" in r.headers:
+ total = int(r.headers["Content-Length"])
+ with tqdm(
+ desc=local_path.name,
+ total=total,
+ unit_scale=True,
+ unit_divisor=1024,
+ unit="B",
+ ) as progress:
+ num_bytes_downloaded = r.num_bytes_downloaded
+ async for chunk in r.aiter_bytes():
+ f.write(chunk)
+ progress.update(r.num_bytes_downloaded - num_bytes_downloaded)
+ num_bytes_downloaded = r.num_bytes_downloaded
+ if r.status_code != 200:
+ logger.error(f"Failed {download_path} with code {r.status_code}")
+ return
+
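Two caveats worth flagging in the client above: `download_file` only checks `r.status_code` after the body has already been streamed to disk, so a failed request leaves a partial or HTML file behind that the size-based resume check may later accept as complete, and `adownload` calls the synchronous `httpx.head` from inside a coroutine, which blocks the event loop. A hedged sketch of the first fix (same attributes as the class above; `local_path` assumed to already be a `Path`):

```python
async def download_file_checked(self, download_path, local_path):
    # Validate the response before anything is written to disk.
    async with self.client.stream("GET", download_path) as r:
        if r.status_code != 200:
            logger.error(f"Failed {download_path} with code {r.status_code}")
            return
        local_path.parent.mkdir(parents=True, exist_ok=True)
        with local_path.open("wb") as f:
            async for chunk in r.aiter_bytes():
                f.write(chunk)
```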
+
+class Flags:
+    def __init__(self, flags: list[str]):
+ # only '--exclude' were used before so this is the only flag currently usable
+ # if you need to use other flags, feel free to implement them here
+ self.exclude_set: set[str] = set()
+
+ parser = argparse.ArgumentParser("Flags parsing")
+ parser.add_argument("--exclude", default="", type=str)
+ args = parser.parse_args(flags)
+
+ if args.exclude:
+ self.exclude_set.add(args.exclude)
+
+    def flags_managing(self, href):
+        for el in self.exclude_set:
+ if re.fullmatch(el, href):
+ return False
+ return True
+
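A quick check of the exclusion logic against the `.*epoch.*` pattern queued for the megapose models earlier (filenames invented):

```python
flags = Flags(["--exclude", ".*epoch.*"])
assert not flags.flags_managing("checkpoint_epoch=12.pth")  # excluded
assert flags.flags_managing("model_final.pth")  # kept
```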
+
+if __name__ == "__main__":
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
main()
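Since every selector now takes `nargs="*"`, a single invocation can queue several artifacts at once, e.g. `python -m happypose.toolbox.utils.download --megapose_models --bop_dataset ycbv tless --bop_extra_files ycbv`; the queued tuples are then fetched concurrently by the `DownloadClient` defined above.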
diff --git a/happypose/toolbox/utils/load_model.py b/happypose/toolbox/utils/load_model.py
index a2cbf742..61f074fa 100644
--- a/happypose/toolbox/utils/load_model.py
+++ b/happypose/toolbox/utils/load_model.py
@@ -1,8 +1,8 @@
# MegaPose
from happypose.pose_estimators.megapose.config import LOCAL_DATA_DIR
-from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
from happypose.pose_estimators.megapose.inference.icp_refiner import ICPRefiner
from happypose.pose_estimators.megapose.inference.pose_estimator import PoseEstimator
+from happypose.toolbox.datasets.object_dataset import RigidObjectDataset
from happypose.toolbox.inference.utils import load_pose_models
NAMED_MODELS = {
@@ -53,7 +53,6 @@ def load_named_model(
n_workers: int = 4,
bsz_images: int = 128,
) -> PoseEstimator:
-
model = NAMED_MODELS[model_name]
renderer_kwargs = {
diff --git a/happypose/toolbox/utils/logging.py b/happypose/toolbox/utils/logging.py
index 069ad638..5a54888f 100644
--- a/happypose/toolbox/utils/logging.py
+++ b/happypose/toolbox/utils/logging.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,14 +14,10 @@
"""
-
# Standard Library
-import contextlib
import logging
import time
from datetime import timedelta
-from io import StringIO
-from typing import Optional
class ElapsedFormatter:
@@ -32,7 +27,7 @@ def __init__(self):
def format(self, record):
elapsed_seconds = record.created - self.start_time
elapsed = timedelta(seconds=elapsed_seconds)
- return "{} - {}".format(elapsed, record.getMessage())
+ return f"{elapsed} - {record.getMessage()}"
def get_logger(name: str):
diff --git a/happypose/toolbox/utils/logs_bokeh.py b/happypose/toolbox/utils/logs_bokeh.py
index 60dca2c3..03d1facf 100644
--- a/happypose/toolbox/utils/logs_bokeh.py
+++ b/happypose/toolbox/utils/logs_bokeh.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import datetime
import textwrap
@@ -27,6 +25,7 @@
import numpy as np
import pandas as pd
import seaborn as sns
+import yaml
from bokeh.io import output_notebook, show
from bokeh.layouts import gridplot
from bokeh.models import HoverTool
@@ -43,7 +42,7 @@
class Plotter:
def __init__(
self,
- log_dir: Path
+ log_dir: Path,
):
self.fill_config_fn = check_update_config_pose
self.log_dir = Path(log_dir)
@@ -69,7 +68,7 @@ def load_logs(self, run_ids):
cfg_path = run_dir / "config.yaml"
try:
config = OmegaConf.load(cfg_path)
- except:
+ except Exception:
config = yaml.load(cfg_path.read_text(), Loader=yaml.UnsafeLoader)
config = vars(config)
configs[run_id] = self.fill_config_fn(config)
@@ -77,16 +76,19 @@ def load_logs(self, run_ids):
log_path = run_dir / "log.txt"
if log_path.exists():
log_df = pd.read_json(run_dir / "log.txt", lines=True)
- last_write = datetime.datetime.fromtimestamp((run_dir / "log.txt").stat().st_mtime)
+ last_write = datetime.datetime.fromtimestamp(
+ (run_dir / "log.txt").stat().st_mtime,
+ )
else:
log_df = None
last_write = datetime.datetime.now()
configs[run_id]["delta_t"] = (
- f"{(datetime.datetime.now() - last_write).seconds / 60:.1f}" + f"({len(log_df)})"
+ f"{(datetime.datetime.now() - last_write).seconds / 60:.1f}"
+ + f"({len(log_df)})"
)
log_dicts[run_id] = log_df
- ds_eval = dict()
+ ds_eval = {}
for f in run_dir.iterdir():
if "errors_" in f.name:
ds = f.with_suffix("").name.split("errors_")[1]
@@ -220,7 +222,13 @@ def plot_eval_field(
name = f"{run_num}/{dataset}"
name = "\n ".join(textwrap.wrap(name, width=20))
if len(x) == 1:
- f.circle(x, y, color=color, line_dash=dash_pattern, name=name)
+ f.circle(
+ x,
+ y,
+ color=color,
+ line_dash=dash_pattern,
+ name=name,
+ )
x = np.concatenate(([0], x))
y = np.concatenate((y, y))
f.line(
@@ -333,12 +341,13 @@ def show_configs(self, ignore=None, diff=True):
config_df = df.copy()
self.config_df = config_df
- name2color = {k: v for k, v in zip(self.run_ids, self.colors_uint8)}
+ name2color = dict(zip(self.run_ids, self.colors_uint8))
def f_row(data):
rgb = (np.array(name2color[data.name]) * 255).astype(np.uint8)
return [
- f"background-color: rgba({rgb[0]},{rgb[1]},{rgb[2]},1.0)" for _ in range(len(data))
+ f"background-color: rgba({rgb[0]},{rgb[1]},{rgb[2]},1.0)"
+ for _ in range(len(data))
]
if "possible_roots" in df.columns:
diff --git a/happypose/toolbox/utils/models_compat.py b/happypose/toolbox/utils/models_compat.py
index b0c39b91..aa9207b8 100644
--- a/happypose/toolbox/utils/models_compat.py
+++ b/happypose/toolbox/utils/models_compat.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,9 +13,10 @@
limitations under the License.
"""
+
def change_keys_of_older_models(state_dict):
- new_state_dict = dict()
- for k, v in state_dict.items():
+ new_state_dict = {}
+ for k, _v in state_dict.items():
if k.startswith("backbone.backbone"):
new_k = "backbone." + k[len("backbone.backbone.") :]
elif k.startswith("backbone.head.0."):
diff --git a/happypose/toolbox/utils/omegaconf.py b/happypose/toolbox/utils/omegaconf.py
index 016d8bfe..e1f75266 100644
--- a/happypose/toolbox/utils/omegaconf.py
+++ b/happypose/toolbox/utils/omegaconf.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
from omegaconf import OmegaConf
diff --git a/happypose/toolbox/utils/random.py b/happypose/toolbox/utils/random.py
index 831ddab1..6424c323 100644
--- a/happypose/toolbox/utils/random.py
+++ b/happypose/toolbox/utils/random.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,22 +14,22 @@
"""
-
# Standard Library
import contextlib
import os
-import time
import random
-import webdataset as wds
+import time
-# Third Party
-import torch
import numpy as np
import pinocchio as pin
+# Third Party
+import torch
+import webdataset as wds
+
def make_seed(*args):
- """Copied from webdataset"""
+ """Copied from webdataset."""
seed = 0
for arg in args:
seed = (seed * 31 + hash(arg)) & 0x7FFFFFFF
diff --git a/happypose/toolbox/utils/resources.py b/happypose/toolbox/utils/resources.py
index 17d6c7a5..9fa1526a 100644
--- a/happypose/toolbox/utils/resources.py
+++ b/happypose/toolbox/utils/resources.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import os
import subprocess
@@ -52,12 +50,13 @@ def get_cuda_memory():
def get_gpu_memory():
-
devices = os.environ.get(
"CUDA_VISIBLE_DEVICES",
).split(",")
assert len(devices) == 1
- out = subprocess.check_output(["nvidia-smi", "--id=" + str(devices[0]), "-q", "--xml-format"])
+ out = subprocess.check_output(
+ ["nvidia-smi", "--id=" + str(devices[0]), "-q", "--xml-format"],
+ )
tree = ET.fromstring(out)
gpu = tree.findall("gpu")[0]
memory = float(gpu.find("fb_memory_usage").find("used").text.split(" ")[0]) / 1024
diff --git a/happypose/toolbox/utils/tensor_collection.py b/happypose/toolbox/utils/tensor_collection.py
index f0c35c83..c1bd696f 100644
--- a/happypose/toolbox/utils/tensor_collection.py
+++ b/happypose/toolbox/utils/tensor_collection.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
from pathlib import Path
@@ -26,16 +24,19 @@
# MegaPose
from happypose.toolbox.utils.distributed import get_rank, get_world_size
+
def concatenate(datas):
datas = [data for data in datas if len(data) > 0]
if len(datas) == 0:
return PandasTensorCollection(infos=pd.DataFrame())
classes = [data.__class__ for data in datas]
- assert all([class_n == classes[0] for class_n in classes])
+ assert all(class_n == classes[0] for class_n in classes)
- infos = pd.concat([data.infos for data in datas], axis=0, sort=False).reset_index(drop=True)
+ infos = pd.concat([data.infos for data in datas], axis=0, sort=False).reset_index(
+ drop=True,
+ )
tensor_keys = datas[0].tensors.keys()
- tensors = dict()
+ tensors = {}
for k in tensor_keys:
tensors[k] = torch.cat([getattr(data, k) for data in datas], dim=0)
return PandasTensorCollection(infos=infos, **tensors)
@@ -43,7 +44,7 @@ def concatenate(datas):
class TensorCollection:
def __init__(self, **kwargs):
- self.__dict__["_tensors"] = dict()
+ self.__dict__["_tensors"] = {}
for k, v in kwargs.items():
self.register_tensor(k, v)
@@ -61,8 +62,8 @@ def __repr__(self):
return s
def __getitem__(self, ids):
- tensors = dict()
- for k, v in self._tensors.items():
+ tensors = {}
+        for k in self._tensors:
tensors[k] = getattr(self, k)[ids]
return TensorCollection(**tensors)
@@ -91,7 +92,8 @@ def __setstate__(self, state):
def __setattr__(self, name, value):
if "_tensors" not in self.__dict__:
- raise ValueError("Please call __init__")
+ msg = "Please call __init__"
+ raise ValueError(msg)
if name in self._tensors:
self._tensors[name] = value
else:
@@ -118,8 +120,8 @@ def half(self):
return self.to(torch.half)
def clone(self):
- tensors = dict()
- for k, v in self.tensors.items():
+ tensors = {}
+        for k in self.tensors:
tensors[k] = getattr(self, k).clone()
return TensorCollection(**tensors)
@@ -128,7 +130,7 @@ class PandasTensorCollection(TensorCollection):
def __init__(self, infos, **tensors):
super().__init__(**tensors)
self.infos = infos.reset_index(drop=True)
- self.meta = dict()
+ self.meta = {}
def register_buffer(self, k, v):
assert len(v) == len(self)
@@ -194,3 +196,35 @@ def __setstate__(self, state):
self.__init__(state["infos"], **state["tensors"])
self.meta = state["meta"]
return
+
+
+def filter_top_pose_estimates(
+ data_TCO: PandasTensorCollection,
+ top_K: int,
+ group_cols: list[str],
+ filter_field: str,
+ ascending: bool = False,
+) -> PandasTensorCollection:
+ """Filter the pose estimates by retaining only the top-K coarse model scores.
+
+ Retain only the top_K estimates corresponding to each hypothesis_id
+
+ Args:
+ top_K: how many estimates to retain
+ group_cols: group of columns among which sorting should be done
+ filter_field: the field to filter estimates by
+        ascending: sort order for filter_field; False keeps the highest values.
+ """
+
+ df = data_TCO.infos
+
+ # Logic from https://stackoverflow.com/a/40629420
+ df = (
+ df.sort_values(filter_field, ascending=ascending)
+ .groupby(group_cols)
+ .head(top_K)
+ )
+
+ data_TCO_filtered = data_TCO[df.index.tolist()]
+
+ return data_TCO_filtered
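A toy frame of hypothetical scores makes the sort-then-group-then-head logic concrete:

```python
import pandas as pd

df = pd.DataFrame(
    {
        "hypothesis_id": [0, 0, 0, 1, 1],
        "pose_score": [0.9, 0.4, 0.7, 0.2, 0.8],
    }
)
top2 = df.sort_values("pose_score", ascending=False).groupby("hypothesis_id").head(2)
# hypothesis 0 keeps 0.9 and 0.7; hypothesis 1 keeps 0.8 and 0.2
```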
diff --git a/happypose/toolbox/utils/timer.py b/happypose/toolbox/utils/timer.py
index b9845092..cf646d15 100644
--- a/happypose/toolbox/utils/timer.py
+++ b/happypose/toolbox/utils/timer.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Standard Library
import datetime
diff --git a/happypose/toolbox/utils/transform_utils.py b/happypose/toolbox/utils/transform_utils.py
index a89812fc..dc69d309 100644
--- a/happypose/toolbox/utils/transform_utils.py
+++ b/happypose/toolbox/utils/transform_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,18 +19,17 @@
import torch
# MegaPose
-import happypose.pose_estimators.megapose as megapose
from happypose.pose_estimators.megapose.config import PROJECT_DIR
def load_SO3_grid(resolution):
- """
- The data.qua files were generated with the following code
- http://lavalle.pl/software/so3/so3.html
+ """The data.qua files were generated with the following code
+ http://lavalle.pl/software/so3/so3.html.
They are in (x,y,z,w) ordering
- Returns:
+ Returns
+ -------
rotmats: [N,3,3]
"""
megapose_dir = PROJECT_DIR / "happypose" / "pose_estimators" / "megapose"
@@ -43,7 +41,7 @@ def load_SO3_grid(resolution):
with open(data_fname) as fp:
lines = fp.readlines()
for line in lines:
- x, y, z, w = [float(i) for i in line.split()]
+ x, y, z, w = (float(i) for i in line.split())
quats.append([x, y, z, w])
quats = torch.tensor(quats)
@@ -52,14 +50,11 @@ def load_SO3_grid(resolution):
def compute_geodesic_distance(query, target):
- """
-
- Computes distance, in radians from query to target
+ """Computes distance, in radians from query to target
Args:
query: [N,3,3]
- target: [M,3,3]
+ target: [M,3,3].
"""
-
N = query.shape[0]
M = target.shape[0]
query_exp = query.unsqueeze(1).expand([-1, M, -1, -1]).flatten(0, 1)
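The expand/flatten above pairs every query with every target; the distance itself is the standard relative-rotation angle, theta = arccos((trace(R1^T R2) - 1) / 2). A hedged standalone version for two aligned [N,3,3] stacks:

```python
import torch


def geodesic_distance_aligned(R1, R2):
    # trace(R1^T @ R2), computed as a batched elementwise sum.
    trace = torch.einsum("...ij,...ij->...", R1, R2)
    # Clamp guards acos against round-off just outside [-1, 1].
    return torch.acos(torch.clamp((trace - 1.0) / 2.0, -1.0, 1.0))
```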
diff --git a/happypose/toolbox/utils/types.py b/happypose/toolbox/utils/types.py
index a4c083c8..d5bbdafb 100644
--- a/happypose/toolbox/utils/types.py
+++ b/happypose/toolbox/utils/types.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +13,5 @@
limitations under the License.
"""
-from typing import Tuple
-Resolution = Tuple[int, int]
+Resolution = tuple[int, int]
diff --git a/happypose/toolbox/utils/webdataset.py b/happypose/toolbox/utils/webdataset.py
index 6ea4ef86..3bccc167 100644
--- a/happypose/toolbox/utils/webdataset.py
+++ b/happypose/toolbox/utils/webdataset.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -51,9 +50,12 @@ def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=N
if current_sample is None or prefix != current_sample["__key__"]:
if valid_sample(current_sample):
yield current_sample
- current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
+ current_sample = {"__key__": prefix, "__url__": filesample["__url__"]}
if suffix in current_sample:
- print(f"{fname}: duplicate file name in tar file {suffix} {current_sample.keys()}")
+ print(
+ f"{fname}: duplicate file name in tar file {suffix} "
+ f"{current_sample.keys()}",
+ )
current_sample["__bad__"] = True
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
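For intuition: `base_plus_ext` splits each tar member name into a sample key and a suffix, and consecutive members sharing a key are folded into one sample dict. A simplified stand-in for the splitter (the real webdataset regex differs slightly):

```python
import re


def base_plus_ext(path):
    # "shard/0001.rgb.png" -> ("shard/0001", "rgb.png"); (None, None) if no dot.
    match = re.match(r"^((?:.*/)?[^.]+)\.(.*)$", path)
    return (None, None) if match is None else match.groups()


assert base_plus_ext("shard/0001.rgb.png") == ("shard/0001", "rgb.png")
```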
diff --git a/happypose/toolbox/utils/xarray.py b/happypose/toolbox/utils/xarray.py
index 2bbb3c20..8bcb3a24 100644
--- a/happypose/toolbox/utils/xarray.py
+++ b/happypose/toolbox/utils/xarray.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,7 +14,6 @@
"""
-
# Third Party
import numpy as np
diff --git a/happypose/toolbox/visualization/__init__.py b/happypose/toolbox/visualization/__init__.py
index 73a7b275..09aba5e2 100644
--- a/happypose/toolbox/visualization/__init__.py
+++ b/happypose/toolbox/visualization/__init__.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -13,4 +12,3 @@
See the License for the specific language governing permissions and
limitations under the License.
"""
-
diff --git a/happypose/toolbox/visualization/bokeh_plotter.py b/happypose/toolbox/visualization/bokeh_plotter.py
index 176e2b76..ca915d4b 100644
--- a/happypose/toolbox/visualization/bokeh_plotter.py
+++ b/happypose/toolbox/visualization/bokeh_plotter.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,11 +14,13 @@
"""
+from collections.abc import Iterator
+
# Standard Library
from hashlib import sha1
from itertools import cycle
from pathlib import Path
-from typing import Dict, Iterator, List, Optional, Tuple, Union
+from typing import Optional, Union
# Third Party
import bokeh
@@ -47,7 +48,7 @@ def __init__(
Contains an internal state `source_map` holding pointers to image data.
This can be useful for updating images in real-time without re-creating figures.
"""
- self.source_map: Dict[str, bokeh.models.sources.ColumnDataSource] = dict()
+ self.source_map: dict[str, bokeh.models.sources.ColumnDataSource] = {}
self.dump_image_dir = dump_image_dir
self.read_image_dir = read_image_dir
if is_notebook:
@@ -58,10 +59,13 @@ def hex_colors(self) -> Iterator[str]:
return cycle(sns.color_palette(n_colors=40).as_hex())
@property
- def colors(self) -> Iterator[Tuple[float, float, float]]:
+ def colors(self) -> Iterator[tuple[float, float, float]]:
return cycle(sns.color_palette(n_colors=40))
- def get_source(self, name: str) -> Tuple[bokeh.models.sources.ColumnDataSource, bool]:
+ def get_source(
+ self,
+ name: str,
+ ) -> tuple[bokeh.models.sources.ColumnDataSource, bool]:
if name in self.source_map:
source = self.source_map[name]
new = False
@@ -77,7 +81,6 @@ def plot_image(
figure: Optional[bokeh.plotting.figure] = None,
name: str = "image",
) -> bokeh.plotting.figure:
-
im_np = image_to_np_uint8(im)
h, w, _ = im_np.shape
@@ -89,18 +92,26 @@ def plot_image(
if self.dump_image_dir is not None:
if new:
- figure.image_url("url", x=0, y=0, w=w, h=h, source=source, anchor="bottom_left")
+ figure.image_url(
+ "url",
+ x=0,
+ y=0,
+ w=w,
+ h=h,
+ source=source,
+ anchor="bottom_left",
+ )
im_np.flags.writeable = False
im_hash = sha1(im_np.copy().data).hexdigest()
im_path = str(self.dump_image_dir / f"{im_hash}.jpg")
Image.fromarray(im_np).save(im_path)
im_url = str(self.read_image_dir) + str(Path(im_path).name)
print(im_url)
- source.data = dict(url=[im_url])
+ source.data = {"url": [im_url]}
else:
if new:
figure.image_rgba("image", x=0, y=0, dw=w, dh=h, source=source)
- source.data = dict(image=[to_rgba(im_np)])
+ source.data = {"image": [to_rgba(im_np)]}
return figure
def plot_overlay(
@@ -115,6 +126,7 @@ def plot_overlay(
All images are np.uint8 with values in (0, 255)
Args:
+ ----
rgb_input: (h, w, 3)
rgb_rendered: (h, w, 3) with values <15 px as background.
figure: Optional figure in which the data should be plotted.
@@ -133,18 +145,20 @@ def plot_detections(
self,
f: bokeh.plotting.figure,
detections: PandasTensorCollection,
- colors: Union[str, List[str]] = "red",
- text: Optional[Union[str, List[str]]] = None,
+ colors: Union[str, list[str]] = "red",
+ text: Optional[Union[str, list[str]]] = None,
text_auto: bool = True,
text_font_size: str = "8pt",
line_width: int = 2,
source_id: str = "",
) -> bokeh.plotting.figure:
-
boxes = detections.bboxes.cpu().numpy()
if text_auto:
if "score" in detections.infos.columns:
- text = [f"{row.label} {row.score:.2f}" for _, row in detections.infos.iterrows()]
+ text = [
+ f"{row.label} {row.score:.2f}"
+ for _, row in detections.infos.iterrows()
+ ]
else:
text = [f"{row.label}" for _, row in detections.infos.iterrows()]
@@ -196,7 +210,7 @@ def plot_detections(
text_font_size=text_font_size,
)
f.add_layout(labelset)
- data = dict(xs=xs, ys=ys, colors=patch_colors)
+ data = {"xs": xs, "ys": ys, "colors": patch_colors}
if text is not None:
data.update(text_x=text_x, text_y=text_y, text=text)
source.data = data
diff --git a/happypose/toolbox/visualization/bokeh_utils.py b/happypose/toolbox/visualization/bokeh_utils.py
index 7eb300c6..2611109d 100644
--- a/happypose/toolbox/visualization/bokeh_utils.py
+++ b/happypose/toolbox/visualization/bokeh_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -19,7 +18,7 @@
# Standard Library
from pathlib import Path
-from typing import Optional, Tuple
+from typing import Optional
# Third Party
import bokeh
@@ -49,7 +48,7 @@ def save_image_figure(f: bokeh.plotting.figure, im_path: Path) -> PIL.Image:
def to_rgba(im: np.ndarray) -> np.ndarray:
"""Converts (h, w, 3) to (h, w, 4) data for bokeh.
im must have values in (0, 255)
- NOTE: Maybe this could be simplified only using Pillow ?
+    NOTE: Maybe this could be simplified using only Pillow?
"""
out_im = np.empty((im.shape[0], im.shape[1]), dtype=np.uint32)
view = out_im.view(dtype=np.uint8).reshape((im.shape[0], im.shape[1], 4))
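The hunk cuts off before the channel fill; a self-contained version of the uint32-view trick, with the fill steps assumed:

```python
import numpy as np


def to_rgba_sketch(im):
    # Pack (h, w, 3) uint8 RGB into the flat (h, w) uint32 RGBA layout that
    # bokeh's image_rgba expects; the uint8 view aliases the same buffer.
    h, w = im.shape[:2]
    out = np.empty((h, w), dtype=np.uint32)
    view = out.view(dtype=np.uint8).reshape((h, w, 4))
    view[..., :3] = im
    view[..., 3] = 255  # fully opaque alpha
    return out
```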
@@ -65,7 +64,7 @@ def plot_image(
tools: str = "",
im_size: Optional[Resolution] = None,
figure: Optional[bokeh.plotting.figure] = None,
-) -> Tuple[bokeh.plotting.figure, bokeh.models.sources.ColumnDataSource]:
+) -> tuple[bokeh.plotting.figure, bokeh.models.sources.ColumnDataSource]:
if np.asarray(im).ndim == 2:
gray = True
else:
@@ -76,7 +75,7 @@ def plot_image(
h, w = im.shape[:2]
else:
h, w = im_size
- source = bokeh.models.sources.ColumnDataSource(dict(rgba=[im]))
+ source = bokeh.models.sources.ColumnDataSource({"rgba": [im]})
f = image_figure("rgba", source, im_size=(h, w), gray=gray, figure=figure)
return f, source
diff --git a/happypose/toolbox/visualization/meshcat_utils.py b/happypose/toolbox/visualization/meshcat_utils.py
index 07e3ff06..428f394f 100644
--- a/happypose/toolbox/visualization/meshcat_utils.py
+++ b/happypose/toolbox/visualization/meshcat_utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,14 +14,12 @@
"""
-
# Third Party
import meshcat
import meshcat.geometry as g
import meshcat.transformations as mtf
import numpy as np
import trimesh
-import trimesh.transformations as tra
"""
Some code borrowed from https://github.com/google-research/ravens
@@ -33,9 +30,11 @@
def isRotationMatrix(M, tol=1e-4):
"""Checks if something is a valid rotation matrix."""
tag = False
- I = np.identity(M.shape[0])
+    ident = np.identity(M.shape[0])
- if (np.linalg.norm((np.matmul(M, M.T) - I)) < tol) and (np.abs(np.linalg.det(M) - 1) < tol):
+    if (np.linalg.norm(np.matmul(M, M.T) - ident) < tol) and (
+ np.abs(np.linalg.det(M) - 1) < tol
+ ):
tag = True
if tag is False:
@@ -52,7 +51,6 @@ def trimesh_to_meshcat_geometry(mesh, use_vertex_colors=False):
Args:
mesh: trimesh.TriMesh object
"""
-
if use_vertex_colors:
visual = mesh.visual
if isinstance(visual, trimesh.visual.TextureVisuals):
@@ -62,17 +60,21 @@ def trimesh_to_meshcat_geometry(mesh, use_vertex_colors=False):
vertex_colors = vertex_colors / 255.0
else:
vertex_colors = None
- return meshcat.geometry.TriangularMeshGeometry(mesh.vertices, mesh.faces, vertex_colors)
+ return meshcat.geometry.TriangularMeshGeometry(
+ mesh.vertices,
+ mesh.faces,
+ vertex_colors,
+ )
def rgb2hex(rgb):
- """
- Converts rgb color to hex
+ """Converts rgb color to hex.
Args:
+ ----
rgb: color in rgb, e.g. (255,0,0)
"""
- return "0x%02x%02x%02x" % (rgb)
+ return "0x{:02x}{:02x}{:02x}".format(*rgb)
def visualize_mesh(vis, mesh, transform=None, color=None, texture_png=None):
@@ -93,7 +95,7 @@ def visualize_mesh(vis, mesh, transform=None, color=None, texture_png=None):
if texture_png is not None:
material = g.MeshLambertMaterial(
- map=g.ImageTexture(image=g.PngImage.from_file(texture_png))
+ map=g.ImageTexture(image=g.PngImage.from_file(texture_png)),
)
print("material")
@@ -103,9 +105,7 @@ def visualize_mesh(vis, mesh, transform=None, color=None, texture_png=None):
def visualize_scene(vis, object_dict, randomize_color=True):
-
for name, data in object_dict.items():
-
# try assigning a random color
if randomize_color:
if "color" in data:
@@ -134,8 +134,8 @@ def visualize_scene(vis, object_dict, randomize_color=True):
def create_visualizer(clear=True, zmq_url="tcp://127.0.0.1:6000"):
print(
- "Waiting for meshcat server... have you started a server? Run `meshcat-server` to start a"
- f" server. Communicating on zmq_url={zmq_url}"
+ "Waiting for meshcat server... have you started a server? Run `meshcat-server`"
+ f" to start a server. Communicating on zmq_url={zmq_url}",
)
vis = meshcat.Visualizer(zmq_url=zmq_url)
if clear:
@@ -146,7 +146,14 @@ def create_visualizer(clear=True, zmq_url="tcp://127.0.0.1:6000"):
def make_frame(
- vis, name, h=0.15, radius=0.001, o=1.0, T=None, transform=None, ignore_invalid_transform=False
+ vis,
+ name,
+ h=0.15,
+ radius=0.001,
+ o=1.0,
+ T=None,
+ transform=None,
+ ignore_invalid_transform=False,
):
"""Add a red-green-blue triad to the Meschat visualizer.
@@ -186,18 +193,28 @@ def make_frame(
transform = T
if transform is not None:
-
if not ignore_invalid_transform:
is_valid = isRotationMatrix(transform[:3, :3])
if not is_valid:
- raise ValueError("meshcat_utils:attempted to visualize invalid transform T")
+ msg = "meshcat_utils:attempted to visualize invalid transform T"
+ raise ValueError(msg)
vis[name].set_transform(transform)
-def draw_grasp(vis, line_name, transform, h=0.15, radius=0.001, o=1.0, color=[255, 0, 0]):
+def draw_grasp(
+ vis,
+ line_name,
+ transform,
+ h=0.15,
+ radius=0.001,
+ o=1.0,
+ color=[255, 0, 0],
+):
"""Draws line to the Meshcat visualizer.
+
Args:
+ ----
vis (Meshcat Visualizer): the visualizer
line_name (string): name for the line associated with the grasp.
transform (numpy array): 4x4 specifying transformation of grasps.
@@ -215,13 +232,15 @@ def draw_grasp(vis, line_name, transform, h=0.15, radius=0.001, o=1.0, color=[25
 def visualize_pointcloud(vis, name, pc, color=None, transform=None, **kwargs):
-    """
-    Args:
+    """Visualize a pointcloud in meshcat.
+
+    Args:
+    ----
vis: meshcat visualizer object
name: str
pc: Nx3 or HxWx3
     color: (optional) same shape as pc, values in [0, 255], or a single rgb tuple
- transform: (optional) 4x4 homogeneous transform
+ transform: (optional) 4x4 homogeneous transform.
"""
if pc.ndim == 3:
pc = pc.reshape(-1, pc.shape[-1])
@@ -241,7 +258,9 @@ def visualize_pointcloud(vis, name, pc, color=None, transform=None, **kwargs):
else:
color = np.ones_like(pc)
- vis[name].set_object(meshcat.geometry.PointCloud(position=pc.T, color=color.T, **kwargs))
+ vis[name].set_object(
+ meshcat.geometry.PointCloud(position=pc.T, color=color.T, **kwargs),
+ )
if transform is not None:
vis[name].set_transform(transform)
@@ -251,6 +270,7 @@ def visualize_bbox(vis, name, dims, transform=None, T=None):
"""Visualize a bounding box using a wireframe.
Args:
+ ----
vis (MeshCat Visualizer): the visualizer
name (string): name for this frame (should be unique)
dims (array-like): shape (3,), dimensions of the bounding box
@@ -276,17 +296,18 @@ def visualize_transform_manager(vis, tm, frame, **kwargs):
def get_pointcloud(depth, intrinsics, flatten=False, remove_zero_depth_points=True):
- """Projects depth image to pointcloud
+ """Projects depth image to pointcloud.
Args:
+ ----
depth: HxW float array of perspective depth in meters.
intrinsics: 3x3 float array of camera intrinsics matrix.
flatten: whether to flatten pointcloud
Returns:
+ -------
points: HxWx3 float array of 3D points in camera coordinates.
"""
-
height, width = depth.shape
xlin = np.linspace(0, width - 1, width)
ylin = np.linspace(0, height - 1, height)
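
Aside: the reworked `isRotationMatrix` applies a two-part test, orthogonality (M @ M.T close to the identity) plus a determinant of +1 to rule out reflections, both within `tol`. A self-contained sketch with a hypothetical rotation about the z-axis:

    import numpy as np

    theta = np.pi / 3  # hypothetical rotation angle
    R = np.array([
        [np.cos(theta), -np.sin(theta), 0.0],
        [np.sin(theta),  np.cos(theta), 0.0],
        [0.0,            0.0,           1.0],
    ])
    tol = 1e-4
    orthogonal = np.linalg.norm(np.matmul(R, R.T) - np.identity(3)) < tol
    proper = np.abs(np.linalg.det(R) - 1) < tol  # det = +1 rules out reflections
    assert orthogonal and proper                 # R passes the rotation-matrix check
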
diff --git a/happypose/toolbox/visualization/meshcat_visualizer.py b/happypose/toolbox/visualization/meshcat_visualizer.py
index c6d446d0..185ea449 100644
--- a/happypose/toolbox/visualization/meshcat_visualizer.py
+++ b/happypose/toolbox/visualization/meshcat_visualizer.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -15,13 +14,10 @@
"""
-
# Standard Library
-import io
from pathlib import Path
# Third Party
-import meshcat
import numpy as np
import trimesh
from meshcat.geometry import (
@@ -35,12 +31,17 @@
from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
# Local Folder
-from .meshcat_utils import create_visualizer, trimesh_to_meshcat_geometry
+from .meshcat_utils import create_visualizer
class MeshcatSceneViewer:
- def __init__(self, obj_ds_name, use_textures=True, zmq_url="tcp://127.0.0.1:6000", clear=True):
-
+ def __init__(
+ self,
+ obj_ds_name,
+ use_textures=True,
+ zmq_url="tcp://127.0.0.1:6000",
+ clear=True,
+ ):
self.obj_ds = make_object_dataset(obj_ds_name)
self.label_to_object = {}
self.visualizer = create_visualizer(zmq_url=zmq_url, clear=clear)
@@ -67,11 +68,14 @@ def get_meshcat_object(self, label):
# Needed to deal with the fact that some objects might
# be saved as trimesh.Scene instead of trimesh.Trimesh
if hasattr(mesh, "visual"):
- if isinstance(mesh.visual, trimesh.visual.TextureVisuals) and self.use_textures:
+ if (
+ isinstance(mesh.visual, trimesh.visual.TextureVisuals)
+ and self.use_textures
+ ):
texture_path = f"/dev/shm/{label}_texture.png"
mesh.visual.material.image.save(texture_path)
material = MeshLambertMaterial(
- map=ImageTexture(image=PngImage.from_file(texture_path))
+ map=ImageTexture(image=PngImage.from_file(texture_path)),
)
self.label_to_object[label] = (geometry, material)
return self.label_to_object[label]
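
Aside: a hedged usage sketch of the reflowed constructor; the dataset name below is hypothetical, and a running `meshcat-server` is assumed:

    from happypose.toolbox.visualization.meshcat_visualizer import MeshcatSceneViewer

    viewer = MeshcatSceneViewer(
        "ycbv.bop",                      # hypothetical obj_ds_name
        use_textures=True,               # texture maps get cached under /dev/shm
        zmq_url="tcp://127.0.0.1:6000",  # default meshcat-server address
        clear=True,
    )
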
diff --git a/happypose/toolbox/visualization/utils.py b/happypose/toolbox/visualization/utils.py
index 35a26f36..c1e83ce8 100644
--- a/happypose/toolbox/visualization/utils.py
+++ b/happypose/toolbox/visualization/utils.py
@@ -1,5 +1,4 @@
-"""
-Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+"""Copyright (c) 2022 Inria & NVIDIA CORPORATION & AFFILIATES. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +15,7 @@
# Standard Library
-from typing import Any, Dict, Optional, Tuple, Union
+from typing import Any, Optional, Union
# Third Party
import cv2
@@ -26,7 +25,7 @@
def image_to_np_uint8(im: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
- """Returns a np.uint8 image"""
+ """Returns a np.uint8 image."""
if isinstance(im, torch.Tensor):
im_np = im.detach().cpu().numpy()
else:
@@ -56,10 +55,9 @@ def get_mask_from_rgb(img: np.ndarray) -> np.ndarray:
def make_contour_overlay(
img: np.ndarray,
render: np.ndarray,
- color: Optional[Tuple[int, int, int]] = None,
+ color: Optional[tuple[int, int, int]] = None,
dilate_iterations: int = 1,
-) -> Dict[str, Any]:
-
+) -> dict[str, Any]:
if color is None:
color = (0, 255, 0)
@@ -120,7 +118,8 @@ def get_ds_info(ds_name):
urdf_ds_name = None # not sure if this exists
obj_ds_name = "custom.panda3d"
else:
- raise ValueError("Unknown dataset")
+ msg = "Unknown dataset"
+ raise ValueError(msg)
return urdf_ds_name, obj_ds_name
@@ -134,9 +133,9 @@ def draw_bounding_box(
"""Draw a bounding box onto a numpy array image.
Args:
+ ----
bbox: [xmin, ymin, xmax, ymax]
"""
-
if color is None:
color = [255, 0, 0]
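
Aside: the `typing.Tuple`/`typing.Dict` to builtin `tuple`/`dict` swap uses PEP 585 generics, which only parse on Python >= 3.9. A minimal sketch mirroring the new `make_contour_overlay` signature (the function name and body here are stand-ins):

    from typing import Any, Optional

    def overlay_signature(
        color: Optional[tuple[int, int, int]] = None,  # PEP 585 builtin generic
    ) -> dict[str, Any]:
        # stand-in body mirroring make_contour_overlay's green default
        return {"color": color if color is not None else (0, 255, 0)}

    assert overlay_signature() == {"color": (0, 255, 0)}
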
diff --git a/notebooks/cosypose/.gitignore b/notebooks/cosypose/.gitignore
index 34ab6337..d58c777b 100644
--- a/notebooks/cosypose/.gitignore
+++ b/notebooks/cosypose/.gitignore
@@ -1 +1 @@
-perso/
\ No newline at end of file
+perso/
diff --git a/notebooks/megapose/megapose_custom_model.ipynb b/notebooks/megapose/megapose_custom_model.ipynb
index 90609bb3..3d5b2d2f 100644
--- a/notebooks/megapose/megapose_custom_model.ipynb
+++ b/notebooks/megapose/megapose_custom_model.ipynb
@@ -1330,4 +1330,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
-}
\ No newline at end of file
+}
diff --git a/notebooks/megapose/megapose_estimator_visualization.ipynb b/notebooks/megapose/megapose_estimator_visualization.ipynb
index 9a557e5e..be23a8b5 100644
--- a/notebooks/megapose/megapose_estimator_visualization.ipynb
+++ b/notebooks/megapose/megapose_estimator_visualization.ipynb
@@ -243,6 +243,8 @@
"metadata": {},
"outputs": [],
"source": [
+ "from happypose.toolbox.utils.tensor_collection import filter_pose_estimates\n",
+ "\n",
"# Options for inference\n",
"use_gt_detections = True # Note, if you aren't using gt_detections then this should be false\n",
"n_refiner_iterations = 5\n",
@@ -294,10 +296,11 @@
" f\"model_time={extra_data['model_time']:.2f}, render_time={extra_data['render_time']:.2f}\")\n",
" \n",
" # Extract top-K coarse hypotheses\n",
- " data_TCO_filtered = pose_estimator.filter_pose_estimates(data_TCO_coarse, \n",
- " top_K=n_pose_hypotheses, \n",
- " filter_field='coarse_logit')\n",
- " \n",
+ " data_TCO_filtered = filter_pose_estimates(data_TCO_coarse,\n",
+ " top_K=n_pose_hypotheses, \n",
+ " group_cols=[\"batch_im_id\", \"label\", \"instance_id\"], \n",
+ " filter_field='coarse_logit')\n",
+ "\n",
" # Refine the top_K coarse hypotheses\n",
" preds, extra_data = pose_estimator.forward_refiner(observation_tensor, data_TCO_filtered, \n",
" n_iterations=n_refiner_iterations, keep_all_outputs=True)\n",
@@ -311,7 +314,10 @@
" data_TCO_scored, extra_data = pose_estimator.forward_scoring_model(observation_tensor, data_TCO_refined)\n",
"\n",
" # Extract the highest scoring pose estimate for each instance_id\n",
- " data_TCO_final = pose_estimator.filter_pose_estimates(data_TCO_scored, top_K=1, filter_field='pose_logit')\n",
+ " data_TCO_final = filter_pose_estimates(data_TCO_scored, \n",
+ " top_K=1, \n",
+ " group_cols=[\"batch_im_id\", \"label\", \"instance_id\"], \n",
+ " filter_field='pose_logit')\n",
" \n",
" \n",
" if run_depth_refiner:\n",
@@ -969,4 +975,4 @@
},
"nbformat": 4,
"nbformat_minor": 5
-}
\ No newline at end of file
+}
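
Aside: both notebook hunks apply the same migration, from the `pose_estimator.filter_pose_estimates` method to the imported free function, which now takes `group_cols` explicitly. The call shape, with `data_TCO_scored` standing for the scoring-model output as in the notebook:

    from happypose.toolbox.utils.tensor_collection import filter_pose_estimates

    data_TCO_final = filter_pose_estimates(
        data_TCO_scored,                                     # output of forward_scoring_model
        top_K=1,                                             # best hypothesis per group
        group_cols=["batch_im_id", "label", "instance_id"],  # grouping is now explicit
        filter_field="pose_logit",                           # score column used for ranking
    )
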
diff --git a/notebooks/megapose/render_megapose_dataset.ipynb b/notebooks/megapose/render_megapose_dataset.ipynb
index 91ee69fd..9561b43c 100644
--- a/notebooks/megapose/render_megapose_dataset.ipynb
+++ b/notebooks/megapose/render_megapose_dataset.ipynb
@@ -1189,4 +1189,4 @@
},
"nbformat": 4,
"nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/poetry.lock b/poetry.lock
index 361db1ec..3a1e6a9e 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,5 +1,16 @@
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
+[[package]]
+name = "addict"
+version = "2.4.0"
+description = "Addict is a dictionary whose items can be set using both attribute and item syntax."
+optional = false
+python-versions = "*"
+files = [
+ {file = "addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc"},
+ {file = "addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494"},
+]
+
[[package]]
name = "alabaster"
version = "0.7.13"
@@ -11,6 +22,52 @@ files = [
{file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
]
+[[package]]
+name = "ansi2html"
+version = "1.8.0"
+description = ""
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "ansi2html-1.8.0-py3-none-any.whl", hash = "sha256:ef9cc9682539dbe524fbf8edad9c9462a308e04bce1170c32daa8fdfd0001785"},
+ {file = "ansi2html-1.8.0.tar.gz", hash = "sha256:38b82a298482a1fa2613f0f9c9beb3db72a8f832eeac58eb2e47bf32cd37f6d5"},
+]
+
+[package.extras]
+docs = ["Sphinx", "setuptools-scm", "sphinx-rtd-theme"]
+test = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "antlr4-python3-runtime"
+version = "4.9.3"
+description = "ANTLR 4.9.3 runtime for Python 3.7"
+optional = false
+python-versions = "*"
+files = [
+ {file = "antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b"},
+]
+
+[[package]]
+name = "anyio"
+version = "4.0.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "anyio-4.0.0-py3-none-any.whl", hash = "sha256:cfdb2b588b9fc25ede96d8db56ed50848b0b649dca3dd1df0b11f683bb9e0b5f"},
+ {file = "anyio-4.0.0.tar.gz", hash = "sha256:f7ed51751b2c2add651e5747c891b47e26d2a21be5d32d9311dfe9692f3e5d7a"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.22)"]
+
[[package]]
name = "appnope"
version = "0.1.3"
@@ -24,34 +81,52 @@ files = [
[[package]]
name = "asttokens"
-version = "2.2.1"
+version = "2.4.0"
description = "Annotate AST trees with source code positions"
optional = false
python-versions = "*"
files = [
- {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"},
- {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"},
+ {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"},
+ {file = "asttokens-2.4.0.tar.gz", hash = "sha256:2e0171b991b2c959acc6c49318049236844a5da1d65ba2672c4880c1c894834e"},
]
[package.dependencies]
-six = "*"
+six = ">=1.12.0"
[package.extras]
test = ["astroid", "pytest"]
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
[[package]]
name = "babel"
-version = "2.12.1"
+version = "2.13.1"
description = "Internationalization utilities"
optional = false
python-versions = ">=3.7"
files = [
- {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"},
- {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"},
+ {file = "Babel-2.13.1-py3-none-any.whl", hash = "sha256:7077a4984b02b6727ac10f1f7294484f737443d7e2e66c5e4380e41a3ae0b4ed"},
+ {file = "Babel-2.13.1.tar.gz", hash = "sha256:33e0952d7dd6374af8dbf6768cc4ddf3ccfefc244f9986d4074704f2fbd18900"},
]
-[package.dependencies]
-pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
+[package.extras]
+dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"]
[[package]]
name = "backcall"
@@ -64,38 +139,49 @@ files = [
{file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"},
]
+[[package]]
+name = "beautifulsoup4"
+version = "4.12.2"
+description = "Screen-scraping library"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
+ {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
[[package]]
name = "black"
-version = "23.3.0"
+version = "23.10.1"
description = "The uncompromising code formatter."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"},
- {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"},
- {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"},
- {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"},
- {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"},
- {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"},
- {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"},
- {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"},
- {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"},
- {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"},
- {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"},
- {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"},
- {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"},
- {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"},
- {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"},
- {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"},
- {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"},
- {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"},
- {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"},
- {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"},
- {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"},
- {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"},
- {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"},
- {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"},
- {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"},
+ {file = "black-23.10.1-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:ec3f8e6234c4e46ff9e16d9ae96f4ef69fa328bb4ad08198c8cee45bb1f08c69"},
+ {file = "black-23.10.1-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:1b917a2aa020ca600483a7b340c165970b26e9029067f019e3755b56e8dd5916"},
+ {file = "black-23.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c74de4c77b849e6359c6f01987e94873c707098322b91490d24296f66d067dc"},
+ {file = "black-23.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:7b4d10b0f016616a0d93d24a448100adf1699712fb7a4efd0e2c32bbb219b173"},
+ {file = "black-23.10.1-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:b15b75fc53a2fbcac8a87d3e20f69874d161beef13954747e053bca7a1ce53a0"},
+ {file = "black-23.10.1-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:e293e4c2f4a992b980032bbd62df07c1bcff82d6964d6c9496f2cd726e246ace"},
+ {file = "black-23.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d56124b7a61d092cb52cce34182a5280e160e6aff3137172a68c2c2c4b76bcb"},
+ {file = "black-23.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:3f157a8945a7b2d424da3335f7ace89c14a3b0625e6593d21139c2d8214d55ce"},
+ {file = "black-23.10.1-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:cfcce6f0a384d0da692119f2d72d79ed07c7159879d0bb1bb32d2e443382bf3a"},
+ {file = "black-23.10.1-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:33d40f5b06be80c1bbce17b173cda17994fbad096ce60eb22054da021bf933d1"},
+ {file = "black-23.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:840015166dbdfbc47992871325799fd2dc0dcf9395e401ada6d88fe11498abad"},
+ {file = "black-23.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:037e9b4664cafda5f025a1728c50a9e9aedb99a759c89f760bd83730e76ba884"},
+ {file = "black-23.10.1-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:7cb5936e686e782fddb1c73f8aa6f459e1ad38a6a7b0e54b403f1f05a1507ee9"},
+ {file = "black-23.10.1-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:7670242e90dc129c539e9ca17665e39a146a761e681805c54fbd86015c7c84f7"},
+ {file = "black-23.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed45ac9a613fb52dad3b61c8dea2ec9510bf3108d4db88422bacc7d1ba1243d"},
+ {file = "black-23.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:6d23d7822140e3fef190734216cefb262521789367fbdc0b3f22af6744058982"},
+ {file = "black-23.10.1-py3-none-any.whl", hash = "sha256:d431e6739f727bb2e0495df64a6c7a5310758e87505f5f8cde9ff6c0f2d7e4fe"},
+ {file = "black-23.10.1.tar.gz", hash = "sha256:1f8ce316753428ff68749c65a5f7844631aa18c8679dfd3ca9dc1a289979c258"},
]
[package.dependencies]
@@ -105,7 +191,7 @@ packaging = ">=22.0"
pathspec = ">=0.9.0"
platformdirs = ">=2"
tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
+typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
[package.extras]
colorama = ["colorama (>=0.4.3)"]
@@ -113,121 +199,191 @@ d = ["aiohttp (>=3.7.4)"]
jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
uvloop = ["uvloop (>=0.15.2)"]
+[[package]]
+name = "bokeh"
+version = "3.3.0"
+description = "Interactive plots and applications in the browser from Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "bokeh-3.3.0-py3-none-any.whl", hash = "sha256:65e36824c99fd46530c559263c6d14eabed6945370cd3beffc2eeedb62d6db6d"},
+ {file = "bokeh-3.3.0.tar.gz", hash = "sha256:cdbe268f842c139ba8fa0fb43c0c55c172c8215ec5a69a2629482c63c9d4039c"},
+]
+
+[package.dependencies]
+contourpy = ">=1"
+Jinja2 = ">=2.9"
+numpy = ">=1.16"
+packaging = ">=16.8"
+pandas = ">=1.2"
+pillow = ">=7.1.0"
+PyYAML = ">=3.10"
+tornado = ">=5.1"
+xyzservices = ">=2021.09.1"
+
+[[package]]
+name = "bop_toolkit_lib"
+version = "1.0"
+description = ""
+optional = true
+python-versions = "*"
+files = []
+develop = false
+
+[package.dependencies]
+cython = "*"
+PyOpenGL = "3.1.0"
+pypng = "*"
+pytz = "*"
+vispy = ">=0.6.5"
+
+[package.source]
+type = "git"
+url = "https://github.com/thodan/bop_toolkit"
+reference = "HEAD"
+resolved_reference = "aa7119c17018f84bf194de61f5895d6b58059482"
+
+[[package]]
+name = "braceexpand"
+version = "0.1.7"
+description = "Bash-style brace expansion for Python"
+optional = false
+python-versions = "*"
+files = [
+ {file = "braceexpand-0.1.7-py2.py3-none-any.whl", hash = "sha256:91332d53de7828103dcae5773fb43bc34950b0c8160e35e0f44c4427a3b85014"},
+ {file = "braceexpand-0.1.7.tar.gz", hash = "sha256:e6e539bd20eaea53547472ff94f4fb5c3d3bf9d0a89388c4b56663aba765f705"},
+]
+
[[package]]
name = "certifi"
-version = "2022.12.7"
+version = "2023.7.22"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
- {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
- {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
+ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+ {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
]
[[package]]
name = "cfgv"
-version = "3.3.1"
+version = "3.4.0"
description = "Validate configuration and produce human readable error messages."
optional = false
-python-versions = ">=3.6.1"
+python-versions = ">=3.8"
files = [
- {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"},
- {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"},
+ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
+ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
]
[[package]]
name = "charset-normalizer"
-version = "3.1.0"
+version = "3.3.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
- {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
- {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
- {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
- {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
- {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
- {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
+ {file = "charset-normalizer-3.3.1.tar.gz", hash = "sha256:d9137a876020661972ca6eec0766d81aef8a5627df628b664b234b73396e727e"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8aee051c89e13565c6bd366813c386939f8e928af93c29fda4af86d25b73d8f8"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:352a88c3df0d1fa886562384b86f9a9e27563d4704ee0e9d56ec6fcd270ea690"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:223b4d54561c01048f657fa6ce41461d5ad8ff128b9678cfe8b2ecd951e3f8a2"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f861d94c2a450b974b86093c6c027888627b8082f1299dfd5a4bae8e2292821"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1171ef1fc5ab4693c5d151ae0fdad7f7349920eabbaca6271f95969fa0756c2d"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28f512b9a33235545fbbdac6a330a510b63be278a50071a336afc1b78781b147"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0e842112fe3f1a4ffcf64b06dc4c61a88441c2f02f373367f7b4c1aa9be2ad5"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3f9bc2ce123637a60ebe819f9fccc614da1bcc05798bbbaf2dd4ec91f3e08846"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:f194cce575e59ffe442c10a360182a986535fd90b57f7debfaa5c845c409ecc3"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9a74041ba0bfa9bc9b9bb2cd3238a6ab3b7618e759b41bd15b5f6ad958d17605"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b578cbe580e3b41ad17b1c428f382c814b32a6ce90f2d8e39e2e635d49e498d1"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:6db3cfb9b4fcecb4390db154e75b49578c87a3b9979b40cdf90d7e4b945656e1"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:debb633f3f7856f95ad957d9b9c781f8e2c6303ef21724ec94bea2ce2fcbd056"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-win32.whl", hash = "sha256:87071618d3d8ec8b186d53cb6e66955ef2a0e4fa63ccd3709c0c90ac5a43520f"},
+ {file = "charset_normalizer-3.3.1-cp310-cp310-win_amd64.whl", hash = "sha256:e372d7dfd154009142631de2d316adad3cc1c36c32a38b16a4751ba78da2a397"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae4070f741f8d809075ef697877fd350ecf0b7c5837ed68738607ee0a2c572cf"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58e875eb7016fd014c0eea46c6fa92b87b62c0cb31b9feae25cbbe62c919f54d"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dbd95e300367aa0827496fe75a1766d198d34385a58f97683fe6e07f89ca3e3c"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de0b4caa1c8a21394e8ce971997614a17648f94e1cd0640fbd6b4d14cab13a72"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:985c7965f62f6f32bf432e2681173db41336a9c2611693247069288bcb0c7f8b"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a15c1fe6d26e83fd2e5972425a772cca158eae58b05d4a25a4e474c221053e2d"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae55d592b02c4349525b6ed8f74c692509e5adffa842e582c0f861751701a673"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be4d9c2770044a59715eb57c1144dedea7c5d5ae80c68fb9959515037cde2008"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:851cf693fb3aaef71031237cd68699dded198657ec1e76a76eb8be58c03a5d1f"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:31bbaba7218904d2eabecf4feec0d07469284e952a27400f23b6628439439fa7"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:871d045d6ccc181fd863a3cd66ee8e395523ebfbc57f85f91f035f50cee8e3d4"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:501adc5eb6cd5f40a6f77fbd90e5ab915c8fd6e8c614af2db5561e16c600d6f3"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f5fb672c396d826ca16a022ac04c9dce74e00a1c344f6ad1a0fdc1ba1f332213"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-win32.whl", hash = "sha256:bb06098d019766ca16fc915ecaa455c1f1cd594204e7f840cd6258237b5079a8"},
+ {file = "charset_normalizer-3.3.1-cp311-cp311-win_amd64.whl", hash = "sha256:8af5a8917b8af42295e86b64903156b4f110a30dca5f3b5aedea123fbd638bff"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:7ae8e5142dcc7a49168f4055255dbcced01dc1714a90a21f87448dc8d90617d1"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5b70bab78accbc672f50e878a5b73ca692f45f5b5e25c8066d748c09405e6a55"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ceca5876032362ae73b83347be8b5dbd2d1faf3358deb38c9c88776779b2e2f"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34d95638ff3613849f473afc33f65c401a89f3b9528d0d213c7037c398a51296"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edbe6a5bf8b56a4a84533ba2b2f489d0046e755c29616ef8830f9e7d9cf5728"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6a02a3c7950cafaadcd46a226ad9e12fc9744652cc69f9e5534f98b47f3bbcf"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10b8dd31e10f32410751b3430996f9807fc4d1587ca69772e2aa940a82ab571a"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edc0202099ea1d82844316604e17d2b175044f9bcb6b398aab781eba957224bd"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b891a2f68e09c5ef989007fac11476ed33c5c9994449a4e2c3386529d703dc8b"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:71ef3b9be10070360f289aea4838c784f8b851be3ba58cf796262b57775c2f14"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:55602981b2dbf8184c098bc10287e8c245e351cd4fdcad050bd7199d5a8bf514"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:46fb9970aa5eeca547d7aa0de5d4b124a288b42eaefac677bde805013c95725c"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:520b7a142d2524f999447b3a0cf95115df81c4f33003c51a6ab637cbda9d0bf4"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-win32.whl", hash = "sha256:8ec8ef42c6cd5856a7613dcd1eaf21e5573b2185263d87d27c8edcae33b62a61"},
+ {file = "charset_normalizer-3.3.1-cp312-cp312-win_amd64.whl", hash = "sha256:baec8148d6b8bd5cee1ae138ba658c71f5b03e0d69d5907703e3e1df96db5e41"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:63a6f59e2d01310f754c270e4a257426fe5a591dc487f1983b3bbe793cf6bac6"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d6bfc32a68bc0933819cfdfe45f9abc3cae3877e1d90aac7259d57e6e0f85b1"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4f3100d86dcd03c03f7e9c3fdb23d92e32abbca07e7c13ebd7ddfbcb06f5991f"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39b70a6f88eebe239fa775190796d55a33cfb6d36b9ffdd37843f7c4c1b5dc67"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e12f8ee80aa35e746230a2af83e81bd6b52daa92a8afaef4fea4a2ce9b9f4fa"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b6cefa579e1237ce198619b76eaa148b71894fb0d6bcf9024460f9bf30fd228"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:61f1e3fb621f5420523abb71f5771a204b33c21d31e7d9d86881b2cffe92c47c"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4f6e2a839f83a6a76854d12dbebde50e4b1afa63e27761549d006fa53e9aa80e"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:1ec937546cad86d0dce5396748bf392bb7b62a9eeb8c66efac60e947697f0e58"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:82ca51ff0fc5b641a2d4e1cc8c5ff108699b7a56d7f3ad6f6da9dbb6f0145b48"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:633968254f8d421e70f91c6ebe71ed0ab140220469cf87a9857e21c16687c034"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-win32.whl", hash = "sha256:c0c72d34e7de5604df0fde3644cc079feee5e55464967d10b24b1de268deceb9"},
+ {file = "charset_normalizer-3.3.1-cp37-cp37m-win_amd64.whl", hash = "sha256:63accd11149c0f9a99e3bc095bbdb5a464862d77a7e309ad5938fbc8721235ae"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5a3580a4fdc4ac05f9e53c57f965e3594b2f99796231380adb2baaab96e22761"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2465aa50c9299d615d757c1c888bc6fef384b7c4aec81c05a0172b4400f98557"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb7cd68814308aade9d0c93c5bd2ade9f9441666f8ba5aa9c2d4b389cb5e2a45"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e43805ccafa0a91831f9cd5443aa34528c0c3f2cc48c4cb3d9a7721053874b"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:854cc74367180beb327ab9d00f964f6d91da06450b0855cbbb09187bcdb02de5"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c15070ebf11b8b7fd1bfff7217e9324963c82dbdf6182ff7050519e350e7ad9f"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c4c99f98fc3a1835af8179dcc9013f93594d0670e2fa80c83aa36346ee763d2"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3fb765362688821404ad6cf86772fc54993ec11577cd5a92ac44b4c2ba52155b"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:dced27917823df984fe0c80a5c4ad75cf58df0fbfae890bc08004cd3888922a2"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a66bcdf19c1a523e41b8e9d53d0cedbfbac2e93c649a2e9502cb26c014d0980c"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ecd26be9f112c4f96718290c10f4caea6cc798459a3a76636b817a0ed7874e42"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f70fd716855cd3b855316b226a1ac8bdb3caf4f7ea96edcccc6f484217c9597"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:17a866d61259c7de1bdadef418a37755050ddb4b922df8b356503234fff7932c"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-win32.whl", hash = "sha256:548eefad783ed787b38cb6f9a574bd8664468cc76d1538215d510a3cd41406cb"},
+ {file = "charset_normalizer-3.3.1-cp38-cp38-win_amd64.whl", hash = "sha256:45f053a0ece92c734d874861ffe6e3cc92150e32136dd59ab1fb070575189c97"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bc791ec3fd0c4309a753f95bb6c749ef0d8ea3aea91f07ee1cf06b7b02118f2f"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0c8c61fb505c7dad1d251c284e712d4e0372cef3b067f7ddf82a7fa82e1e9a93"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2c092be3885a1b7899cd85ce24acedc1034199d6fca1483fa2c3a35c86e43041"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2000c54c395d9e5e44c99dc7c20a64dc371f777faf8bae4919ad3e99ce5253e"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4cb50a0335382aac15c31b61d8531bc9bb657cfd848b1d7158009472189f3d62"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c30187840d36d0ba2893bc3271a36a517a717f9fd383a98e2697ee890a37c273"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe81b35c33772e56f4b6cf62cf4aedc1762ef7162a31e6ac7fe5e40d0149eb67"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d0bf89afcbcf4d1bb2652f6580e5e55a840fdf87384f6063c4a4f0c95e378656"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:06cf46bdff72f58645434d467bf5228080801298fbba19fe268a01b4534467f5"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:3c66df3f41abee950d6638adc7eac4730a306b022570f71dd0bd6ba53503ab57"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:cd805513198304026bd379d1d516afbf6c3c13f4382134a2c526b8b854da1c2e"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:9505dc359edb6a330efcd2be825fdb73ee3e628d9010597aa1aee5aa63442e97"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:31445f38053476a0c4e6d12b047b08ced81e2c7c712e5a1ad97bc913256f91b2"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-win32.whl", hash = "sha256:bd28b31730f0e982ace8663d108e01199098432a30a4c410d06fe08fdb9e93f4"},
+ {file = "charset_normalizer-3.3.1-cp39-cp39-win_amd64.whl", hash = "sha256:555fe186da0068d3354cdf4bbcbc609b0ecae4d04c921cc13e209eece7720727"},
+ {file = "charset_normalizer-3.3.1-py3-none-any.whl", hash = "sha256:800561453acdecedaac137bf09cd719c7a440b6800ec182f077bb8e7025fb708"},
]
[[package]]
name = "click"
-version = "8.1.3"
+version = "8.1.7"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.7"
files = [
- {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
- {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
]
[package.dependencies]
@@ -235,131 +391,38 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "cmeel"
-version = "0.35.0"
+version = "0.50.2"
description = "Create Wheel from CMake projects"
optional = false
-python-versions = ">=3.7,<4.0"
+python-versions = ">=3.8,<4.0"
files = [
- {file = "cmeel-0.35.0-py3-none-any.whl", hash = "sha256:6d942d910abe5241c7a5081ec4ada94e39f2bb1baa941987a540c5bb3ae28b66"},
- {file = "cmeel-0.35.0.tar.gz", hash = "sha256:e6124ab444593577386aa8cdbf27a51648cad9f324dbf01ebf51349e66c68a7a"},
+ {file = "cmeel-0.50.2-py3-none-any.whl", hash = "sha256:271de85eff8ef17f04e835e7edcbff4ede5188f1c2efaacf29ae35e7f9ec67cd"},
+ {file = "cmeel-0.50.2.tar.gz", hash = "sha256:6e646a9dbef76c865fd3a9a7e667eb8727a28347dd6ef9cfde4ff6cbec2e1d34"},
]
[package.dependencies]
-tomli = ">=2.0.1,<3.0.0"
+tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
[package.extras]
-build = ["cmake (>=3.22.3,<4.0.0)", "packaging (>=23.0,<24.0)", "wheel (>=0.38.4,<0.39.0)"]
+build = ["cmake (>=3.27.2,<4.0.0)", "git-archive-all (>=1.23.1,<2.0.0)", "packaging (>=23.1,<24.0)", "wheel (>=0.41.1,<0.42.0)"]
[[package]]
name = "cmeel-assimp"
-version = "5.2.5"
+version = "5.2.5.1"
description = "cmeel distribution for assimp, Open-Asset-Importer-Library Repository"
optional = false
python-versions = ">= 3.7"
files = [
- {file = "cmeel_assimp-5.2.5-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b0fe81c5b77713dab1f668fcf7ad66c11090f443f5b7690bb253afa5dd704d33"},
- {file = "cmeel_assimp-5.2.5-0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ef84f047655a93a817df279aa431e471dee81b986a4f98b28a2269c5fdaacab1"},
- {file = "cmeel_assimp-5.2.5-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:b41051361b32c8f45075a28a2fb837e894b0c3b0a3c010476f0e03b9a3c995ed"},
- {file = "cmeel_assimp-5.2.5-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d4f27602141079731ce5944fbe107d0e0ebddb944cab97093a88e3bd0469f838"},
- {file = "cmeel_assimp-5.2.5-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ab52e94569f7f8945bfe4bd7d5dc618e739022607b8152d81196b174b9b8d63f"},
- {file = "cmeel_assimp-5.2.5-0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7448b223834ec98e8b4d8b9b0ca91d68f55ba26ab31aa83406389acd3f107cd9"},
- {file = "cmeel_assimp-5.2.5-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:2f837e0cc25580e1012f0487463072298260f7c432b66b1f36bc8a9eb976bf45"},
- {file = "cmeel_assimp-5.2.5-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2edd52fd8133fdead81e0acbefde0097de19086bba6daee86773ad70e2f1c910"},
- {file = "cmeel_assimp-5.2.5-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:05d17daa970e3cf5e0841c7395f1c0cb67b36f63b85bd872ca869e560fa3ad28"},
- {file = "cmeel_assimp-5.2.5-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:86887ff6e9bbf580aa57c5a1db11519d98968fbf1d69fc7590350802583e20ab"},
- {file = "cmeel_assimp-5.2.5-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b93febfb8d9c28d4ed209a37d3eb00689ea705a80a2d4097984187b972f5bc3"},
- {file = "cmeel_assimp-5.2.5-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bf48fab6bc28b5c6b63849b71c56a4f710be0c1b6affff70ed51fe1ee4e8d6e1"},
- {file = "cmeel_assimp-5.2.5-0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bb6488a7a9b50ce0c2b5da7e6ad52681adbdbb0a897ea82a4754baf7c39dff2e"},
- {file = "cmeel_assimp-5.2.5-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:8b7b15857d02d5ac39c3612192c01d4983fe8a0df455ddab9e02d5adeb61dae9"},
- {file = "cmeel_assimp-5.2.5-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4cf182aba68f32890da4f8fa2a1229db354cb8abfe62edc552a0a2103019339e"},
- {file = "cmeel_assimp-5.2.5-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b53c04137d1d9ed78c3af60ca5408eeeceb3265c87400bef210eacaf711200e9"},
- {file = "cmeel_assimp-5.2.5-0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ce8d50843086b989ee161c212c25f4e461f49a5749812457020f9eba1f81af74"},
- {file = "cmeel_assimp-5.2.5-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:19d84d2b78c01488b114a8579e3f528de5703c912e6ee1178679171d1e9a9ee4"},
- {file = "cmeel_assimp-5.2.5-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e63044d6596ab7f14dad9730f6b35fb923e30da6d62bc552e745ce1ff45c126"},
- {file = "cmeel_assimp-5.2.5-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:80057a70f495d2256754509800e65071d93360c5fde0b54dc13de00818333cf0"},
- {file = "cmeel_assimp-5.2.5-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:fef5b2fd07c75dbadfdeae5eeb008caf1fd83623a891effe2884ae34b7c91634"},
- {file = "cmeel_assimp-5.2.5-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:09e3a84d4319ed5d88ada779ac2c95d59e3be76b0be8d1cfec95fa9e7cfbde9b"},
- {file = "cmeel_assimp-5.2.5-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:86b3f8e0e947710dd7ddb285a7d90d034391adc12fa91f36c60d69070fcf342f"},
- {file = "cmeel_assimp-5.2.5-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5f87ec09c2e47ab8966c1e8b7428140aec5fb746fdd865b4cde18e1b0b41b880"},
- {file = "cmeel_assimp-5.2.5-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c3d12b70cc5d76b68a9ad9d77de03751f3cd5c7237231880b1980a8c06d61d43"},
- {file = "cmeel_assimp-5.2.5-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8685bac63d82941331b8576ce196ec54f08f106188493810b7d5bc07b459501a"},
- {file = "cmeel_assimp-5.2.5-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b6a2deb727e14841deb474f8a88a512c54a58927feeb181bb65342158234c48a"},
- {file = "cmeel_assimp-5.2.5-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c272b38b41ac2a5b9e18ecaed73e04f8e2733a052b473b83fdb87139a5758cc8"},
- {file = "cmeel_assimp-5.2.5-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4664ffe82ee434662144d00cdadae2c96953126f1b604d57bd2421fafad473b"},
- {file = "cmeel_assimp-5.2.5-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:6e565601b49e138a1e6095ba731f8a3dd5675d3cfdfcbbf320a351e32aff9a08"},
- {file = "cmeel_assimp-5.2.5-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:0d671abe2c51067d3fe33ce4ba88933146a65a1d3d8d7d0a15fc5bee33cfbd46"},
- {file = "cmeel_assimp-5.2.5-10-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:67a42847e7ba59065144b34cf7276ac2374dab96fd66d157c438a67d500a11b4"},
- {file = "cmeel_assimp-5.2.5-10-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4cc98116dbe41041977ff50931b990c15d1675a74aeb9ef2eaabe29b7555d369"},
- {file = "cmeel_assimp-5.2.5-10-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:e14b68f0b4135d0d25a637e44c9c04da9afdbc057c41ff54292aab2b65bc0c2c"},
- {file = "cmeel_assimp-5.2.5-10-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:885a3f782136ed4f404da20e65daa512d93cf91c3260319c3c820e95b10a4b53"},
- {file = "cmeel_assimp-5.2.5-10-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:74e22fcaf3a1ea2a26b8b1787f7d214f6a70a2455ee4089cb46319d7b43ee02d"},
- {file = "cmeel_assimp-5.2.5-11-py3-none-macosx_11_0_arm64.whl", hash = "sha256:83c4ce82ff9b0580db2692f092cf371999f691ddd54d950855fd2a0eff51ca75"},
- {file = "cmeel_assimp-5.2.5-12-py3-none-macosx_11_0_arm64.whl", hash = "sha256:62b096dd42dfa2af1d39240f13d852dccb02304750859a65652c4e705cf64eb0"},
- {file = "cmeel_assimp-5.2.5-12-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:d5f530c000af4a5b08b097a75b47900814eb08d63693b60e0f64c6aaad2725a3"},
- {file = "cmeel_assimp-5.2.5-12-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:877f3ee0b29a8e963dd4fdd31946a10e361972f1b6048d4ce626d19512e37ef9"},
- {file = "cmeel_assimp-5.2.5-12-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:6465bf9012d0416e50780846d2ff605da898e0d6fc984fe9abc6771041d4aaa0"},
- {file = "cmeel_assimp-5.2.5-3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e48880c4f9f7df7ccb9c856b0d0cd0fa7541d24df9a1ea83af92a20802490a56"},
- {file = "cmeel_assimp-5.2.5-3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07c8b73950baa9568d4792bfc2b7c100e492c72f54e548299973f1d6ab730734"},
- {file = "cmeel_assimp-5.2.5-3-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:aa214ebf6539f70ce448ec2a222768dd61152a47db4f33d6c50eed2ce3a47271"},
- {file = "cmeel_assimp-5.2.5-3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61900a23f77a1dcad5684c012b1b6da3cf2dab42f467a792dbc5c200976b052e"},
- {file = "cmeel_assimp-5.2.5-3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:112b4cea71fc7444b750d265edd4b14821cc0ce97ce72e2664b02d42cddd63a1"},
- {file = "cmeel_assimp-5.2.5-3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6aa402c81457a941c3f9d83e286e659a0112a796358340ba6389ea161cf023b0"},
- {file = "cmeel_assimp-5.2.5-3-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:06319ebd2ebe00fdbe5d137f80e818800f2e17217d60578303fcd92450f2f590"},
- {file = "cmeel_assimp-5.2.5-3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ac558847b3f71063604da719351330ede1b5144336671f911d8414438b07ebd9"},
- {file = "cmeel_assimp-5.2.5-3-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:264d3d3a18d40b8269ab955d77fa903ed376e35f8003f1b38e97a20c203bf889"},
- {file = "cmeel_assimp-5.2.5-3-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:5fd949b25eeb5aaf371c3204202313da465acf9d191e4b17b966f85931facd45"},
- {file = "cmeel_assimp-5.2.5-3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2ae7fc7820b86ff4eab45511e22f030ac3900216bd6d8892a88d4352baf5a3b2"},
- {file = "cmeel_assimp-5.2.5-3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a7d366d0bbd0151072b7ceb24aca672891a2b7f787c09a02bafedd6a61b84880"},
- {file = "cmeel_assimp-5.2.5-3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:77aa0d712bd2063fe086e900438f480b28bc5b8844f65712153a6c9c6f01dc66"},
- {file = "cmeel_assimp-5.2.5-3-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:207451aaec46223767ec4fac418e762d47c1e1076a56bd1fc7626afedc33f561"},
- {file = "cmeel_assimp-5.2.5-3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3248085586f074d48fcc2a26794a6ab9c398c524f26685635aff073aae601d6f"},
- {file = "cmeel_assimp-5.2.5-3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aa74eac5ae39bf2e725b79f645f6fe079685e454bf4d539cc9612f1553ccd742"},
- {file = "cmeel_assimp-5.2.5-3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:95f31286b11c8a1777e2175444996651913f6c1f569762279c0108c3aced1f7a"},
- {file = "cmeel_assimp-5.2.5-3-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:31e562af95be0770033aeecc0de9de7d89fe7a1fdd946ac45294ce54ab9ffe67"},
- {file = "cmeel_assimp-5.2.5-3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:819c0c0bad7730e6e700233cf8017cdaccb830a77966e7dab270e6e1e8f05830"},
- {file = "cmeel_assimp-5.2.5-3-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:3f0ad441fd032d3c8f9b8132d05f52cdb8df2deb56664f9927d1f5e5de934552"},
- {file = "cmeel_assimp-5.2.5-3-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:9f8ffd8324433bb59c761f6eb09c13258ca25fa4280cc7258633f4938143d398"},
- {file = "cmeel_assimp-5.2.5-3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8e693eb1f46ebc5736a80cf98cbd78ce233c6fccec83da9d15798f8219d461aa"},
- {file = "cmeel_assimp-5.2.5-3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:4e9d3ca207c771c1390bd6db5aea050540162d491800f9ed680040648e14dbd9"},
- {file = "cmeel_assimp-5.2.5-3-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:ef8be798b8b34b84444ef2abf586e8b0dafa695064a2d34e1217f51b8ee9ca4b"},
- {file = "cmeel_assimp-5.2.5-3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:82945169cb81532ce6dcff28915c1faeb524ef85c3438331149216c28816c118"},
- {file = "cmeel_assimp-5.2.5-3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a4247413ad4716f53101f799e43772291a850329e59e651e1322059e7b8259a9"},
- {file = "cmeel_assimp-5.2.5-3-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:13f68956ffc0c34798ff5ed063414932af10132ef6e98235a27e487fef817cdd"},
- {file = "cmeel_assimp-5.2.5-4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9fe05e5018c745fbe8bf734f3a782f318a463c8add4467a5d6656423efecd243"},
- {file = "cmeel_assimp-5.2.5-4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c47843e9897c600e2ac9e83fda63b565f65829034764a93044f6145f993c4912"},
- {file = "cmeel_assimp-5.2.5-4-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:3b6b51c209e4dc9e8ac466c0d1f704ee3f2a4fda4c3900da276098aa695bd13a"},
- {file = "cmeel_assimp-5.2.5-4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:83083a59bebd7f892fb090fa9fa467741539614d0a9d16fe79687f6331d010ba"},
- {file = "cmeel_assimp-5.2.5-4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a4d7c6a11eb59719ac9bfbbbcd61fbfec89cd03b9d41324f0c92c48e16031fa"},
- {file = "cmeel_assimp-5.2.5-4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:29e07e904a4d8393a41a5b993b3c3701199021debb3207203ca5613510de543a"},
- {file = "cmeel_assimp-5.2.5-4-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:86356e63a2f18a23686df1c1e3bd1cbfa876ef7057abaa93ccc1da3f31961265"},
- {file = "cmeel_assimp-5.2.5-4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74545bf6fcace82ab91ce39885e0435005b0964d1ad806541ad70c34a5084992"},
- {file = "cmeel_assimp-5.2.5-4-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1f63d63208dac71bdd9f9f8f151446fb35cb80a6d5cd0ffe64f313a7a875cfbc"},
- {file = "cmeel_assimp-5.2.5-4-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:fa2ea6a2e939f52f9807e94577ac3344d5911bbef2f56f5c21fc398ffcd77311"},
- {file = "cmeel_assimp-5.2.5-4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:74ff6d134e403c334dbb9c8ae1c66deaaec67f0e928bad16e092f48d6d108b00"},
- {file = "cmeel_assimp-5.2.5-4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:04ddc379b7491463c895afffb44d3b43829ce923bee46047cb2d35aa5c4dc925"},
- {file = "cmeel_assimp-5.2.5-4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cb4350c77d7cfbc246938593ef48ad7f29fd13b3a41f38207ae6d4b6bda2c043"},
- {file = "cmeel_assimp-5.2.5-4-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:432a9e5c47099496bfa53180032cfe3587d646f48d223da2d999c95a9e31b72b"},
- {file = "cmeel_assimp-5.2.5-4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:340be386a071bd56333eed1e25fcba1908154b39d1daeccd0d8f09dd0f385d95"},
- {file = "cmeel_assimp-5.2.5-4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2ada292f994be2264d03b6dbc1ed897ca580524d28a91e2bdc015edd2cdf4a85"},
- {file = "cmeel_assimp-5.2.5-4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:88159e103e414e08b79a09315cdec122eeea14fb1b6da75f9ecb72a65752e9e8"},
- {file = "cmeel_assimp-5.2.5-4-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:d68fe473d2bb54a0150f5a70ec63c87d622855cdc0a21fa90f4761db7600eb9b"},
- {file = "cmeel_assimp-5.2.5-4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ffc540b9aa8a0b127e8c7c829d321ef4eab134c01fe9565429a6c3eaabfcc637"},
- {file = "cmeel_assimp-5.2.5-4-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:5a84d3f90aebc9ffabe2d3aa3b809f5a148b830a80e371bb1fe6df6800162888"},
- {file = "cmeel_assimp-5.2.5-4-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:a3d04d74b0c4043a7e10ea0098fe5d2ca9570e72a7c2d859acdda691c2405a05"},
- {file = "cmeel_assimp-5.2.5-4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d2f3476f7329279133ba7d3a1fe5ff4846f5624cae69a2ffb2388e393d0adfab"},
- {file = "cmeel_assimp-5.2.5-4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:a77a2717ac0bc49bc8eed4732e7b080da41abfef301a0521faeb19cfe4b8fc24"},
- {file = "cmeel_assimp-5.2.5-4-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:4e4db7bb9fa9a041a5c8a67af2b70ea3494f5f1940c48c01605de06ad6470cb7"},
- {file = "cmeel_assimp-5.2.5-4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6954c2df01b70b550c631b8dfc482e12887a2653554830d0cad0ecf3f311b99e"},
- {file = "cmeel_assimp-5.2.5-4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ddc3ec2ca3f9d3182a9346ab7e3f84e9f31c7ad1b27a8a36a754516509369bc"},
- {file = "cmeel_assimp-5.2.5-4-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:824e92560d63ce3ebf4401d9359b0890e02191d1bc5f1d554be3defd61e8ad4f"},
- {file = "cmeel_assimp-5.2.5-5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e4a1ba07b5984890c347d246d2fdb24233372da6a80a07cf86656be73207bec4"},
- {file = "cmeel_assimp-5.2.5-6-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:d8c1e13d43b961a198127bd507b221e6ece24dbfa129599790ffccd1c769cf0e"},
- {file = "cmeel_assimp-5.2.5-6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0458bef842351c66c09c7d669f01dce3b0fc912582ac9086a7409feef27fdd20"},
- {file = "cmeel_assimp-5.2.5-6-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:b5612b895a9bbee9e8fc631517448d29c2e106440ab1fbddd768abb0d8281009"},
- {file = "cmeel_assimp-5.2.5-6-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:e6ad7553128385b887409d6147189573c7f9756952dae7058324d225e85069e7"},
- {file = "cmeel_assimp-5.2.5-7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:0397f3cd6835d5a37cdb02566688223e84b813d1819ab92f4ab31de83f11c5ba"},
- {file = "cmeel_assimp-5.2.5-8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:24deafb30c1d7c9c1822708103a9b1193d7f7e5f2efded06739822688e182bd9"},
- {file = "cmeel_assimp-5.2.5-9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3c382e5bf7b942266a12cce4a81b8e23a987710e95f4610df1d43f7911b28449"},
+ {file = "cmeel_assimp-5.2.5.1-0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:e670147aaf496b2a3c7df77085e8447a5248e5315e753c58779306e77de0339e"},
+ {file = "cmeel_assimp-5.2.5.1-0-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:2ec936fd91cc7ecec6b709a01dc99f3f8e480ca51a412605d8757c71a4bb766e"},
+ {file = "cmeel_assimp-5.2.5.1-0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:e356f9358d72869db3be75c1177272f30da9cb82910f05ba8d9ead4c41d95756"},
+ {file = "cmeel_assimp-5.2.5.1-0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:2b9eaf019170fe35196eff5db7ac17ab8a7b2812307ed607e91c2788a41ccffe"},
+ {file = "cmeel_assimp-5.2.5.1-0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:29541e02d7b7275db4504e60676015ef0a5dd5f27f379758ad3f993c56916d7f"},
+ {file = "cmeel_assimp-5.2.5.1-1-py3-none-macosx_12_0_arm64.whl", hash = "sha256:dbb8abd89c7701dcd0d29470f76d35bd3d33d86aad0f35f47910d8a4aad00fff"},
+ {file = "cmeel_assimp-5.2.5.1-1-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:98e597ed23e56992148c86e1dd7453cde44d0a2ae2bbfbad8deb41ffa005f031"},
+ {file = "cmeel_assimp-5.2.5.1-1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:44eea04b6c0a87fcd8c00fdf77ab0baed1e35652f6a8c814b3017b79b0c5738e"},
+ {file = "cmeel_assimp-5.2.5.1-1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:6dd9db07ff2729af1166e4fc78745719363e8fa7949f2682664f3b29a6e874f9"},
+ {file = "cmeel_assimp-5.2.5.1-1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:1ece9d59c63ecc765892e68efacd907ac660a95c7fbb62599556ac22e0bbcba8"},
]
[package.dependencies]
@@ -367,261 +430,96 @@ cmeel = "*"
[[package]]
name = "cmeel-boost"
-version = "1.81.0"
+version = "1.82.0"
description = "cmeel distribution for boost, which provides free peer-reviewed portable C++ source libraries."
optional = false
-python-versions = ">= 3.7"
-files = [
- {file = "cmeel_boost-1.81.0-10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:69b38effb61069b47ac796925dade5072c6d560fdce54fab03d45a46445540bd"},
- {file = "cmeel_boost-1.81.0-10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8c59fb1a5ab97fc5bafafcff057b498151e9896487d896eaf06c3160d7a9b63"},
- {file = "cmeel_boost-1.81.0-10-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:3088a0f8eee3743749e99ea922923608e1255da18ca35d47d291a51f1de4f7b2"},
- {file = "cmeel_boost-1.81.0-10-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:84eb59ae337d5bb8be66c1e17e0e62ecbd5a39c46fb46873b16c872e79248c39"},
- {file = "cmeel_boost-1.81.0-10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bfdedb85e54c260feb7815476d8cd079c93565f94b3e4b9fa388da888d5aff61"},
- {file = "cmeel_boost-1.81.0-10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02238c9258c3a3b475d4941bca3fc7aec879821f5f5066c7aac62476e0030e1d"},
- {file = "cmeel_boost-1.81.0-10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4aa99465f402841c9f90f755478c0066470f72823b6ca8d235caae724c6abdc4"},
- {file = "cmeel_boost-1.81.0-10-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:fcd458f89a3c1e41f5e4c4aac04a8abd381e7b0c4a08b8e53383d9a87cd3cfde"},
- {file = "cmeel_boost-1.81.0-10-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:f7d9d69f0db3199320a8e2d21ec7c37866cee903c7017d4f7c00ad25cbc00f57"},
- {file = "cmeel_boost-1.81.0-10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:847457ce8ba3fbacd26e4bcec52804c8a911fe8a909cd180d4e7b349af6758ed"},
- {file = "cmeel_boost-1.81.0-10-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:92ac138c14ccfe90364e62de451973045179b2038d70743b01aabb8c3271688b"},
- {file = "cmeel_boost-1.81.0-10-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:dad0163d3905a7bf1440a701bcb67ccb3f7ff5a2d9683c1402aead4b27f34899"},
- {file = "cmeel_boost-1.81.0-10-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:6ad44f738c3c292ff4b31729dba39335f3dbdc696784df9229c51ba9b9a7bf77"},
- {file = "cmeel_boost-1.81.0-10-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:26c4e91e5e6347d3a92ed8871a26a319fe325382d48a0d8819da1b1988363180"},
- {file = "cmeel_boost-1.81.0-10-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:02a6cb1ec41a4e946ee2f3ce76cae6a54d5a61cd4dca84219c52aed3298e70e3"},
- {file = "cmeel_boost-1.81.0-10-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5ff4e4a9d033698c06c8cfd7da40475f11880004004266fda854652acd708f48"},
- {file = "cmeel_boost-1.81.0-10-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:18fac9a9c87242c9b7ff571fa7646b3ce87fa6ea62c4b6973820921c1f18e8a2"},
- {file = "cmeel_boost-1.81.0-10-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f10381cd6da435301b69ff979dd6dfdbaec33dbaf86478845308bb74db70caf5"},
- {file = "cmeel_boost-1.81.0-10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:de218eb735dc70e52d45d701a5aa0899c5a1d59a5f3ef9067b1f183dc3550d29"},
- {file = "cmeel_boost-1.81.0-10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d1dbeefca41737e1c922d27f9850b2a186ecd31bca688e8a487982da39ff356"},
- {file = "cmeel_boost-1.81.0-10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:64684f8c0cc74ff1c953b17a088615e15f9a64156787b1fde91e4e70e82b591f"},
- {file = "cmeel_boost-1.81.0-10-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:86ee54c0f82783f0718d7ef8d1a7ecc26ef68d4e18328124a4c0d488abd93975"},
- {file = "cmeel_boost-1.81.0-10-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:32b4e7927f0987e30682d3de2461b5310b236e4aab2942f82e5826e7c47611ee"},
- {file = "cmeel_boost-1.81.0-10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7543f4e47906f5af51717d2d0301fd9858c61de6da28b3f4d619d1ef562db565"},
- {file = "cmeel_boost-1.81.0-10-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:72f03b397d8f2b8b3152d16740532ef896d77594e35f3b8e9cfa742d9d19a75d"},
- {file = "cmeel_boost-1.81.0-10-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:e6ba54c38aae961271e58923d3f229035112095077eb30bdcb5abe7ebed5350a"},
- {file = "cmeel_boost-1.81.0-10-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:07313788f4abaad0c0be6015733c5519147acba123d466ba78c4d41fd6983d26"},
- {file = "cmeel_boost-1.81.0-10-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:6309646c4355cade3f2f3e7697f32d206377ccf68b19ca31d5f4cc4df11a5baf"},
- {file = "cmeel_boost-1.81.0-10-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c2685dfbdedb34e3bd2a7046503e5fbc287d5f27c1ef31ebb9e3b5fee457e7c2"},
- {file = "cmeel_boost-1.81.0-10-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a957c6efc15afb8dda5b5d462621aa29f04d87d9ba388d497df6cafab6884407"},
- {file = "cmeel_boost-1.81.0-10-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a5b4563e02ed2c4dccdfc01a3394a3caf8cddc38d1047ad8d84735303aeef502"},
- {file = "cmeel_boost-1.81.0-10-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:b708738493cb8d871307eda7b3aa5afb64035a4b7582fe690104ee59bdca39cb"},
- {file = "cmeel_boost-1.81.0-11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a965ab7a1e3a90e8f354c2882e1bdd87bf99f689213182d9b8e108d24c37b5b7"},
- {file = "cmeel_boost-1.81.0-11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7cf91c64c72a63c72e9f708ca04ba773bac89176d2449b26f04ae24a93a213c0"},
- {file = "cmeel_boost-1.81.0-11-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:992ba0179f433c66873ff41578dabd506c196f1d1e7cf05e935f518188cf22cf"},
- {file = "cmeel_boost-1.81.0-11-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:294b73ba36603eab35101cb017dfec33b8cceb181e9e841276f6016fd5f390d8"},
- {file = "cmeel_boost-1.81.0-11-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdd96efaef328637d21b9c87a3d0f67bfc5a87384a19662efa345d77595bef40"},
- {file = "cmeel_boost-1.81.0-11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0bf4bc412836dfe0ffdbab0a40188f743675b34d6b6bed25d090e15feec87b1d"},
- {file = "cmeel_boost-1.81.0-11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d30748a2e240170b419fc68015a314d3aa7d10a94fd2cb1e10f16134b488d9f8"},
- {file = "cmeel_boost-1.81.0-11-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:494a636a402480dcac64c71930b6f9faa91c24b6b0690a2df43f36e93972a195"},
- {file = "cmeel_boost-1.81.0-11-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:4ac476d41b46335580ff400743402d6fb6234c982695082674e8e3908e8d475b"},
- {file = "cmeel_boost-1.81.0-11-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bbaffca8c8cb077daed421caf802e36fb89f4ce1276fa842a895cffeabd841e9"},
- {file = "cmeel_boost-1.81.0-11-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:a6ad899186afd3f90c79a1c46341e8d4ab08e4acd91dc222dd787065f853c9ee"},
- {file = "cmeel_boost-1.81.0-11-cp37-cp37m-macosx_12_0_x86_64.whl", hash = "sha256:af8a9da6388e512d7b151786eebf4f50fa69a7f8c24e0d1e7801369099743f52"},
- {file = "cmeel_boost-1.81.0-11-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:a5816d0a9a527f236773b950f57043ee700c1bc879df659305f586d53447dd54"},
- {file = "cmeel_boost-1.81.0-11-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:60efb0b96ee9b450530f939275119aad44a13b795b4de8dcd1e7d8a87e54d203"},
- {file = "cmeel_boost-1.81.0-11-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:940e0845f311724a3500c5c2614e542216d6ac798dde23d74ee65928117ff494"},
- {file = "cmeel_boost-1.81.0-11-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:133196c5170b5a36a619cfcbc995ba68d7281630b4552bc677ea442316c028eb"},
- {file = "cmeel_boost-1.81.0-11-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e53fc6c8dd4b42ca01ccfa95218a2bada931c93378464e28a6959a90b71774be"},
- {file = "cmeel_boost-1.81.0-11-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:8c07cfc9f13309ac7682d301e09e39f66e73e795d721508239d1aeb3c7ea8bdc"},
- {file = "cmeel_boost-1.81.0-11-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:fb551a1eea6dea350201682e45991d4ef9a9a4ec61c406a5376e20bf4482f36b"},
- {file = "cmeel_boost-1.81.0-11-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:29c6d0a93f24274472a570be744643ba55a2b30a067421e76551a0b7be6fc95c"},
- {file = "cmeel_boost-1.81.0-11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3739705649fb73bed7d026e515c13ef798b6829687b59317559a68f0fc17151c"},
- {file = "cmeel_boost-1.81.0-11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0a39edf942aaea1203ac76c1f4dde051b0d22946b6801e3ca4bd90c606eb014c"},
- {file = "cmeel_boost-1.81.0-11-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:90fab2da3cd92dd6da1413ccdd2c3465141752b5d79daab65dda7c492a8580f9"},
- {file = "cmeel_boost-1.81.0-11-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:66e9e982b3a18674d973f62657d6bc9d3b66293f266eb3fd8661b0cda655c4ca"},
- {file = "cmeel_boost-1.81.0-11-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4eecb46ddfd3c96edafd24ec105a088ebb135f23a4b4d6af90abad55914606fc"},
- {file = "cmeel_boost-1.81.0-11-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:aadda27978562fa1e303023e8ee9e3feb11b2040715611a76a8d65a17fee89a5"},
- {file = "cmeel_boost-1.81.0-11-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:fb8bd6c1daa5c4dc0b4dde5d5647f85f252aad3e1719443f98ac4feb016d8cf6"},
- {file = "cmeel_boost-1.81.0-11-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:199b4058366a413028525478b7fd60af3aae3d9455b4e27ce6b0905c5caf74d1"},
- {file = "cmeel_boost-1.81.0-11-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c802baed94a3174217ca6c653a9a83c780f2df1311cc02b1c25f9034b13b1d43"},
- {file = "cmeel_boost-1.81.0-11-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:7c5ecce62227962d5f564bc49e0e284391e2ad11e87696bf8cf6b12cc9c6cde3"},
- {file = "cmeel_boost-1.81.0-11-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:396c3f98fdc570a2a3592e9162a674b18e07157c22c297b8eb7f0ebf7227b9af"},
- {file = "cmeel_boost-1.81.0-11-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2d36ea1cb5ea2a52a5b39808fa458cf64fe7441ca92adbf3e8ef94fe94f5b549"},
- {file = "cmeel_boost-1.81.0-11-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:3f15003d0aceb77ca6b59148a17d5f1f220251ed2d72e4fcb73b614664a3aa43"},
- {file = "cmeel_boost-1.81.0-12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:32692d2e7df218e279b3b6e35efbe67770e9f969853912d1d39e4797d2209321"},
- {file = "cmeel_boost-1.81.0-12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f83f82bc1019347eb9aaeed350de7686c2065dd8015dcf2e13da9d4e0163392b"},
- {file = "cmeel_boost-1.81.0-12-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:c04cc597262a9dbc5e1201eb74c86b4d0df0b1ae38e29a9dcdce4ca528cf4395"},
- {file = "cmeel_boost-1.81.0-12-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:996b731ac897ccdf7bd88de3b29e4e27e689f0f8c3a9b255ffa82150ac1df11b"},
- {file = "cmeel_boost-1.81.0-12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6ad72df1b311981b8748f7cfbcb0ea8415e343496fe5accccc5f8f4dddfb1778"},
- {file = "cmeel_boost-1.81.0-12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:67a60e6e767e9b2bc0defb1f805df28c5f658327443493cfa504e2987d42724e"},
- {file = "cmeel_boost-1.81.0-12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7dc3547cb2f8d669de19c05c59369f9ca62673d6aa0dd7239ac1e38f30fa8e0f"},
- {file = "cmeel_boost-1.81.0-12-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:67755fba009a7f0e46657150118cc8025f93fa9dfaa061a56cafea728b42d934"},
- {file = "cmeel_boost-1.81.0-12-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:07f62b0e28a0a5f11ad7913dc7cc591842dfa5c7f67e18100632de45df46200a"},
- {file = "cmeel_boost-1.81.0-12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb105154e2b203433cd932be4431281ff8d11d01b0c7bc1eb4acad7952206eba"},
- {file = "cmeel_boost-1.81.0-12-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:403defee93a6ac2769e3cd2f242b232842fe3d0abe26df52bb673617014f28cb"},
- {file = "cmeel_boost-1.81.0-12-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:63e61832523aac18632e05ec00bb6878c5a25288b6543cbcfc0fbcfebbee86b5"},
- {file = "cmeel_boost-1.81.0-12-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:17fa3c976911840dba7ac9a2e85b744a54a94d84a421bbb763d180c050e181b4"},
- {file = "cmeel_boost-1.81.0-12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:60ce8114203e351e980d19090abdaeb1796730219710d548caf61535263211c3"},
- {file = "cmeel_boost-1.81.0-12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c6fa631ae2bb7a4b5e81e09561e47e5cbed9b73fd362b850d6e4778db752297a"},
- {file = "cmeel_boost-1.81.0-12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ce0e75ccec4d5c6c1bf7198302511c0392c517b7f1ea05e6c09c469f069dc98e"},
- {file = "cmeel_boost-1.81.0-12-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:73c51282066a2f794af626d2fb90f9803a3d2b257947ac70009fabfceb3c3590"},
- {file = "cmeel_boost-1.81.0-12-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:65751f57f5892f48ff47e2ac28e32ab1aa428afb89388eddd0df2de679ce021f"},
- {file = "cmeel_boost-1.81.0-12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c83c6ae6ee3eb1192ecd3fe500f055774479f10f77071d143a7e0d800fa0ad6d"},
- {file = "cmeel_boost-1.81.0-12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0485300067a99202a460035d84cf82c75495e5b59f1ef86eea47016d636b90be"},
- {file = "cmeel_boost-1.81.0-12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:80dbc9f8ee3b4a49c4da3e3138230a1192aa649cb445254860993a115b327a3c"},
- {file = "cmeel_boost-1.81.0-12-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:05ef5d6c537d9d85ee3c22d7e95ebbc99a2f791f81eab0366785a297f5760c05"},
- {file = "cmeel_boost-1.81.0-12-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:7586960cb43517d0c35615614e46682b228ea9c5d6ccf54a5d59350d1735caeb"},
- {file = "cmeel_boost-1.81.0-12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4ba8151bab9ab1e9f6177fde04decb1c5d9ca2b06e55dae6e1671a2eb5a4df98"},
- {file = "cmeel_boost-1.81.0-12-pp37-pypy37_pp73-macosx_10_15_x86_64.whl", hash = "sha256:4dbf30acfcc2875b11dc1b7a96e845e2bf81d302cb40cf2e3d1b61c1537d204c"},
- {file = "cmeel_boost-1.81.0-12-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:0b8b629ef8dddf624f72fcee8f0e22a1d26cc481b9a6e5faa84ad340ec5ed45d"},
- {file = "cmeel_boost-1.81.0-12-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e860a3198d05dc693d81398d690206cb220f304e6cf2c22f4f1851f4200c5fde"},
- {file = "cmeel_boost-1.81.0-12-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c8936e320e3de023bbd48b4b0797c7e020558e8ae971eba97cf1b5f533619591"},
- {file = "cmeel_boost-1.81.0-12-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:eac9940add0e1db2e9ca835a236390449cd4b29b6d233e106e1caff521c627bf"},
- {file = "cmeel_boost-1.81.0-12-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bb24652090f5d1598d2d4d10af1a4443b3bdf230f5580437b5f965d046e45400"},
- {file = "cmeel_boost-1.81.0-12-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:aa64dde45657571afb6c2fa13973f8d8a5645f66faf87e90141c83cf212775fd"},
- {file = "cmeel_boost-1.81.0-12-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:99a2ef4b470af46e27cbf7582233edf2af05856b36e941a5a59b6c279118555f"},
- {file = "cmeel_boost-1.81.0-8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6f8b1aebfd6e9e3059a515b3696e947bc817e2b15533789e8308a594c0a90c6e"},
- {file = "cmeel_boost-1.81.0-8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41cfb2641c8e55ee6a6cb95f5a7400eb6d34205ad5250c1725acf8ef34f4ac83"},
- {file = "cmeel_boost-1.81.0-8-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:93916c4397add37cc5f30db8c0227a490e6e86f69a63f6e0e633f89467e466df"},
- {file = "cmeel_boost-1.81.0-8-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8f9ab262a65741d25aa3975c3675077da2810da4f283b9a0d735c8c3b6907cbc"},
- {file = "cmeel_boost-1.81.0-8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a898c7cc2f2b90f55911db8f926a8a00627100e54316cf1e336da027342eee5f"},
- {file = "cmeel_boost-1.81.0-8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:36c53ae1be39627df3005cc119dcb1805e52e239f9210d697bc500b887e7e1d8"},
- {file = "cmeel_boost-1.81.0-8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0e3a5b2bf3754269db4b607b8cc31386cf067515d9e8524418d1bb2a754dc4a9"},
- {file = "cmeel_boost-1.81.0-8-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:dfb51539466d85d2b1d1fb081da14698b44538f15ff94d9f65bbb3cd017e48fb"},
- {file = "cmeel_boost-1.81.0-8-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:0bc262c08794dc36850e89eefd1d71defda989860c712f33d796b0688afabdfc"},
- {file = "cmeel_boost-1.81.0-8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:017df5f18bb01de5e6021632d80886d296211f97a4004fdf6524a2ec212003c3"},
- {file = "cmeel_boost-1.81.0-8-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:fe47a3490265587d57bfffa5dc51a61f15af8a6c2b4debffaf9124af5b671b4d"},
- {file = "cmeel_boost-1.81.0-8-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:16be86569cc651345142702961480c29b11fad54b8f18d8dc36249b37f7f7b35"},
- {file = "cmeel_boost-1.81.0-8-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:bbd07ccd7a6c2ee9aa89c952170c86a46de747f85a5e0b348f26e6bd6edfc575"},
- {file = "cmeel_boost-1.81.0-8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:dbbb24fe333ea235860c6ae73573000961c1c6cbacff406c4482e009e87c0d77"},
- {file = "cmeel_boost-1.81.0-8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:267d95496ed34b9bf0aeba9bfcf2c5afa3c719b30281cb198fee26eacb63841f"},
- {file = "cmeel_boost-1.81.0-8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ea1a9096632d7625059b8428518cffed0be03295094ced5dc907a26253deb1b7"},
- {file = "cmeel_boost-1.81.0-8-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:44157c39a3e6cf8e2cdd5a98c5bf0bc6264ecd2aae8b6169e284f63dee7136fc"},
- {file = "cmeel_boost-1.81.0-8-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:297c03e06718765cbec13e294e840fb6a7c5847788427cc066d7161e1cd5123b"},
- {file = "cmeel_boost-1.81.0-8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5658556cd485beda429692ab4ec24c218fffd7150019e51b38efa1122995ec74"},
- {file = "cmeel_boost-1.81.0-8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6bedd097c3a08439f2d22cb87679b4cd1b1e0ff106895e63a46ea038bc2aa0b4"},
- {file = "cmeel_boost-1.81.0-8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8942c2c355addcd228e1ee5292d4814573849d70547fa375efce8395e6176a2d"},
- {file = "cmeel_boost-1.81.0-8-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:1567513705b5fb572a60a9c18f23ea35e79af42fb7f7ece28e8f14c93bc94205"},
- {file = "cmeel_boost-1.81.0-8-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:23dfa9b720896c7ea39dc724a3ef9bf17818a497fbf9bc863b15db5b6cdc91f0"},
- {file = "cmeel_boost-1.81.0-8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c49bf7fe2ed5a389cee6454c934e155d28b5feac8d9ebe18b545741e107b41ef"},
- {file = "cmeel_boost-1.81.0-8-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:a05b9793f13ee0c2f68f9da91cc0e9b3a40dbab2bec4134bd9a8463944b7b27a"},
- {file = "cmeel_boost-1.81.0-8-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:06345540702978ee3d68ab9bddfc4fd14e28c4a9184d6f1cca087ad8f8907cec"},
- {file = "cmeel_boost-1.81.0-8-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c410b15cf608567bddb4d9d213b57f67da2e0b35c02ac340bb7c17b4c4b53c83"},
- {file = "cmeel_boost-1.81.0-8-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:e3c083e36b60f4c9290e33a31d3c5e82f6b74f88f93ce925c8d44b2d204feede"},
- {file = "cmeel_boost-1.81.0-8-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:dba00d6b19f1ab142b86a453029155de7b086e9fc6d542c9ae4bb55938ee2ec2"},
- {file = "cmeel_boost-1.81.0-8-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a8e97c2a0f0b33e63cd24ba3407e2da2eb802afbb1b6ce8bbba694908b79fb58"},
- {file = "cmeel_boost-1.81.0-8-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:f189098c8906f5ec6d7ab44f7283c9637833f258680aebe3d616b8196fa3404c"},
- {file = "cmeel_boost-1.81.0-8-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:d3cbd8c8fad7a61b830f187d26387e1583e2783ec927c98b2688740def97712f"},
- {file = "cmeel_boost-1.81.0-9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b88b70d493986e9cd29c9e737e388188cc3c469413738d0db1636eabe6171f70"},
- {file = "cmeel_boost-1.81.0-9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b060ba3a764043fe4980eb6693e298037f950b52cc3df9e7471cae024e839e71"},
- {file = "cmeel_boost-1.81.0-9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:be231131460e50d947705dad1f6c4f3771f9926a3998107f719bc284089f4dae"},
- {file = "cmeel_boost-1.81.0-9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9f8de8d6a70d9a1e4a7b25edcd540f9c517611e91d7a1229a286ebea0e77abbf"},
- {file = "cmeel_boost-1.81.0-9-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:6c492206a9488176eeab443147db33e3db7cf2778ed043044cef5fba707c711e"},
- {file = "cmeel_boost-1.81.0-9-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e9f034f170f8514f84eaed8f1f00bc362b977d9ea1994fd6312ec3db6ee02ec0"},
+python-versions = ">= 3.8"
+files = [
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:921d803a76d566ad1f58e2cd880c4bfa7409e1fb83ab11a5eea92cb859c705ed"},
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d166c9ced8b828f1d17829e320616ca5097f9461316fcffeb123ccb453bc90cf"},
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:a785a017f7d44844fda1e2df81be28ca216c8ef85074c33078c535a1eb590037"},
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7798c16a372e834eb08dffcb849c439eed9349ee174e3eeb26da7fc1bf269d2a"},
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7e3c0a79c06af17e10922ae5f4e27e4c59c0053cec131cababa5bfb2562d992c"},
+ {file = "cmeel_boost-1.82.0-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:95babd3030550fb23e89dcac190723773cd85d5edf1e1967d2041cbfe64bd620"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:abf30a02bcb0edc1fd1da72aa42e82b54e04a79ee9be424df8f935d0b99393f6"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:52fcbd60cfeca6dee13c0492e833762099035fdeeb3c762829dffd36b8aebd47"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2749ab5631fc0206b39418c8f182dabc4e87c7e15a385514e1d43f1a030e3bc1"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:73a5cdafc7c87774591b2d0cd4451b5f08c67f8f7056ae14079eed78ed66d83e"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f4a96968918b9fb6d2c2b3da93a6cc303e84bb40e91d199ee1c6514c1536c4f8"},
+ {file = "cmeel_boost-1.82.0-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:42d5ae1c6416e3e35385a4a799a5383cd3499e25fc21f10be7454d677f85e08f"},
+ {file = "cmeel_boost-1.82.0-0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:303dd0bdd3bddb607c2799219536dae49abb89bb1240441a565bd6947772420c"},
+ {file = "cmeel_boost-1.82.0-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:8e46baaa0043f88ad160e904d0db33673dd2f4bca02d7b87f637b012a62ca63c"},
+ {file = "cmeel_boost-1.82.0-0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:51e9b039c671e91cf4354e1daa5e5961ba4a704ba13fbc4e3ff13717b9bb38bf"},
+ {file = "cmeel_boost-1.82.0-0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:42cfefac87b2ee31dc997c90159c05fdb280fa2925c715c688db7e4f122aa4dd"},
+ {file = "cmeel_boost-1.82.0-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:18a5152b2499aa4198402cfcd6d4cc50f5da7ca8bb832588bac5c179e647411c"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:35fa2ab6d06e1f00015f045f0a4a8f90eddd1a837db64c0e58117bb846ce1db6"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb1311ff2ba534db162ca16cd824ebf3ff747f3eb93f2592b82a765ea434b260"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:dfdcdbaeb6441227b83b4a72e5ee861dd28e774bb4344d01d9c9305ba2a68bd0"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:389d7249f35e9000326ac29483fcd0969de0b9d5175a7929b3364301f2d7f767"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bf1fb125aa92a898655ba1fa96be5d1006aa201e37e302b98db1767884472e54"},
+ {file = "cmeel_boost-1.82.0-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:074086bc12c3d1a830bed6f278ea9f324716fd8fa25a1f5ec6959a2db2770d91"},
+ {file = "cmeel_boost-1.82.0-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:bb44c89cc9e70f35b39c47a8074872cb82d804eae0c1444d7d914daec128d20e"},
+ {file = "cmeel_boost-1.82.0-0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:8e7d4f49f9973b8efcc501bdd25133e0a59301685b7b65ec04ba6cb664d5fa3f"},
+ {file = "cmeel_boost-1.82.0-0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:43d4214d915cdd1981ee5ca78f30c9d740be12557582518d9a05a7f6a9037d67"},
+ {file = "cmeel_boost-1.82.0-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5633d626b1d62a88a5e43d003716342d5a4940cd2c71f90399ed64ad8f618f3f"},
+ {file = "cmeel_boost-1.82.0-0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:17ca3fed4eec35880794e9ffd424e5836c165415f00b9c5aaa7ec52e6d889fed"},
+ {file = "cmeel_boost-1.82.0-0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fbe6fdb3c8b625259df0cd65df581ae027a44d140f06070eddac68427fdfb314"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:27a3e407e25c701b6e4e949b5084ebe8d065a38c3395de10bd5a9456eb57d75f"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:28319ae7ef398ca6e8b4c822015f97413b4ed7f730efb0651fca5bd879b5b0d4"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:2baa72e3f529f9ab6a2306b35e1036f641a0ad824e3e94961de545dc3ded1265"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:7b99b30369177d37bfe30a78b65524a608409fcdccf7031edcb0c5c10dc90702"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:78140cfa70442351809b8e37361c3aa1f8d95abc73f80c3a7445075e697fc2d3"},
+ {file = "cmeel_boost-1.82.0-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fffdd7275a3d4b4dd41da35573fc1fbe77e1402c235583701d5972c84a2ba8f3"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:312d49dccd81617ad66ad567f34b2d929d75e25ebe733804596c7de3591a05e5"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81cf5a0ae2b3cf97884ff167b0ae223079446cf56dce0d9ab19fb85622e73444"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e5121b4ad203b6a5b611827cb3702d80734c6283753b572661a064cf3c092296"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:75d0b0422cd8d4a86f759fe54f72b9447d3c8cbb738276ee83eea6f881503c4e"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b78c44799e5044f32a1c9e480f8b241a38daac39255402e720662018c915396f"},
+ {file = "cmeel_boost-1.82.0-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb0b515bb171f3e8c78e111c885a16b1469600472ef35cf351c02d20926649e8"},
+ {file = "cmeel_boost-1.82.0-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:829d3d4207017391ad6e43507fa3318a181df2f3163b541eccd60fcc458c8730"},
+ {file = "cmeel_boost-1.82.0-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:96f90896ab371df223bd68532ef55b80ef03dfce741a23cf611b27690654245f"},
+ {file = "cmeel_boost-1.82.0-1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d563dc2b80f5c920dabcbbb11ebf1899d72bb41138a3c16f690bf88b675eab58"},
+ {file = "cmeel_boost-1.82.0-1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8258881aa5990ed25b031a88ef402d7f3a745d1e311120cf38e898ca7fe27755"},
+ {file = "cmeel_boost-1.82.0-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3bd40c7faeb8f769757ce65189c873e9cd433ab6fcba2f636947b00f22485500"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e6db1eb1abdedb74fc8b6f6d098c2a2c2acec861851dd9dbead40eb8c7df2bec"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:45916b1e2795169b0c3b0ee32fb7bcb77ed340ff04d5c654ae0d7a8b222e7170"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:2fb6c70fc794e050228a5e428bcd6651d058d4cc54c0dae752110a1a5c51b076"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:a41eb6b0566e874dc7674a686eafcd0a12a724ba36749f8cba63694cbe39ac08"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e89ae0bd727f8dcf4c205552dbe8967f11565fb9709b33906cbf25271fb3aeeb"},
+ {file = "cmeel_boost-1.82.0-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:841bb0813457a2cce05d3e142f97b65823545af794f6e063dffa1a1f72f8957b"},
+ {file = "cmeel_boost-1.82.0-1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0595973c6b77d2c46948f76c2b1b98137c96a1e4266d5cb088ee0a70ba7a16c4"},
+ {file = "cmeel_boost-1.82.0-1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:4dfb27adc240c8858fc459898757ce4a4c1c78e3293863f0c818d42483fde9b5"},
+ {file = "cmeel_boost-1.82.0-1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e4159109810a58bf03ce98a31c79e17221d7223b076e0f236e8150cc7a60fa1a"},
+ {file = "cmeel_boost-1.82.0-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e54640b3837e138de8ed68a10df73d2e22dd2c0416a2c8ce3636cfd794ab93ff"},
+ {file = "cmeel_boost-1.82.0-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:eec70eb2a214a6bf85f670a932ec3702215e4400522969cb73c33b316484049a"},
+ {file = "cmeel_boost-1.82.0-1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:897241fc8f4f710ea77c5a09d968e8c3bd9494ab15e45b791822b16eebc7236b"},
+ {file = "cmeel_boost-1.82.0-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:41d3a60ca0e30e36d1803c8a68d060238040d7cc29ab9d31255a597ba4257191"},
+ {file = "cmeel_boost-1.82.0-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:e3bdd2bcb950a29c06fa76718d92875a4826b1f2562cf7c7da43ddbe7f6fbd0a"},
+ {file = "cmeel_boost-1.82.0-1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:5cc97b5485b7c2eee99c039c6018288f5b95ff875304d8ec9d210ef2811032a7"},
]
[package.dependencies]
cmeel = "*"
-numpy = "*"
+numpy = ">=1.23,<1.25"
[[package]]
name = "cmeel-console-bridge"
-version = "1.0.2.1"
+version = "1.0.2.2"
description = "cmeel distribution for console-bridge, A ROS-independent package for logging that seamlessly pipes into rosconsole/rosout for ROS-dependent packages."
optional = false
python-versions = ">= 3.7"
files = [
- {file = "cmeel_console_bridge-1.0.2.1-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6245409bd46c0e55caa102d5e46bc56f7e7c49ddec3482994f5af139ae7db336"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:f845b4280bfba535c49f9038431f8596a2d2c3a86e47cb13da3d88d9d46fbc1d"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:98f0da7d373a358da51a2dbe8aa2cfde3b040d476e2e82905626fd3dab0f0e0a"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:799a4fb00627d0bd5bf21ed3ead65484553aeb22466837b57f58c9c88c294de7"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0d5557d7556e2a40759cc619adab20499c79085b36126d0ca5b10fa22a71407f"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:70eb2cb6a1f9ffff5d8234948a0b9cd689dcf7fad27b0fde79f17937dd0514a3"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:e7ef1b0c9fa9b00e03212f1ae540aa948e511633cf66792c5b2fe62d7cca00fb"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ce38db3bae076a8e22516f6c6864869fe1af3dbc46c65b5817c086f645514b53"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:7510af24e0ea4cb08e05238b4921440b33a547f741ad9fe142ab877bd5ee2334"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:4de1eef4d1441313396c5b86eeadfc8c55ca8d9a46c401c00de1cc5cb573e493"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:97800601767bbebb98c97ffe5ae3020c3375fd39a70d289165afb4eacfb1c530"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:37a7e70e5a3883a51d085069514f9b63867b4f991496557669968576a3be4792"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b553e8706b072c8b38e6057259a25f3ed5d5eb0041befbe99b647507de23b456"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:fcb9ec066a90a286f66a8c32f02c5f4921dc7554e0a457d74c1daff91973f7d9"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:133e4f4b587d05e89caa1a304a43728e8738925a1e89b17dc46e4bc5c4121e58"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:10ca3ccdb43fdbc5133a2e38888142d16fed4e5d4171ea88e9f952c43e30a46f"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:326ac074793d76f32364c599ab72f52790183a9a1901fb1ea2a2047af7d089a8"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:923e19d8c604544d75eab54db38c06a4de7364c0d905d680d66c5887aa48a241"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:34a4199005facbc00c3b0688391e7cd35ede0ee74f41a14b615f55353ca239f1"},
- {file = "cmeel_console_bridge-1.0.2.1-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd8585beac2fdf208c1ccca1767481939ca945be95ae3405ed0e6ef972f47f80"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:7819adbef424fbe5979e67d2fbc956ee2710ba1e00c0dfe82c75ad7e25f35292"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c208b761fccbbec802e55cf2c23e4c47706082087da4524cc42e0ecb9bf7f5a1"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7cc2dd9fa0702a3a9f5e73c23c7c2d06f49b8531d00260e75ee69cfa3de0a23b"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:0e5a94306f13227c3570022556485fd0ce210df509247d953e1ceca988029a30"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a571d07bee19c1ba1d15291390492ee96a90801fa63964d68172bda99beca7d7"},
- {file = "cmeel_console_bridge-1.0.2.1-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c7a1c166d127e64ec9353dccf1d3347fd315904ddfd259530febf0cf1c5b0063"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:429d31140e7fcf48bf4bfb9d4dd18a5b5790f6b5f7f66446c46a5239d8d3de0a"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:01c368cfe95ea65f38c8e5724992d3f2adb657e7f3dad0a622ceeb4e49b191b9"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:315181ad8a1c40770e79085f188f3ca95a425195628e9debf811b2bac69261e8"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c5d22b12992ab6c9faac7048a2bc1ce3e772220335a881c29a77524a72069938"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6df6f05be29306f9fb38ca43fd7c2f43aec6f656ee7abc0413284d357434971c"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:868523b8018e22baaeb41d7f5576e4a04e0f9601e2842e23f97a43c13512343e"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8236be3cfbe7c45da65b39bb7d8855f3727f279d6945ae3bf0077188885f5e77"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:a57a4df697dd43d3ce002f8960dbbbf647590ce866d2b12684d484c695861021"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:1873090644b87cd3131eb38bd30b4aa6a53eaa56cf7b01aad7e02b324a7d629c"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f024a8b4f2cff29c299215914963b00d4c017835b32be50f114a23db1c5665c0"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:0e00d1404000d7ab5e744e44e25c55e2507de3f76ebc60cfe749415f2693d8f1"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:a88a78c139d397fe1b06f93952c51be9c69c98d5f078cf337738f22baf47f8e5"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:0630aeaa01788818a4a4e26a3a290e9c0d3d25578ae537bafdd57c3596b3727e"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:29812a731d28ac4aabe75d656415682dd303bfded1f6e37ed59494bd7427114b"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:52fe9e5d351b83b47e633e14aa8a63bb17847a668dbc0c35c25fc3afb9bf6877"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:661f072c90110eeb14eb1ba0482b57f1811efec1d302b20369720bae87aca157"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:516161e2478ec95215b1a574f76a3c568a8fdbebd207e52c3d99d7d90a3ba208"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:5b4b6f5e725eb6bf5c32dee14a000c8e4a9f5917fabc675ddbeff3fcf87385bc"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c650fed2c80131f57c7958c8d4761a16262e674f325257856e82295bd4f8deb9"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e14a41466637ba2dd6167cb019becdf79fa639144b2228d65f49f69cbae9ebb"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:091034dd33396794764838b14d3632f70a329e1e3f97dc9c665887367446a9d1"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:008f74f976fbdebb2d2bd1feadcc07069417b036151f2322f107d5f1ea8f8052"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:496096abce560eed36c258772abb1b2cab81c2b3d1370a4a16680f3a632fe28a"},
- {file = "cmeel_console_bridge-1.0.2.1-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:834ef275329ae857c6bd1413a64f94bcfbdac064aae03a0f71711c14fe842630"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:1c96a36f62ace5f3d10b7ffb45bb8aa02b90da458b92c8bc4d7641f478fe2a0f"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:b22f382c762c13ee23e57b020febe56841dee84fef980639cf5facac3cbcf10d"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:29698c37fad77ad461493b280448bff420cae0448ac55692cc861c8b741c139e"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:6fabc837418c5c84439cb26851d1a8041970f82217b457d81debb8abd8acc142"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:56eb982926be4f9f381b4b68767b0ffb6733617bff70a568b813b2be70548b2f"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f5fcc658ca6ea3530199fc2ea84973c493ab6735820682c6b791d9b42cb65bda"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:a8799981c02d8f5ab77ce29ad803c72308f406a59400674d08f7cae53ea6ba18"},
- {file = "cmeel_console_bridge-1.0.2.1-1-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:44dafd8344c167d3db4928456e494c78ae6493b17fd721a32a1aa6b05ec92290"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b02e765570c7cdd75637fdd6ecc3963af0532cbdb511ebea52b58a11b73ce1c2"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:450a363e3a9ef0f3e67f1ff4eac3ecbce9a238d63071be85169428bdd2091d56"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:df1bfdc34a8cb52f8e3406cb82e7cf261440c771a4a1fad9812aca1fc76376f3"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:2f341275abc6331668eeb1db184024363cda7374d13beb922fa4c730da8c1ed8"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f2980425e4e8f9f94a2b6c1e9cfde3db2547f06606190938f35a314ff8ae524d"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad8304ba300db10be2c78276ee0ed92c2c42e5259c8f0587bb5e5d78c5306ddb"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a7af6bb340cbb3720633d6a1f94a818164e5f0ec6d1d95090c26a3b5355a633"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:5b0cbb74cd2d267bdaffb1e29d4b6bb8489bcbd2a2e82451f843c1ea16c5e838"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:b000cf6f0dc3757ff5059fa1af5d007e8a7dca175ffe7fdd53fe4a992ec2523b"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:70f0e10d949389dd6aee66d0a3ced11216fda0fd97070a576bfae7850f0e7f8d"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:df1e72996d9e5112ba73702b03f393a6d5577ab735f5c68710d94bcd24205e5f"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:62132e26bf474ff99958a92ca1f9d3e9763797e1123c43d2df33345d120e8ef0"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:a1b133e34233d71e2441142e0fe0a38abebcc49a0d55e0168c0386387160c1dd"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:39da51d40ee4615fa6370c3e1fac4fb3880df954292544563ca5aa6ebf05852c"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be14fe10e11516d4d52246d46eb988065b6d956485214f373269ae34de43028a"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dff3dfb10dbc7d9452e965abd7700984d2cea69c6e80e5603fde2a7f6468d51d"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:2fe7ae9e1f109a39a05323456c8249ba81bbee8297e0b724882efea15bc303d6"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4e112bb98f513f64c9c4275f83f2f3716c41707f161dbebe9e0637102ce19cfa"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0f4807bd8d233c7a0b60792455090e75a6bf62ef3365816c35d8b983ef1149e3"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7667dbb3c48711c7193a5cef3a67429419d6f856093e34d433d4a12e60a148fd"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:02198e09610904fcd688e360c9a154f59f0327f1adad520d2e73052c550da358"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:134e8cacc29d59d736d4ab9171bc77faea4b5a959df053db033e974967584bc6"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:d03204d45ac0225a5b0266c185c6329ad6d93f651ec4f94a49260b8968cb4b4e"},
- {file = "cmeel_console_bridge-1.0.2.1-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6963e8f951bb3674c2ceeccd3471f6382ae6461a39c44df8200a6a00a24d779b"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:feeeb41121eb0c1664232a0105f7ed72bcb9b24a0125108dde8cffe2a63bd272"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:1687b9a7c86c2382b463815f7a65d9495dec3c32c0a72fd29ad68fcd4012b996"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bb76f595c520266308ae9778e7e43a3605f3941672cfe9f0e7bf77f44451b3b"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:c858934e491416fa667180276e44a3c99144d6988f9b9467f37ff6bb3375eed5"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:181cb0f98ebe5a09073af75d9dabf462007d960653f7286fd5be304d9de9aead"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:692cf50358aebde1ad0b04f8428f326b4bc72172b75879b2ba14436fdcb9fd78"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:90387c9ede94bf95c2e4ac09a6dde8883ee92792837f0a7caed8bc331ce75c7a"},
- {file = "cmeel_console_bridge-1.0.2.1-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:352625d292f7117656ef913a4ce1208a913ec2a5b534df77e52b43baed13ea36"},
- {file = "cmeel_console_bridge-1.0.2.1-3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a13d4147be189af4bb938b2300e73312153a25eb131881dde2fde73089135671"},
- {file = "cmeel_console_bridge-1.0.2.1-3-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:9338daf7d6ae49cc2efb56431d22fb25f8fc287f5c26f0702514e0439c11d2ed"},
- {file = "cmeel_console_bridge-1.0.2.1-3-py3-none-manylinux_2_35_x86_64.whl", hash = "sha256:72204c3e29aa6404bdb2fe6c91f25a7d5d3d63eceefdd220993ef38a093382bc"},
- {file = "cmeel_console_bridge-1.0.2.1-5-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:2fed7b6efa8c1d9e584dcbbfaea1f23b4d25d90faba79e92c90db29e1558ffc0"},
- {file = "cmeel_console_bridge-1.0.2.1-5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:05060f1b61adbf694cee78e22c64775df793bc740bea1e3d2c78eef60f9dfe19"},
- {file = "cmeel_console_bridge-1.0.2.1-5-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:b74c4ffeaf034ba1a978abc14ecb90930f4b04f64948b3f388aa8dfe911c3e8b"},
- {file = "cmeel_console_bridge-1.0.2.1-5-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:f583fae8264940859cbeed77eb3a15a896986e43c7dcdf44a653731847f556fb"},
- {file = "cmeel_console_bridge-1.0.2.1-6-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:775eb19da55da563a0f7d6b7bc8123423c768e229ef195aeadb25ca38af18922"},
- {file = "cmeel_console_bridge-1.0.2.1-6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cc70385391785a0ccfa23b1b5334920380ff8a1f02e1455379134decb3b288ae"},
- {file = "cmeel_console_bridge-1.0.2.1-6-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:074bd0096ca5880490fe7578c06d4048350a2f4ba9cdc7960508a7a99f127e74"},
- {file = "cmeel_console_bridge-1.0.2.1-6-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:054a1862c2d5cc99572cbf62a877c746c2add994c24b966172543ddb696917dc"},
- {file = "cmeel_console_bridge-1.0.2.1-6-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:8dc11762eeb14cd990ed289e764b6ae2de4312565c69dde2a8b495c8a84323ff"},
+ {file = "cmeel_console_bridge-1.0.2.2-0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:16d97047ae4d732db0af48993b145bf11e6ef4d1579b29e3ed2f65ad4349d0f1"},
+ {file = "cmeel_console_bridge-1.0.2.2-1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:55b10444c7f09525e93f9db406020db56d08ed023a2eb27010077db476057d6b"},
+ {file = "cmeel_console_bridge-1.0.2.2-1-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:9326a6cf1a2eda3447ddfecae92769a15df0327803c34e148090d54c95d79871"},
+ {file = "cmeel_console_bridge-1.0.2.2-1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:ff004576e9877c4a7555aa1275e5c2cefef0a68f69f65669ff4da842bb2621bb"},
+ {file = "cmeel_console_bridge-1.0.2.2-1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:a2d992ee22b4671d9a695b1ac2aa738a862262f78fa393c38cbaad06d8205b89"},
+ {file = "cmeel_console_bridge-1.0.2.2-1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:d6dcc5f8eadf18cd3e3d6155ba4aadd9b45e791912659bc14e5efdda7b5c2af2"},
+ {file = "cmeel_console_bridge-1.0.2.2-2-py3-none-macosx_12_0_arm64.whl", hash = "sha256:d5ccf73b733e480bf71d0430da9c569101f5019bb97abeb397d41515085b344c"},
+ {file = "cmeel_console_bridge-1.0.2.2-2-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:e2b4b33f6c81bb935315cd8b243dcad3b151bd6da47c6dc5f46d1facfaae4d7d"},
+ {file = "cmeel_console_bridge-1.0.2.2-2-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:23cb0e37678a75778ba63444e598c41b4a694dab46037966357e81374bf4d182"},
+ {file = "cmeel_console_bridge-1.0.2.2-2-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:5933c56b6d5f4c1d1642c8088ad2c01d31bd6ec6fa8c0d2cd1683791e8242f9a"},
+ {file = "cmeel_console_bridge-1.0.2.2-2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:375eb20ff3eb0bc7d7002e7a5b04cfdbb17ec9ed29b28e66bfe1644e9588a99c"},
]
[package.dependencies]
@@ -629,112 +527,25 @@ cmeel = "*"
[[package]]
name = "cmeel-octomap"
-version = "1.9.8.1"
+version = "1.9.8.2"
description = "cmeel distribution for OctoMap, An Efficient Probabilistic 3D Mapping Framework Based on Octrees"
optional = false
python-versions = ">= 3.7"
files = [
- {file = "cmeel_octomap-1.9.8.1-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e9721a9aa6eed5d3308d15f4c9f4e12fd01e7573e4e6088820ed0f58fe79fe6e"},
- {file = "cmeel_octomap-1.9.8.1-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:cea79126fd5d3f52c18590d56cfb6f8079baec606924de6173cd8a431b2dcd90"},
- {file = "cmeel_octomap-1.9.8.1-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5e62385d62612350e86de1977371178e8abf1ab2e8b4bbee855ac131e58a36c3"},
- {file = "cmeel_octomap-1.9.8.1-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1923944e6d05e2df97b2abf7c8f03d5047bce798579f3813127064bde418c272"},
- {file = "cmeel_octomap-1.9.8.1-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:41df61cde5f0da8fdba1087c214066444c62b46b00a28b6effafbdf5ed7cbd71"},
- {file = "cmeel_octomap-1.9.8.1-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:31303a1ceab6278551eb6d46c0d6c3ff295010f61d0ff94002e29bae0b0bf87f"},
- {file = "cmeel_octomap-1.9.8.1-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a1ef6eabb5a093fee5b58e480daf7c3c6ca5299c49e33d61c37f4b528b996219"},
- {file = "cmeel_octomap-1.9.8.1-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8f09b0bb8e520a170a005475f5b86edcea7ef44332a42d1a608e7c1fb3f886a"},
- {file = "cmeel_octomap-1.9.8.1-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:cda4a0ed1b5f5e63dfc7fcf52f139484db9cc390fa7e43a13782c0f05bdfded9"},
- {file = "cmeel_octomap-1.9.8.1-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:d2bf9b2fb2295ec754f7488c081f32fd054d6b2d4a1e7ea4db374c06bc84553d"},
- {file = "cmeel_octomap-1.9.8.1-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:db4daac0790ddc83c60456d5d3c7bfff39fe33acc1f800c0550aa63d0d164d10"},
- {file = "cmeel_octomap-1.9.8.1-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f3465c754b05f1e206a8fdf4ba6b0cea57ec869f9ae49933e3513d4c20943bc8"},
- {file = "cmeel_octomap-1.9.8.1-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:af12dc2e8f5fe5cd488de51a8c27782e4c77e6a1933a39c53ea05c86334a3378"},
- {file = "cmeel_octomap-1.9.8.1-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:c3aba231d31c826dc93bd0aeafb0890d6e1fdc66fe3b1573b183460f9b866c55"},
- {file = "cmeel_octomap-1.9.8.1-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:98467e3442d3272e326a0ce94e886fda06a1fa2fde7247d3ecd50b00bad55ec1"},
- {file = "cmeel_octomap-1.9.8.1-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07373bf5a1cfecc37b6f366b9c2bc0c76d12714436185305ff02ba6f3948f158"},
- {file = "cmeel_octomap-1.9.8.1-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:52a6c75d423b791fb6c52639757a85e64f9973c681189c74941760778150b461"},
- {file = "cmeel_octomap-1.9.8.1-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:b493f546fe850e4ae17d803082663b1af59b7382c3d88374b097d40d62e31ce8"},
- {file = "cmeel_octomap-1.9.8.1-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:485aa05305751628d2e59bb40c6aef4899e29fbd8125720d3dfd25122b519b1b"},
- {file = "cmeel_octomap-1.9.8.1-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1a1e5003359784bc8d134cc7eb8d610d4d418986a2a72884216c82119473d011"},
- {file = "cmeel_octomap-1.9.8.1-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:875aeeedb02d091ad82c78e2af252fe6631e8e30402ff2c9c5783c7b3446d7a1"},
- {file = "cmeel_octomap-1.9.8.1-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:7877c68e483d494978efea4d00a94cb3a84d81585a80ab52a107f1b1aa2db6cd"},
- {file = "cmeel_octomap-1.9.8.1-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ad081c8c150d4821914e44501b4098a528a27dbaec60e70e873673e48668ff29"},
- {file = "cmeel_octomap-1.9.8.1-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:a0a0d61fb7655d67949e282144b0ff50ba12b1eeb37ef2b0bb605c5820dc5f63"},
- {file = "cmeel_octomap-1.9.8.1-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a445f960a2837b6c4feb349079219882dfbd1ffb13b02d2109e179282c8a3ef4"},
- {file = "cmeel_octomap-1.9.8.1-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:da6411dc52ebff09f3584d2350e998e64b759653fb2ec144b8a451b1d3b9f74c"},
- {file = "cmeel_octomap-1.9.8.1-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:97c29a085907463c06bcf19b32d2e1c25f07ec1e41f5085b14f3c4c9f9b2e7fc"},
- {file = "cmeel_octomap-1.9.8.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f1c8736ad40240c6d6fd424383bf11c7330ce7ec015933cdf726cbca36213f45"},
- {file = "cmeel_octomap-1.9.8.1-1-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:c86b70fb5894ea6d8c72905018329e417ce72717021219d927c327d9f20809b6"},
- {file = "cmeel_octomap-1.9.8.1-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:1740f6f101f40365deb0736d13f9d8758e27cc2986b86af1071876bbe4555f7a"},
- {file = "cmeel_octomap-1.9.8.1-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd6a713434a43cd9bc1da333c595a9314148cfac64868927ce884392e7e4fc04"},
- {file = "cmeel_octomap-1.9.8.1-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f6125a9d96f7f59d047c78a978ca4e12904f75302b9e1248bcf52411af35e53f"},
- {file = "cmeel_octomap-1.9.8.1-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3998ab546b01fc511665466d01a9cf90e65e57f1a57a733149e12f9922d0e148"},
- {file = "cmeel_octomap-1.9.8.1-1-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:9ff9fdb3f5faec0fc08c56498e2ef8640ce959420128ec3e8e7efe2af18a864c"},
- {file = "cmeel_octomap-1.9.8.1-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:4f1b6789c7a41a218de44da4b428f4a604ce3eece54866fd76866cd2a0a10a6a"},
- {file = "cmeel_octomap-1.9.8.1-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8674bbb3f987459eeb218abfa4dd208c69ae6ef3e1a77bcc05d4cd19d5f3d68"},
- {file = "cmeel_octomap-1.9.8.1-1-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1541afc9516c9aa4504038759fab479a08535492957715abf49e85d7f94f7b44"},
- {file = "cmeel_octomap-1.9.8.1-1-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:d14798e8747b67cb3076604debedf2fa4e6f227cc4546e6bd815cd2ee2fe1ab2"},
- {file = "cmeel_octomap-1.9.8.1-1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:ceb0c0db451a1e0028e4b04c2a0787e79dc859264ef0d80b3ec6de9c971fe230"},
- {file = "cmeel_octomap-1.9.8.1-1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9523da65a3547dfc59114ef95ae9936fc86305c6e46e77c512eeae19f7fd88d3"},
- {file = "cmeel_octomap-1.9.8.1-1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:944e06616809534fbef2d4a341d339be4a3c54f48c3edb989492640390a673b0"},
- {file = "cmeel_octomap-1.9.8.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d05b93c2a2562ba193933584d563f9bc741f55bb6ac5d7ee23826e07eca05da3"},
- {file = "cmeel_octomap-1.9.8.1-1-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:725aae1966588ada14d7c3e25969499c249314d2a5cac72ed8de63a244bb9712"},
- {file = "cmeel_octomap-1.9.8.1-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:7f4c2e2f732387fdb145a0789a93de386217cd0b54d69a070ebe6abc4258c02a"},
- {file = "cmeel_octomap-1.9.8.1-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f7e547e0e851fb1accfa1ce12b337286684d9a43da81ed2fb71e147e49e68d90"},
- {file = "cmeel_octomap-1.9.8.1-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bed8f851656329f9d223e0bf46955dd5eaf877fc638de007352fb2ca2c4a0226"},
- {file = "cmeel_octomap-1.9.8.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77044cf51e7f6b8df6f7fef32d4cf9887d6054ade17d3826e5070ba7aa60adbf"},
- {file = "cmeel_octomap-1.9.8.1-1-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:970afa38d52dbb445d567c1b296b2610dc0836ff2ac0364f0be78e6cfa5426ed"},
- {file = "cmeel_octomap-1.9.8.1-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:75e67a3748a39fb036de3b7f439da1114d462aff137a0643d3cbef21722b8f0f"},
- {file = "cmeel_octomap-1.9.8.1-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:90bef02bcace904d6e6e1c59259dca71f6ab4fe89100f96255f199f658ded505"},
- {file = "cmeel_octomap-1.9.8.1-1-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:95783fa9d0626cb9dec5befeb660ecd33828e9bad8c858e50ea928124705bf5e"},
- {file = "cmeel_octomap-1.9.8.1-1-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:df9caaa7c9d22cd12f098f40157491420467fc1081a8a4df3618d4a56ad4aee4"},
- {file = "cmeel_octomap-1.9.8.1-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7745e7bbed13153f536ccfc6cf8f2c9af0726723242bbc2e9e5c31f6485befee"},
- {file = "cmeel_octomap-1.9.8.1-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:0d29bf6a25ad42736b838e7f2dea17e62aea636868da3f5a4cfc28665d2e5236"},
- {file = "cmeel_octomap-1.9.8.1-1-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:1796c16e9772b685789922b8c6a0dd8a61818cdbfcb7ba26915e83c50cf7bda7"},
- {file = "cmeel_octomap-1.9.8.1-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:070e9f286982eea0ffe5812a1799c615ca40f8fd4d344e3b4265b398b65f0e7c"},
- {file = "cmeel_octomap-1.9.8.1-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:be98883ac8ca5cf55978961e8d37042e5c28bc69d16a10a37082043b148bd765"},
- {file = "cmeel_octomap-1.9.8.1-1-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:789b3de24375ff2d4d3cb2cd3655a3d7eedf1a52254218c511424649ff9d6447"},
- {file = "cmeel_octomap-1.9.8.1-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4aadde97af959453a1cf30b10ab5a340db744fa8a2258ffc0c7f0f49c0cac2dc"},
- {file = "cmeel_octomap-1.9.8.1-2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a38f6d71b0e0984ae4823017a9d608d5eb2e0a227906a9f18f6e752d7abf520"},
- {file = "cmeel_octomap-1.9.8.1-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:209b5063780bf743b6b4af1c69fd4cc146cf763707132ce2aa0bbfe198aa39cb"},
- {file = "cmeel_octomap-1.9.8.1-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:679e56ae9c7a83c091dc5d8e95e334e02dd47a8308d72db719ee0f4a85017dc8"},
- {file = "cmeel_octomap-1.9.8.1-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:41065f8f0b423f8e50478c4770471449ce8b32e08dbbe73b5cd9f5b91363567a"},
- {file = "cmeel_octomap-1.9.8.1-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0a97657eed0e53b603dc5281ae4ff80b4e2bdd407a575e336488b9dee46ce1fa"},
- {file = "cmeel_octomap-1.9.8.1-2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4250ef540db4a7585f3ccc0b7ad97417e5f10ff2447e1c2e9d4ef8d90861a572"},
- {file = "cmeel_octomap-1.9.8.1-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:93c794855d8b92488dfe438df1737ce3b83969a8548f9a9a5944a45a932f127b"},
- {file = "cmeel_octomap-1.9.8.1-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5aecdca0a8e12a39e86c3b2225550a5acb01db021ea98b295ef6cf9b5a5d9cba"},
- {file = "cmeel_octomap-1.9.8.1-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5401dde5e3385932588419100ca33602e73d688061ac6d4aa1a1028e5a3a925c"},
- {file = "cmeel_octomap-1.9.8.1-2-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:235950f396f6ac15b5ec0a4ec07af62863d6fa33bf2445415d5ccebb0f19802d"},
- {file = "cmeel_octomap-1.9.8.1-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:f0d2b465e5f70b02588b71c270e354ecaa415a9a7a7740084e4a96f3b95dca02"},
- {file = "cmeel_octomap-1.9.8.1-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:f7b23f68936804cfc7f7176121cdf726cc305292ff3557ba9daca0c1e42eadfb"},
- {file = "cmeel_octomap-1.9.8.1-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:30f4538b6c89a5ad30ed957361c511240e1e28f27688eeb64053067e5951a546"},
- {file = "cmeel_octomap-1.9.8.1-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70039994c120f115374e1f33b01b24a9f24f88bbe7aacef36c34c5645f4676a9"},
- {file = "cmeel_octomap-1.9.8.1-2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28f7b8c2bdb11cc14a2cdd19b1611cc9250c3543379eb96bf265eb486c3ec1f0"},
- {file = "cmeel_octomap-1.9.8.1-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:ab165b97640b91e1d77053bbad5ec676f572527b6a69d63c7714887607c6580e"},
- {file = "cmeel_octomap-1.9.8.1-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:f54b5b1a7ba1ce271743436c985f43953ccf7c030ff660078f16178d0b588216"},
- {file = "cmeel_octomap-1.9.8.1-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af8811d51fa65b6c1717f4fd0086993c0570fd8bcf37e889c493c92211c32db3"},
- {file = "cmeel_octomap-1.9.8.1-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4d56c908f60c475349cc042c9ff4277ff21a093abc3ec5b29d4fff808e866f49"},
- {file = "cmeel_octomap-1.9.8.1-2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c4047c77ac11c4989b47ccf78cf9dcc8f31e065189096d7092bd0b6d7e2a2a7a"},
- {file = "cmeel_octomap-1.9.8.1-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:cc0535d066a815bcddef881612d414a9ababb2ce73f2fcbd404a0e6ffefc4d7a"},
- {file = "cmeel_octomap-1.9.8.1-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:3d717c34ad78985795f056cd45eca014d6a16ccdd613a50785d4ccffecee4d01"},
- {file = "cmeel_octomap-1.9.8.1-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ac02c51561f5d81517576e0b70982dcadb239542e30a1d7a056842146512490"},
- {file = "cmeel_octomap-1.9.8.1-2-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:94a04247c7b2017c8e787c3738c16aac8477b8f34350f5a0d1fcd5ec998fe10a"},
- {file = "cmeel_octomap-1.9.8.1-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c249d5d59f3b05df71393cd62cbdf2cea389087784ddd4ceec49a72ea046b74f"},
- {file = "cmeel_octomap-1.9.8.1-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fc3886be3ff7bd4241585da97ed77a28d1896d97384966e957d432372f5fb54e"},
- {file = "cmeel_octomap-1.9.8.1-2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ea6af32421b1fc099e611579e475c00e7e3d2a56a638d1ce95a994e30ca18dc9"},
- {file = "cmeel_octomap-1.9.8.1-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:f3704e8e01e805431a29dc43337bba36d60ee93b14dbb774b44b26ffbad45257"},
- {file = "cmeel_octomap-1.9.8.1-2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d48d7d9e19597a20e556cae7883e4ab150250cf602d2d4d325814ce2fa005ebd"},
- {file = "cmeel_octomap-1.9.8.1-2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:ced0cc54b65486ae3b132ac6a9f6da33f977bcaa22b62730d0aaf2c368dfb458"},
- {file = "cmeel_octomap-1.9.8.1-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c8a0f13c2d3f6468c1a0ba81f8dbbdf143563364ce2e2d0dee0c7a143c34d60a"},
- {file = "cmeel_octomap-1.9.8.1-3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:21f2331e0be5e19fe37fcbadb771a554514467f79558c5148c3edc98b206e6a0"},
- {file = "cmeel_octomap-1.9.8.1-4-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:63c41d8cad55f6d24e35f6bd2e53a75be8cea59054eebc0b2745284cb68d7ecc"},
- {file = "cmeel_octomap-1.9.8.1-4-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:db52c489c3fdb8d59ca5c488cae68cf9067e216f030395210fb326b104b033d6"},
- {file = "cmeel_octomap-1.9.8.1-4-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:9162064b8bfeef12b0767dcd8e5db2c3f8110824321b9805bf4284328613be15"},
- {file = "cmeel_octomap-1.9.8.1-5-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f45f999c273eb5b8cadc44f7f49756636f7d5c38d49b2c5fd2c84685202283de"},
- {file = "cmeel_octomap-1.9.8.1-6-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:3de3f84ba96d8f977764e750af7f32ac6f21bcbd40cec14b1a085fcc226b6ff9"},
- {file = "cmeel_octomap-1.9.8.1-6-py3-none-macosx_11_0_arm64.whl", hash = "sha256:3765155b0b089855be2bd95b5957b4ee184a0551ea0fe0b98c1727b454393969"},
- {file = "cmeel_octomap-1.9.8.1-6-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:7a96f1daacfa685de6eb46bf7472c0cbc54645fedbeee17510993f1ef3d9492d"},
- {file = "cmeel_octomap-1.9.8.1-6-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:2030dfb5ad98baebd48c8a5c8a85e3ff39351a8dbbae294c0e4082352523d6b3"},
- {file = "cmeel_octomap-1.9.8.1-6-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:e2e5afd0c802cd7f062abf1b0228a2ebe531ffe57b8bbd2c41663fe8c5bd3949"},
- {file = "cmeel_octomap-1.9.8.1-7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:7e427412be51a55831ed66f4f7e99f8d78949180402890d7911066dac156b20f"},
+ {file = "cmeel_octomap-1.9.8.2-0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:4d510a9d5766ef15af40e4e3943a5ee589ba0357cfe10b5a710e2f04b0b19f57"},
+ {file = "cmeel_octomap-1.9.8.2-0-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:21e4779766c9fa76e2cafa8c7f2a60712b0010523bfc8d51d7c8f4e2aff95267"},
+ {file = "cmeel_octomap-1.9.8.2-0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:0003eae2cd8ce1dee194a21350a9f6e9e0b3dec89a8a6f80ab4c6ff58986276d"},
+ {file = "cmeel_octomap-1.9.8.2-0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:f07ab209481ff5a9ad44d9d649bf3b04119e2947f9d52b16cf3e4408b7653e88"},
+ {file = "cmeel_octomap-1.9.8.2-0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:14adc8913cf44a5b570588926d84fff440733ad0d4b8ec781269137e277cfe18"},
+ {file = "cmeel_octomap-1.9.8.2-1-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:fce38aa60c22b77396678e8f9406b98475312851a96ebf6cd654d2d60a3aa73f"},
+ {file = "cmeel_octomap-1.9.8.2-1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:5c4671f89f1b1dd4a0e868d464ae218ad8c5fe8a120fd6b4bcfca64b4bcfc45d"},
+ {file = "cmeel_octomap-1.9.8.2-1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:8af40226c6c82d86630f8d3093d9b2405d0a4eed1adbd79fee4cc18e19d27f96"},
+ {file = "cmeel_octomap-1.9.8.2-1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:1477026e475da08e6a596ff640f787b445f6d61f27a2551d1a33ede1a52960e3"},
+ {file = "cmeel_octomap-1.9.8.2-2-py3-none-macosx_12_0_arm64.whl", hash = "sha256:f053dd94c1c90b70cab3eb54906893222167272ba3d5f7f24a1713a9e014beca"},
+ {file = "cmeel_octomap-1.9.8.2-2-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:ca93c09b0fcd3a5830b1238235599adad22cce8630771b33bf318507ab564efc"},
+ {file = "cmeel_octomap-1.9.8.2-2-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:175aa7e03c0d0e7beaa4fe4f7ea7c89e55165ddf7668c470687f369889c7481c"},
+ {file = "cmeel_octomap-1.9.8.2-2-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:18886ebeb30e3355efef4321bee31f8eab93dfb4745aaf2ccd0e7effeab117cb"},
+ {file = "cmeel_octomap-1.9.8.2-2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:b5f986f3c6b18306769f441d7f1667023ac847bd79f90cab549596313b2bf780"},
]
[package.dependencies]
@@ -742,109 +553,26 @@ cmeel = "*"
[[package]]
name = "cmeel-tinyxml"
-version = "2.6.2.1"
+version = "2.6.2.2"
description = "cmeel distribution for TinyXML, an obsolete thing."
optional = false
python-versions = ">= 3.7"
files = [
- {file = "cmeel_tinyxml-2.6.2.1-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:04d613a962a813bce325f669edcfe38f5e48b56e085ffd554091a5dea9708bb5"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:b9351051d692e5c732b8cbf7e099ba55f955b2a08c20bb5ca7029e4b49e94375"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:d2980144acdabe352f6d0ec94655389ca70e46cd373d68c8d762c9d1224f154e"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b23696f01b69ab3c5182b280f548081d1c2ee5a14e47792a34931502cde09ad4"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b9d83e92a23bdee4b0fb9dc10aa505a1837e1f53af63e0ec737e36ad84c5a642"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:728eb2ea14eff37313279707ddb407e0c1a74d39b91bf038484b22610b0d78e2"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:1b38755e1fcffa6feea52b8b46ac36a7ae29301d44086e9430a28f7d9bfd2ebf"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4280aeeaeb325b39b57940f3a9c3c307faaa87c8b610f16368e3ec5c3b785588"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:e332cb217849d69e684d063953f7c3e889efad0549b56ab50f8fd4dd2ad20a4d"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:6279ef0e5aff5ed4558ab0ba8c86a1f2d37e5766da28d536f794c4f3f8ebe49c"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:11eaf4004037c0cc9c52838cb2bc33b2fe842ef7d46a4d89d2d966b86e457319"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:66ba683e8bc7230739f09b6c3e892fb178b3a3d994ebce086e1ef90cb79f2554"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cfb5a9180f7fe3c5fba31683be7e2b8aa77e740dfa2f5a94bee82d7dd098c3"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:4b198a9b0eaf47959df32b9a5e087c05d30c59ffca8b093f06ff27dd665566a5"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:4d6f8b9ebc4b24adb9f1c9d1c7b13e0468d3c73e312feec25cc5dc7641f1bdfb"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:54eedeb4f3a8e49a66d121cab75d3f9a2bfa763adcc33c12d87c469e7b368cca"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bd94054bbbf1bca9aca78bb03a6519c094bab03179ff967f13213d296a5b309a"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:2bd4bc5762451738bae0b44b5729422349b4503e1d89ed8c96ed3467f05aaf02"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:a205472673b970c7a80fafe05cc051dc7e0983dcef4b80280edfae8cfa37cf0f"},
- {file = "cmeel_tinyxml-2.6.2.1-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:88331fe5a64bc055a1dc75ee041a7f31714aefd82eb6170f893b96fd066e895a"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:d3bf672742744d6a70911643643ef2983c96423506621ca9562ddb787287b875"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:9d04dd421f648fa54671bc841b169704f1b21634735159250a65e82ad424109d"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8de8dd47599e76664db0fb16b340c66d93e3551c0301a56ad0aa45d27b53177a"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:f728e4c375e272d4d4acaeaa3e8fe2bdcb17bb0e2fa34ce7a9f4aaa3a32ec449"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b88f0164ca37c583ab1c39fc90f6ddd19bfd6d0980f16ce0e1165110c30b387"},
- {file = "cmeel_tinyxml-2.6.2.1-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:e156810c8177df4860b733e0c6ef74c8d22aacd1b92c35fb4a93f33256e3ad1a"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4201c047656eebfecc4cc00c059a8f09f3615773ae32c1e498755a0a1a2b4503"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ea500911b1ddb91bd9087059dc53c4ac69a4dc98357d184daf3ea81b3bb24e1c"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:60a5e5ff5907be1a7686142b687b364ebd6e42f190d3c7131c8b842d4a458ec9"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:57249452e502c289b20b10bb0f46be149ea7c267b5c691524b594f941cd539d3"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:99e9afd2d2b69562facb42ada10c98b58fe4aa8719e36df7933ee46bd932ca77"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f53a7deb456b164c977b5e88053cc95baaf7b447dea0aaac267afab9a5d9ce28"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2921973114369c0b7520f5d41895e069e2ef9ae5f35f3fe6a4d24aa240db1bfc"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:950697acc9e50c0bbcc1dc9d826f487b3f192b8e6cdc1453c4426c0b54d3567f"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:89db7a3dedc98be5e8fa019d243bb5c73c032d0377bb77721d21ad6a40530239"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c42188eccbdb904954eedc345e4f99ab6b3e000d5955070cd3a1ca1626d57c93"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:db337250f51910308d3513c335d57d8343f41c9bffd0e3c1ed9a624526feff1e"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:57f7824698e08dbe6e61a0231c2df0ccaedb98e0cbc52dd0b15f6b990de0835e"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:91693ded7a4d8e944635b0e361ac0c9a9ef3686438cb20f4446db4deb69dbe65"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:abcd362e2531b049c9f0546e5d3d61321e2b4a4b53c7ef99018ef023c3a9ca3c"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9d89a4b1047a37f5c5c1574a5eca1849a7768febde6079a3a6b874e482dd2e9c"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1fccd05095535d07bb8c62797183699a61f3e92ad6e448074b1341fd903c90f0"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:ca848401c5124a267e936754165fff991bde9a43eca5409e19dea45896821b29"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:5c34e754f5440b42db77bc5168799a4bce30505d9c36665e66504771d4a057a0"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cf7894b0da98b0bf56368fe6429f3733e4fe788f9db19d788e809a600e6038e9"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1d43b231fe6014514841e12f68729ba154243b868e729dd1a2605aebd33e4401"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:171a3250a6f93e5d6a057660f37021fa66f79f9fbc31557d0d6011ab76c279b2"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:583e7d8e13f42fcaf9886805e5bfe53a73ba82e5c6fb4adafeaf17c462c7b692"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:4f763f7885b268e1631343c1a3ef8e3574f78a7e28f07fd3616871ee7da5a1ff"},
- {file = "cmeel_tinyxml-2.6.2.1-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e884452d411a8d6c679f659361373c324f69e736c4b64d2acceb08c4b93448cb"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:15b4ce5e1edf3ae551a08833b7f953c49167673f0daae07bb419d8c59e9bbbae"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:3c3d031ca505d63604ec782377745f599a8df08e6e2f94f58b0b1cb84b07120b"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:05d623f390219b4b847f776fd0d111bd65f5928fa50d70696bb7a755e506640c"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:db0d750b4d2ca3eed83ee6802c86abd10836f6a4f2b32621709b9f6d06b09948"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:fb005d0b4e1ae01a6d7509bab24b68c4b1c7f2fc2ad9c271c1755cf98000820b"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:942d010eda71e1cf29777bd18583f8efe649e3a6b2914858a9504bb96c5cc5c3"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:127fd8061e07e2e75031887e4d5455a4d2a914b2608f7e23e69d5c1b79f60796"},
- {file = "cmeel_tinyxml-2.6.2.1-1-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:a24a4ccf961e2d77ac82d93ee31f72a5d794088e18ec93b096de1c7212a45d6d"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e7f998a3639a525625703cd165df70ac25081c258033f38ef96a7e12fd00e52f"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:426ce6e2cf8c6cc0f4a150a50fcbdf38ef288333dcdf3f9c6eb2a35de7b7724b"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:6fae6975638a06be9c702f2669902edb11ba0d8bf707382bf8a696f622135778"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:7faf5297abdc678fdb91c9bce8cdf56c95aa7aee39d8455e7c96dcfbf586bdec"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b3477557418711abb6a3dede833938db7e1eba1edb83d483f355e5db3ee19f"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b1423e1eb0261d9639ebcdfb43631b90c2418f992f4963ab5775fa62a4464e9e"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:679ae94ddf67e2738709b49d975cd7559723d0add4dc8fd0f12ecd4c9667eab5"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:256c780eee3f16bc66f7b0fc931c5a4a4eb4f099785f80d5b9ca15b58f74749d"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:46e19fbbe7c3f0b7967f5db05daf21221cf2722fd012b9eeaccdc5dbbcf3d764"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:50525b4acc64376b0b4b9b574d8f7a1e6b3546be3c2e6ee4ea55fd58d88f2a45"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a018e0000d7d6f60d569391a917ee69496163960f4fe2b761c4692446e973b51"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:7392849ef912667100d205e19f75831175918c1aa85ead1deecfb2a854c8efb2"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:eaa7f14efb9ccd829a459157fa45ac83cb3e0f2369cc1d8e5a8e96b9931b9087"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fd8499709c3741d31f77af104ea97598f7f12aae1997a0f7d1a2baded192e222"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0301b1a2ec07fa0f20ad1cd7b0233f55822aec17cf39533b2d4e269c6f0fe7bf"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:929e624c859b4668ffa67c5c6322472b19257661d74078236973a259e828a15f"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:454e9e05c90bd4a401d1db0fe0eb3e4eebd7ceff8ce6296c9f0203c34fa3c1c9"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e27d90a6ecc7978a85ca5b70960b551ba20227d57f6de10f4179febbc2152955"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e6242edf0fe2b5cb0206d08cc7e988ee39da5217b9c6edf99108b3db687253fd"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1750e80ef63084344075f6374c48847e474e7fad08b7aa80dad94399f577b615"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:909d29e56d2d545b2a4b2b06203b9b1f2cd73ae3b70c06e2026ac1a99cbff4cb"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:d57345df02a76e975acdd403d86d449c0a71b2b7b5306aa7bc8826268724de93"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:4fe32f47dc652d91d7984eb47c06ff0e50b4de8fbcfa55924690458be8769498"},
- {file = "cmeel_tinyxml-2.6.2.1-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d537bc1d785cd81465b3e8bf0f1efa666e93fb2974312ef9db2fce38251ce10e"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:a5fe96375580cab7d92c9e02ac1703a037468dff145b87b54dd4f61b09f4372c"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:ba73d692b4ae4d1a33e1f803a627a962d23db146ad4dd277451da12f7f357404"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1eea31fbe324a0e7f5246db613c66385cf4a1dbc7ca7518bb0866320be74e57a"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3927c1c73c085d12643d63686bddd6313b85262c3fa497612d6df683b432c898"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:61b519b3488d07a70e617fdf06a3baf3c1d7a7ba637953b8d5e7f1572e0f8121"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1b75fbee702d9147992e9c0c98d8524c37c4ec5dee4ca048f4ec013d91fad64a"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:d1768d1b81179cafc584b133767bcfd559dc7eb35f4052158e203c97c04c36b3"},
- {file = "cmeel_tinyxml-2.6.2.1-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:80863946696ce7f5f6a820dbbfe43230f2b490605ea447e248c1f942a30fb867"},
- {file = "cmeel_tinyxml-2.6.2.1-3-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:9182555a0962b46ff22e4de104893dbe4e68ecf81d85c0748d763a44b6283469"},
- {file = "cmeel_tinyxml-2.6.2.1-3-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:dda3ee26f956e12201b1e51d8705349f2df0ab9b58727046f46efea7f8237ecc"},
- {file = "cmeel_tinyxml-2.6.2.1-3-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:3dc770b6a710ae49ef37226de52c2423ef3dcabd040d2fe09c01480df2d844c3"},
- {file = "cmeel_tinyxml-2.6.2.1-4-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:07fcce875c2850a74a76d54093ef6a04eddfc788fb796fd78d7d4bf762c39722"},
- {file = "cmeel_tinyxml-2.6.2.1-4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5cd4c29d1f80fb0dcd2c5c0ac245548e1596c727a60f12c2ff1909cbca99d8ec"},
- {file = "cmeel_tinyxml-2.6.2.1-4-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:1955d420370b2d9086fb6932a1f576cc8c5352f062d41ce9f29e1440ee498c40"},
- {file = "cmeel_tinyxml-2.6.2.1-4-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:ff0174a436bfd05805d9971a431820966bf95b611ec64fcaa6da8fb223ff4e86"},
- {file = "cmeel_tinyxml-2.6.2.1-4-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:7a7912726afd0755199e470cb32c258b9fd4901670f6bb5938a6366a9a7d5c21"},
+ {file = "cmeel_tinyxml-2.6.2.2-0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:819e113a9ddb74c5d4df2ef3a8c4ca0ccc6d7ddf8a51a078575a4a2626280b82"},
+ {file = "cmeel_tinyxml-2.6.2.2-1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:43768e9f8cf92b337555c6615b95a9ffd4d3c1fb4a571dc3de2eaf4ed805721f"},
+ {file = "cmeel_tinyxml-2.6.2.2-1-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:ab68c61eb5e20a5dc671f370eb5066cbd868da23c6fe7754d79c01097611225e"},
+ {file = "cmeel_tinyxml-2.6.2.2-1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:def3a40eaf1aa9e16f641a85e18baa1440820fbec5bb144533c1cb087dec8598"},
+ {file = "cmeel_tinyxml-2.6.2.2-1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:ca09d64773ea7579a0537e996ea533c5b85aa6141a716fb8d43c96d8fe814279"},
+ {file = "cmeel_tinyxml-2.6.2.2-1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:ea75d7d1ffa73f216e55c4ef82e0ddfbfa2584f74deb70139e34ca66e0e9754f"},
+ {file = "cmeel_tinyxml-2.6.2.2-2-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:c387a84b6971c8d08307da19767d79c759e3b5bc6931a09440df7047c6c431c5"},
+ {file = "cmeel_tinyxml-2.6.2.2-2-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:6c071be06b2da07b78ded40380ab91efb927b359a91ef983b2f49a3654c06746"},
+ {file = "cmeel_tinyxml-2.6.2.2-2-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:d1cfc138f74b92404a905e36b514b6cf7420bc08bd59c296adad9d078ce048a8"},
+ {file = "cmeel_tinyxml-2.6.2.2-2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:65bccfc14744ee4f051eb0de8ef40b5ef26bbf37403f4060d0dc7d2d85d20799"},
+ {file = "cmeel_tinyxml-2.6.2.2-3-py3-none-macosx_12_0_arm64.whl", hash = "sha256:136966c8c0c8a712c915cc1d1e60a781a0284204632ff73b63930c2b6c3e3e0b"},
+ {file = "cmeel_tinyxml-2.6.2.2-3-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:fe6489a2c4f4b7f2b3355584bae5fa88ae2c78ada402ba6fe5c13b69c1ad83e8"},
+ {file = "cmeel_tinyxml-2.6.2.2-3-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:7d784705b8145d86bbcfb70cbf328dda4159fd9e2a3ecd92af8ac5bd0e778623"},
+ {file = "cmeel_tinyxml-2.6.2.2-3-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:57d4c654372c45d5f66067d150c56fb1ee63c8712b496c7d5b89d911d45e642c"},
+ {file = "cmeel_tinyxml-2.6.2.2-3-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:510b41c988920d55db187e5a906bced62a88ff623c502636622059876b974d38"},
]
[package.dependencies]
@@ -852,106 +580,28 @@ cmeel = "*"
[[package]]
name = "cmeel-urdfdom"
-version = "3.1.0.1"
+version = "3.1.0.3"
description = "cmeel distribution for urdfdom, URDF parser"
optional = false
python-versions = ">= 3.7"
files = [
- {file = "cmeel_urdfdom-3.1.0.1-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9316c72486f6ae37ef6b7281462f1997c4b96a60c41f6a94bff7f7bd6091ac39"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:9e974fda18fce18c63897be74216475d5105a49ea0ff73e42414b424229bcda9"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c32a5e666e2183f49a06d8eaac08d450ccc01da5e3531580b9219d1b3b2b1eba"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e2d38798b1bb436f0f26fb77e9fb07339b50be2c75fa54ff2b9aafb04c630c9e"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5f4312829a33c9e40abd8dbe2affea6cea283d0261158d348131443b15d07f25"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:c0a1cea8f7590d3f48aabfcf4aeb1f22d91af695db5b2dfa95010bc3db9da269"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:575dee08aab9349640b14ed918a9c5704ed769e328f62c85d292a5283bc61565"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:94175092c3a014e19b3691c1d98547a88dbc3ec94318b52645f6964a18155b8f"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:e94bef77f79917aec6a6f293adbe737d99fb63146906e04e6a2dbb4ddce54844"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:ab0ffd0dc279c3a1017e38e4fc7ec7ef79b3e7318685aa887b3923b67440cdec"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:d71a432944bf2218c2a74ce8fd08c1147ca069ea746c496159efdd988c43c700"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d565e662d8b2564be853e356795d2fdb5ff319656a889e8392e66d0ed62d98a8"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d42162f9a04aa4ad47449bce6e3d21afbc045dd7e79289f0952ca0fd183d25e4"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:b69a815720c6a4ceebef317fbcac93a3321f29504a4ed9ea7297516155cca9e1"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:eaa673302a65b325b351a4f3df05787c7e2e99c3f0f10c80f43527b5594ea718"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f94e9ab745eaabf8190ac3c7899c40a4f2b92da9049b5627fc004c540cdc77fd"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:826c0b60892bf5b29f9526449d56202957a10fffd3b7c0e045c53f1156d13428"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:059eedefcf88f6f67bd1eaf86e451c234cf4674b9ada2edda687dbf0921b6617"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ff41e40826e1731658ab4ed8ee63639e2743db0160fecd3eb0f7cf774a1dfa34"},
- {file = "cmeel_urdfdom-3.1.0.1-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8472a5f8a235dc69e1ff18c4642c4e9f47273d10374f2a5a204b2b3639db0f22"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:7d3a6f50eba6fa84c3e96db719d65fef150951b90354233a5d8e8ccaad4d5e44"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c62ed757109f55679676d45bf576cb281cd9898a8873028af69b2a3b85c0c02f"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ff7fccd93aed10d494bfeda31e09a3d5630f4c7dff356978955a67d422788713"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:43ff9612bb1b269b9d44ac28b3880d3af59d71a46a5c186339ee78093bb151ba"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f76f17710c39f6e6fed7fc4abb2cb4f3a0a7c82785e2808e07dd1e952a79ee02"},
- {file = "cmeel_urdfdom-3.1.0.1-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:aa356e2d96979db0978fd0fb58bd8d8d5fd9286f87adfc3e8b5ccdb580d7777f"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c6c148fdfec94e4527ab48f5d2f8a45dad26457faa73afb28e472392e6e25414"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d28895446826b9967b2385f2e5086539764d2ede5eedf73a069adafa1d9c778"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:7ca063de5f47bab780be2bffcf35aad3c20863f6c91a33b69a3bcc4a19016088"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:8f6826c83476109f22efc22fd5112214ccbc0dc38d2f563e086276f8c05b61b4"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ead6415b7f9be3c450c29ce4068d7a268f09a2d39f3d6b0062052da0e30793b8"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3658676e3fe9eaa63aa413a8cdfdaa435d52b5627e41202991974847509fc87c"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d1abfac051a831d3e336ab503de369dd08b858a3e31627bed07aeae7d4ca655f"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:26ac294a6d76fec9469738e8282ba1916b5c77eac92d00dfc1e16811399f0341"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:565c88f2a5da9644ee60d3f90a97d86a658a91b4d916860429dcbb29fd5a8eb6"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f2d8a9e25fd1e92bde657606a89745405fe8a6dfb4c058e0047ddeaaca0b6e8f"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:0e966f96cc56092738c5913d842022bbcbb9109b91d9e3977803e2e510542211"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:a378a03f5aeb919266d5e7417e2a282ed6d232f73fbd427ea16d247570d66db3"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:7b0fd98f8e674c15a2631e0f5a09cfd1b0bbbf23ddb4d69084a2f6e5a46b55d5"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d9112a6040996ae1416d37eb146f1476c51fd5eda2eb16c7771e36ddc9c7a22c"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a121598e3ecac00443ca42f33fc0e5c7e3d6622830269d1e2fb972a57e6b53d2"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7c66fb83efa8987bb32233b956c3bae8ac7b5842d8c522ade58cd9bea0bc34b9"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:3f3de62cdddd6e7e3dc78ee16801a1a4aea8d1fe1870da51b53c09d6f092a48b"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:87a64f973c3b5177beba2a1df07f0877c963efb807465d3310d321e7bfd9936d"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a2e8b20199e96b345f9fc5097ed6afbcd5fc76cd396e0b0de0760214ef057b"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:aea06dc0fc5412faea32c55b5feb80f18c0d087ab4a7eb7eca5c193b4d30076f"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1d4d566ff2bad6819bba2d69b53ddb0bb488d69b4da8ce857ae0da91f3389a81"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:be8b349d665bfd58986d4fb7dccddf9e3ce2e153f1a2d800a28a9b4f270b94f2"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:5b5edb2386854d5addbccbaec087a435b727de98f3db14c6596cb9a254bd15ac"},
- {file = "cmeel_urdfdom-3.1.0.1-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7861ac96427a2155ed2ff95f3f9783920141bbfae38c8dfc97b29d8a07bc0d0d"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:04a224add29a8d2648fffe6330a85796def312dd8527d46e7db4cc9d314cc4f3"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:87f929e6502d31b6a7817a9de84d44d6fb0a085fb4e9ab408fccc6c05b722557"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d9e6df279a81d9dd5e308d5c464c0f4e781dda29a47423031a234449e30dfcab"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:119b4afb0adde0b17be1123a0ac8fe3c57d33a7e75aa9d5b7157c6159f90a308"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:03bd73683af06f728d9cb128eca68f4ff24aee50df52e2cb731df2f36c3e70d1"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c3326a44182c7ec61055228a9d49a52ea1a4cf87ed610253e60d5b4f5338875c"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3a3eaf563f68611475087e70201b6bfae9aaa42dc614c86f08977699f0a8308f"},
- {file = "cmeel_urdfdom-3.1.0.1-1-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:7578d11923b79ba955518b53abf5082ada10845582a8baf7a5940a29dab9476d"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b01b38be539b707d0a2b14a804cee25d47451ce5dddfa98585b368eef30af991"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:4cd97ed5e3274dbf05e05bde7c2014e7eb698de013802d4b8624b4e20e78826c"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:1ddfbb04d9826850db6e56601f98be43b7b783695821c4e8632f6605a717dee6"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:39f55d036fe81740a5ededf2eeab944dc8706edfac3a50df060f8b9aaedd43c8"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:36a14da1acad1a7bf26e61d9dc31107abd9a774603ad76ab57ee97d2cc6a234d"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:1d2321e412a706bb5424ff770bf11437abd6321a06f19b1fade9f0d507b41b35"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bf839cd91115cf3c63465053d231c2edfbbd341f75b6a14c41b855e4362a85c4"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dc3bc3f8286c1a46adb1a0d03167092a56046f324e4ed5a9fe2a59e1f9957d66"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:831f3867a8f73029f08e41c7d6571e78e320d44aa1f582dd20147295b1a4953f"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:323f0e3d7c295344dd9ddf34b20eb983956a0571a9810ea3f5aec80f2b2f1874"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:5e7ca664666f1a5b26e75a8bfce81610ecda6b1b53d28c2ace4758af642427c9"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e6ae9cdaa3ea9e2ef6e9333d4a053f70d6a89d552f9013f0a955c0d9e2fe98d9"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9a7dda0d0089468a14e9e42d21e46b3dbbeccdcd178ecfb7da7bffd06b3c5f46"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:8e8f65c7e7b350e14708572d638857ffc4911b9d24cb426ee5996b0144956990"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:d38c6381438eedcef2d02385ae6a93201503fb5583978c934544e13a8cd25f4d"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:787cab0cb96cd4bc774ec0dac42bad093dba618f2d85465ddd6e0519e9174a14"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:699e49ba6b1d57135249339b4b4f50b6410c3cb7b974bba936d136c93c4406bd"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:2ba6a45e12244d9f46efa620c1d27a90088d647ceb9842e75b1a06c74f59f1d6"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:9213f0fb982ce799f6e3520dd186ace5416cb578f0b1099b2d13f3971b12466d"},
- {file = "cmeel_urdfdom-3.1.0.1-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f2135edc2397c8704175f3949eea07ff540954bddec90ba1e7a18d00d153a3d8"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:79e075127e0b080b7f94f547313d70f3cf0f8731b3d0321a1f07f1f5af52b170"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:0a3382b4e335fbf960cc2cc4e5f20c7d3d831914f03324efab4bf49cedbb3180"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7eb969eace9e7c047d3da29d367c758d1c166fea46225f70ef68e4e9387ca012"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:4be0d83cd16570faf58162a1365580a2b6d23db3c4283786a638db970c96068e"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6dbb70ebfe850a206ff5476495ec87f4803acaabb699c7faa79aabe08f98c0a5"},
- {file = "cmeel_urdfdom-3.1.0.1-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:3fa53d1a51ae3d41cb24e528cbf31ae54f79103a47ddea7efeccc77e49b07ee7"},
- {file = "cmeel_urdfdom-3.1.0.1-3-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:5008b44ce3d31ce91d2aa8edd101fe51329bb9476d38ce928e349acae132996e"},
- {file = "cmeel_urdfdom-3.1.0.1-3-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:c3dbfc13792da8ffa6948821f373a41765a2274c48a86cdbdbcc7a2094ed5491"},
- {file = "cmeel_urdfdom-3.1.0.1-3-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:9ed65c061387d07faf0978eea949ccec9ff58484040ef22747cc3d85bd05b136"},
- {file = "cmeel_urdfdom-3.1.0.1-4-py3-none-macosx_10_15_x86_64.whl", hash = "sha256:9c4099068ae1d2531d330effc75f599efcfe7067c93bcc0a0b4fea98dec4b563"},
- {file = "cmeel_urdfdom-3.1.0.1-4-py3-none-manylinux_2_17_aarch64.whl", hash = "sha256:377196c7f877c26b9b7895952951f12e3d5b0acaf3b73a448c7b822a8a13dbb4"},
- {file = "cmeel_urdfdom-3.1.0.1-4-py3-none-manylinux_2_17_x86_64.whl", hash = "sha256:3f08f30016cb9acedb51ee3615e3d717879755b9111be1465508f86ea92c6c9a"},
+ {file = "cmeel_urdfdom-3.1.0.3-0-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:04d42b80edbd91b33c9fcdd6fb6a0deab1fbadd8fe7c10defeeacf61f60f8156"},
+ {file = "cmeel_urdfdom-3.1.0.3-0-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:4697bf68a5adbcaf4c8b6e99f989055358d2345d4eb734ea0bb12f40606f82fa"},
+ {file = "cmeel_urdfdom-3.1.0.3-0-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:57dfda60bf9d6548e9f835c3ed2b72c08c7cef0c762d718597c9a01106a99793"},
+ {file = "cmeel_urdfdom-3.1.0.3-0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:f51b3ba27514b8294ca5d98b4db62bdc5598d3cc8bdab5fe89f27938bdee8e97"},
+ {file = "cmeel_urdfdom-3.1.0.3-1-py3-none-macosx_12_0_arm64.whl", hash = "sha256:90200d3c0aae50b64001c8d22a524cc6d36c975fc718113ba15f5740d7b7cf5b"},
+ {file = "cmeel_urdfdom-3.1.0.3-1-py3-none-macosx_12_0_x86_64.whl", hash = "sha256:38eb5980fd1af9013882d1dd6ef8a723aea2a26caf9bc739445c58c26fff3f13"},
+ {file = "cmeel_urdfdom-3.1.0.3-1-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:437ba7051f9026c4bf9129e14d240fd59013f76b4f5327ec912726d52283506e"},
+ {file = "cmeel_urdfdom-3.1.0.3-1-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:54d484c7b8ed644f4f589232d4bc12d68f8613bb2b98cbb39b44a029c6ccedad"},
+ {file = "cmeel_urdfdom-3.1.0.3-1-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:e175d65b977e5a26ecc334b7acfed487e604d7fa3a045a528b62fa13f8a5c34c"},
]

[package.dependencies]
cmeel = "*"

+[package.extras]
+build = ["cmeel-console-bridge", "cmeel-tinyxml", "cmeel-urdfdom-headers"]
+
[[package]]
name = "colorama"
version = "0.4.6"
@@ -963,64 +613,164 @@ files = [
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]

+[[package]]
+name = "configargparse"
+version = "1.7"
+description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "ConfigArgParse-1.7-py3-none-any.whl", hash = "sha256:d249da6591465c6c26df64a9f73d2536e743be2f244eb3ebe61114af2f94f86b"},
+ {file = "ConfigArgParse-1.7.tar.gz", hash = "sha256:e7067471884de5478c58a511e529f0f9bd1c66bfef1dea90935438d6c23306d1"},
+]
+
+[package.extras]
+test = ["PyYAML", "mock", "pytest"]
+yaml = ["PyYAML"]
+
+[[package]]
+name = "contourpy"
+version = "1.1.1"
+description = "Python library for calculating contours of 2D quadrilateral grids"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"},
+ {file = "contourpy-1.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e48694d6a9c5a26ee85b10130c77a011a4fedf50a7279fa0bdaf44bafb4299d"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a66045af6cf00e19d02191ab578a50cb93b2028c3eefed999793698e9ea768ae"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ebf42695f75ee1a952f98ce9775c873e4971732a87334b099dde90b6af6a916"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6aec19457617ef468ff091669cca01fa7ea557b12b59a7908b9474bb9674cf0"},
+ {file = "contourpy-1.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:462c59914dc6d81e0b11f37e560b8a7c2dbab6aca4f38be31519d442d6cde1a1"},
+ {file = "contourpy-1.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6d0a8efc258659edc5299f9ef32d8d81de8b53b45d67bf4bfa3067f31366764d"},
+ {file = "contourpy-1.1.1-cp310-cp310-win32.whl", hash = "sha256:d6ab42f223e58b7dac1bb0af32194a7b9311065583cc75ff59dcf301afd8a431"},
+ {file = "contourpy-1.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:549174b0713d49871c6dee90a4b499d3f12f5e5f69641cd23c50a4542e2ca1eb"},
+ {file = "contourpy-1.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:407d864db716a067cc696d61fa1ef6637fedf03606e8417fe2aeed20a061e6b2"},
+ {file = "contourpy-1.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe80c017973e6a4c367e037cb31601044dd55e6bfacd57370674867d15a899b"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e30aaf2b8a2bac57eb7e1650df1b3a4130e8d0c66fc2f861039d507a11760e1b"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3de23ca4f381c3770dee6d10ead6fff524d540c0f662e763ad1530bde5112532"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:566f0e41df06dfef2431defcfaa155f0acfa1ca4acbf8fd80895b1e7e2ada40e"},
+ {file = "contourpy-1.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b04c2f0adaf255bf756cf08ebef1be132d3c7a06fe6f9877d55640c5e60c72c5"},
+ {file = "contourpy-1.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d0c188ae66b772d9d61d43c6030500344c13e3f73a00d1dc241da896f379bb62"},
+ {file = "contourpy-1.1.1-cp311-cp311-win32.whl", hash = "sha256:0683e1ae20dc038075d92e0e0148f09ffcefab120e57f6b4c9c0f477ec171f33"},
+ {file = "contourpy-1.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:8636cd2fc5da0fb102a2504fa2c4bea3cbc149533b345d72cdf0e7a924decc45"},
+ {file = "contourpy-1.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:560f1d68a33e89c62da5da4077ba98137a5e4d3a271b29f2f195d0fba2adcb6a"},
+ {file = "contourpy-1.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:24216552104ae8f3b34120ef84825400b16eb6133af2e27a190fdc13529f023e"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56de98a2fb23025882a18b60c7f0ea2d2d70bbbcfcf878f9067234b1c4818442"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:07d6f11dfaf80a84c97f1a5ba50d129d9303c5b4206f776e94037332e298dda8"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f1eaac5257a8f8a047248d60e8f9315c6cff58f7803971170d952555ef6344a7"},
+ {file = "contourpy-1.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:19557fa407e70f20bfaba7d55b4d97b14f9480856c4fb65812e8a05fe1c6f9bf"},
+ {file = "contourpy-1.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:081f3c0880712e40effc5f4c3b08feca6d064cb8cfbb372ca548105b86fd6c3d"},
+ {file = "contourpy-1.1.1-cp312-cp312-win32.whl", hash = "sha256:059c3d2a94b930f4dafe8105bcdc1b21de99b30b51b5bce74c753686de858cb6"},
+ {file = "contourpy-1.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:f44d78b61740e4e8c71db1cf1fd56d9050a4747681c59ec1094750a658ceb970"},
+ {file = "contourpy-1.1.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70e5a10f8093d228bb2b552beeb318b8928b8a94763ef03b858ef3612b29395d"},
+ {file = "contourpy-1.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8394e652925a18ef0091115e3cc191fef350ab6dc3cc417f06da66bf98071ae9"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5bd5680f844c3ff0008523a71949a3ff5e4953eb7701b28760805bc9bcff217"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:66544f853bfa85c0d07a68f6c648b2ec81dafd30f272565c37ab47a33b220684"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e0c02b75acfea5cab07585d25069207e478d12309557f90a61b5a3b4f77f46ce"},
+ {file = "contourpy-1.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41339b24471c58dc1499e56783fedc1afa4bb018bcd035cfb0ee2ad2a7501ef8"},
+ {file = "contourpy-1.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f29fb0b3f1217dfe9362ec55440d0743fe868497359f2cf93293f4b2701b8251"},
+ {file = "contourpy-1.1.1-cp38-cp38-win32.whl", hash = "sha256:f9dc7f933975367251c1b34da882c4f0e0b2e24bb35dc906d2f598a40b72bfc7"},
+ {file = "contourpy-1.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:498e53573e8b94b1caeb9e62d7c2d053c263ebb6aa259c81050766beb50ff8d9"},
+ {file = "contourpy-1.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ba42e3810999a0ddd0439e6e5dbf6d034055cdc72b7c5c839f37a7c274cb4eba"},
+ {file = "contourpy-1.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c06e4c6e234fcc65435223c7b2a90f286b7f1b2733058bdf1345d218cc59e34"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca6fab080484e419528e98624fb5c4282148b847e3602dc8dbe0cb0669469887"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93df44ab351119d14cd1e6b52a5063d3336f0754b72736cc63db59307dabb718"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eafbef886566dc1047d7b3d4b14db0d5b7deb99638d8e1be4e23a7c7ac59ff0f"},
+ {file = "contourpy-1.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:efe0fab26d598e1ec07d72cf03eaeeba8e42b4ecf6b9ccb5a356fde60ff08b85"},
+ {file = "contourpy-1.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f08e469821a5e4751c97fcd34bcb586bc243c39c2e39321822060ba902eac49e"},
+ {file = "contourpy-1.1.1-cp39-cp39-win32.whl", hash = "sha256:bfc8a5e9238232a45ebc5cb3bfee71f1167064c8d382cadd6076f0d51cff1da0"},
+ {file = "contourpy-1.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c84fdf3da00c2827d634de4fcf17e3e067490c4aea82833625c4c8e6cdea0887"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:229a25f68046c5cf8067d6d6351c8b99e40da11b04d8416bf8d2b1d75922521e"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10dab5ea1bd4401c9483450b5b0ba5416be799bbd50fc7a6cc5e2a15e03e8a3"},
+ {file = "contourpy-1.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:4f9147051cb8fdb29a51dc2482d792b3b23e50f8f57e3720ca2e3d438b7adf23"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a75cc163a5f4531a256f2c523bd80db509a49fc23721b36dd1ef2f60ff41c3cb"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b53d5769aa1f2d4ea407c65f2d1d08002952fac1d9e9d307aa2e1023554a163"},
+ {file = "contourpy-1.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11b836b7dbfb74e049c302bbf74b4b8f6cb9d0b6ca1bf86cfa8ba144aedadd9c"},
+ {file = "contourpy-1.1.1.tar.gz", hash = "sha256:96ba37c2e24b7212a77da85004c38e7c4d155d3e72a45eeaf22c1f03f607e8ab"},
+]
+
+[package.dependencies]
+numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}
+
+[package.extras]
+bokeh = ["bokeh", "selenium"]
+docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"]
+mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pillow"]
+test = ["Pillow", "contourpy[test-no-images]", "matplotlib"]
+test-no-images = ["pytest", "pytest-cov", "wurlitzer"]
+
+[[package]]
+name = "cosypose"
+version = "1.0.0"
+description = "CosyPose"
+optional = true
+python-versions = ">=3.9"
+files = []
+develop = false
+
+[package.source]
+type = "directory"
+url = "happypose/pose_estimators/cosypose"
+
[[package]]
name = "coverage"
-version = "7.2.5"
+version = "7.3.2"
description = "Code coverage measurement for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "coverage-7.2.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:883123d0bbe1c136f76b56276074b0c79b5817dd4238097ffa64ac67257f4b6c"},
- {file = "coverage-7.2.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d2fbc2a127e857d2f8898aaabcc34c37771bf78a4d5e17d3e1f5c30cd0cbc62a"},
- {file = "coverage-7.2.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f3671662dc4b422b15776cdca89c041a6349b4864a43aa2350b6b0b03bbcc7f"},
- {file = "coverage-7.2.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780551e47d62095e088f251f5db428473c26db7829884323e56d9c0c3118791a"},
- {file = "coverage-7.2.5-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:066b44897c493e0dcbc9e6a6d9f8bbb6607ef82367cf6810d387c09f0cd4fe9a"},
- {file = "coverage-7.2.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b9a4ee55174b04f6af539218f9f8083140f61a46eabcaa4234f3c2a452c4ed11"},
- {file = "coverage-7.2.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:706ec567267c96717ab9363904d846ec009a48d5f832140b6ad08aad3791b1f5"},
- {file = "coverage-7.2.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ae453f655640157d76209f42c62c64c4d4f2c7f97256d3567e3b439bd5c9b06c"},
- {file = "coverage-7.2.5-cp310-cp310-win32.whl", hash = "sha256:f81c9b4bd8aa747d417407a7f6f0b1469a43b36a85748145e144ac4e8d303cb5"},
- {file = "coverage-7.2.5-cp310-cp310-win_amd64.whl", hash = "sha256:dc945064a8783b86fcce9a0a705abd7db2117d95e340df8a4333f00be5efb64c"},
- {file = "coverage-7.2.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:40cc0f91c6cde033da493227797be2826cbf8f388eaa36a0271a97a332bfd7ce"},
- {file = "coverage-7.2.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a66e055254a26c82aead7ff420d9fa8dc2da10c82679ea850d8feebf11074d88"},
- {file = "coverage-7.2.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c10fbc8a64aa0f3ed136b0b086b6b577bc64d67d5581acd7cc129af52654384e"},
- {file = "coverage-7.2.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a22cbb5ede6fade0482111fa7f01115ff04039795d7092ed0db43522431b4f2"},
- {file = "coverage-7.2.5-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:292300f76440651529b8ceec283a9370532f4ecba9ad67d120617021bb5ef139"},
- {file = "coverage-7.2.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7ff8f3fb38233035028dbc93715551d81eadc110199e14bbbfa01c5c4a43f8d8"},
- {file = "coverage-7.2.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:a08c7401d0b24e8c2982f4e307124b671c6736d40d1c39e09d7a8687bddf83ed"},
- {file = "coverage-7.2.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ef9659d1cda9ce9ac9585c045aaa1e59223b143f2407db0eaee0b61a4f266fb6"},
- {file = "coverage-7.2.5-cp311-cp311-win32.whl", hash = "sha256:30dcaf05adfa69c2a7b9f7dfd9f60bc8e36b282d7ed25c308ef9e114de7fc23b"},
- {file = "coverage-7.2.5-cp311-cp311-win_amd64.whl", hash = "sha256:97072cc90f1009386c8a5b7de9d4fc1a9f91ba5ef2146c55c1f005e7b5c5e068"},
- {file = "coverage-7.2.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bebea5f5ed41f618797ce3ffb4606c64a5de92e9c3f26d26c2e0aae292f015c1"},
- {file = "coverage-7.2.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:828189fcdda99aae0d6bf718ea766b2e715eabc1868670a0a07bf8404bf58c33"},
- {file = "coverage-7.2.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e8a95f243d01ba572341c52f89f3acb98a3b6d1d5d830efba86033dd3687ade"},
- {file = "coverage-7.2.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8834e5f17d89e05697c3c043d3e58a8b19682bf365048837383abfe39adaed5"},
- {file = "coverage-7.2.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d1f25ee9de21a39b3a8516f2c5feb8de248f17da7eead089c2e04aa097936b47"},
- {file = "coverage-7.2.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1637253b11a18f453e34013c665d8bf15904c9e3c44fbda34c643fbdc9d452cd"},
- {file = "coverage-7.2.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8e575a59315a91ccd00c7757127f6b2488c2f914096077c745c2f1ba5b8c0969"},
- {file = "coverage-7.2.5-cp37-cp37m-win32.whl", hash = "sha256:509ecd8334c380000d259dc66feb191dd0a93b21f2453faa75f7f9cdcefc0718"},
- {file = "coverage-7.2.5-cp37-cp37m-win_amd64.whl", hash = "sha256:12580845917b1e59f8a1c2ffa6af6d0908cb39220f3019e36c110c943dc875b0"},
- {file = "coverage-7.2.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b5016e331b75310610c2cf955d9f58a9749943ed5f7b8cfc0bb89c6134ab0a84"},
- {file = "coverage-7.2.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:373ea34dca98f2fdb3e5cb33d83b6d801007a8074f992b80311fc589d3e6b790"},
- {file = "coverage-7.2.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a063aad9f7b4c9f9da7b2550eae0a582ffc7623dca1c925e50c3fbde7a579771"},
- {file = "coverage-7.2.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38c0a497a000d50491055805313ed83ddba069353d102ece8aef5d11b5faf045"},
- {file = "coverage-7.2.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2b3b05e22a77bb0ae1a3125126a4e08535961c946b62f30985535ed40e26614"},
- {file = "coverage-7.2.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0342a28617e63ad15d96dca0f7ae9479a37b7d8a295f749c14f3436ea59fdcb3"},
- {file = "coverage-7.2.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf97ed82ca986e5c637ea286ba2793c85325b30f869bf64d3009ccc1a31ae3fd"},
- {file = "coverage-7.2.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c2c41c1b1866b670573657d584de413df701f482574bad7e28214a2362cb1fd1"},
- {file = "coverage-7.2.5-cp38-cp38-win32.whl", hash = "sha256:10b15394c13544fce02382360cab54e51a9e0fd1bd61ae9ce012c0d1e103c813"},
- {file = "coverage-7.2.5-cp38-cp38-win_amd64.whl", hash = "sha256:a0b273fe6dc655b110e8dc89b8ec7f1a778d78c9fd9b4bda7c384c8906072212"},
- {file = "coverage-7.2.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5c587f52c81211d4530fa6857884d37f514bcf9453bdeee0ff93eaaf906a5c1b"},
- {file = "coverage-7.2.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4436cc9ba5414c2c998eaedee5343f49c02ca93b21769c5fdfa4f9d799e84200"},
- {file = "coverage-7.2.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6599bf92f33ab041e36e06d25890afbdf12078aacfe1f1d08c713906e49a3fe5"},
- {file = "coverage-7.2.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:857abe2fa6a4973f8663e039ead8d22215d31db613ace76e4a98f52ec919068e"},
- {file = "coverage-7.2.5-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6f5cab2d7f0c12f8187a376cc6582c477d2df91d63f75341307fcdcb5d60303"},
- {file = "coverage-7.2.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:aa387bd7489f3e1787ff82068b295bcaafbf6f79c3dad3cbc82ef88ce3f48ad3"},
- {file = "coverage-7.2.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:156192e5fd3dbbcb11cd777cc469cf010a294f4c736a2b2c891c77618cb1379a"},
- {file = "coverage-7.2.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bd3b4b8175c1db502adf209d06136c000df4d245105c8839e9d0be71c94aefe1"},
- {file = "coverage-7.2.5-cp39-cp39-win32.whl", hash = "sha256:ddc5a54edb653e9e215f75de377354e2455376f416c4378e1d43b08ec50acc31"},
- {file = "coverage-7.2.5-cp39-cp39-win_amd64.whl", hash = "sha256:338aa9d9883aaaad53695cb14ccdeb36d4060485bb9388446330bef9c361c252"},
- {file = "coverage-7.2.5-pp37.pp38.pp39-none-any.whl", hash = "sha256:8877d9b437b35a85c18e3c6499b23674684bf690f5d96c1006a1ef61f9fdf0f3"},
- {file = "coverage-7.2.5.tar.gz", hash = "sha256:f99ef080288f09ffc687423b8d60978cf3a465d3f404a18d1a05474bd8575a47"},
+ {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"},
+ {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"},
+ {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"},
+ {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"},
+ {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"},
+ {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"},
+ {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"},
+ {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"},
+ {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"},
+ {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"},
+ {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"},
+ {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"},
+ {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"},
+ {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"},
+ {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"},
+ {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"},
+ {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"},
+ {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"},
+ {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"},
+ {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"},
+ {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"},
+ {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"},
]

[package.dependencies]
@@ -1029,6 +779,155 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
[package.extras]
toml = ["tomli"]

+[[package]]
+name = "cycler"
+version = "0.12.1"
+description = "Composable style cycles"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"},
+ {file = "cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"},
+]
+
+[package.extras]
+docs = ["ipython", "matplotlib", "numpydoc", "sphinx"]
+tests = ["pytest", "pytest-cov", "pytest-xdist"]
+
+[[package]]
+name = "cython"
+version = "3.0.4"
+description = "The Cython compiler for writing C extensions in the Python language."
+optional = true
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Cython-3.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:096cb461bf8d913a4327d93ea38d18bc3dbc577a71d805be04754e4b2cc2c45d"},
+ {file = "Cython-3.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf671d712816b48fa2731799017ed68e5e440922d0c7e13dc825c226639ff766"},
+ {file = "Cython-3.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beb367fd88fc6ba8c204786f680229106d99da72a60f5906c85fc8d73640b01a"},
+ {file = "Cython-3.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6619264ed43d8d8819d4f1cdb8a62ab66f87e92f06f3ff3e2533fd95a9945e59"},
+ {file = "Cython-3.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c0fb9e7cf9db38918f19a803fab9bc7b2ed3f33a9e8319c616c464a0a8501b8d"},
+ {file = "Cython-3.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c214f6e88ecdc8ff5d13f0914890445fdaad21bddc34a90cd14aeb3ad5e55e2e"},
+ {file = "Cython-3.0.4-cp310-cp310-win32.whl", hash = "sha256:c9b1322f0d8ce5445fcc3a625b966f10c4182190026713e217d6f38d29930cb1"},
+ {file = "Cython-3.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:146bfaab567157f4aa34114a37e3f98a3d9c4527ee99d4fd730cab56482bd3cf"},
+ {file = "Cython-3.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c8e0f98d950987b0f9d5e10c41236bef5cb4fba701c6e680af0b9734faa3a85e"},
+ {file = "Cython-3.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fe227d6d8e2ea030e82abc8a3e361e31447b66849f8c069caa783999e54a8f2"},
+ {file = "Cython-3.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6da74000a672eac0d7cf02adc140b2f9c7d54eae6c196e615a1b5deb694d9203"},
+ {file = "Cython-3.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48cda82eb82ad2014d2ad194442ed3c46156366be98e4e02f3e29742cdbf94a0"},
+ {file = "Cython-3.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4355a2cb03b257773c0d2bb6af9818c72e836a9b09986e28f52e323d87b1fc67"},
+ {file = "Cython-3.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:10b426adc3027d66303f5c7aa8b254d10ed80827ff5cce9e892d550b708dc044"},
+ {file = "Cython-3.0.4-cp311-cp311-win32.whl", hash = "sha256:28de18f0d07eb34e2dd7b022ac30beab0fdd277846d07b7a08e69e6294f0762b"},
+ {file = "Cython-3.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:9d31d76ed777a8a85be3f8f7f1cfef09b3bc33f6ec4abee1067dcef107f49778"},
+ {file = "Cython-3.0.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d5a55749509c7f9f8a33bf9cc02cf76fd6564fcb38f486e43d2858145d735953"},
+ {file = "Cython-3.0.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58cdfdd942cf5ffcee974aabfe9b9e26c0c1538fd31c1b03596d40405f7f4d40"},
+ {file = "Cython-3.0.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b906997e7b98d7d29b84d10a5318993eba1aaff82ff7e1a0ac00254307913d7"},
+ {file = "Cython-3.0.4-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f24114e1777604a28ae1c7a56a2c9964655f1031edecc448ad51e5abb19a279b"},
+ {file = "Cython-3.0.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:07d0e69959f267b79ffd18ece8599711ad2f3d3ed1eddd0d4812d2a97de2b912"},
+ {file = "Cython-3.0.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f7fcd93d15deceb2747b10266a39deccd94f257d610f3bbd52a7e16bc5908eda"},
+ {file = "Cython-3.0.4-cp312-cp312-win32.whl", hash = "sha256:0aa2a6bb3ff67794d8d1dafaed22913adcbb327e96eca3ac44e2f3ba4a0ae446"},
+ {file = "Cython-3.0.4-cp312-cp312-win_amd64.whl", hash = "sha256:0021350f6d7022a37f598320460b84b2c0daccf6bb65641bbdbc8b990bdf4ad2"},
+ {file = "Cython-3.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b72c426df1586f967b1c61d2f8236702d75c6bbf34abdc258a59e09155a16414"},
+ {file = "Cython-3.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a9262408f05eef039981479b38b38252d5b853992e5bc54a2d2dd05a2a0178e"},
+ {file = "Cython-3.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28af4e7dff1742cb0f0a4823102c89c62a2d94115b68f718145fcfe0763c6e21"},
+ {file = "Cython-3.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4e8c144e2c5814e46868d1f81e2f4265ca1f314a8187d0420cd76e9563294cf8"},
+ {file = "Cython-3.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:19a64bf2591272348ab08bcd4a5f884259cc3186f008c9038b8ec7d01f847fd5"},
+ {file = "Cython-3.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fc96efa617184b8581a02663e261b41c13a605da8ef4ba1ed735bf46184f815e"},
+ {file = "Cython-3.0.4-cp36-cp36m-win32.whl", hash = "sha256:15d52f7f9d08b264c042aa508bf457f53654b55f533e0262e146002b1c15d1cd"},
+ {file = "Cython-3.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:0650460b5fd6f16da4186e0a769b47db5532601e306f3b5d17941283d5e36d24"},
+ {file = "Cython-3.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b3ddfc6f05410095ec11491dde05f50973e501567d21cbfcf5832d95f141878a"},
+ {file = "Cython-3.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a0b92adfcac68dcf549daddec83c87a86995caa6f87bfb6f72de4797e1a6ad6"},
+ {file = "Cython-3.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ada3659608795bb36930d9a206b8dd6b865d85e2999a02ce8b34f3195d88301"},
+ {file = "Cython-3.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:061dec1be8d8b601b160582609a78eb08324a4ccf21bee0d04853a3e9dfcbefd"},
+ {file = "Cython-3.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:bc42004f181373cd3b39bbacfb71a5b0606ed6e4c199c940cca2212ba0f79525"},
+ {file = "Cython-3.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:f124ac9ee41e1bfdfb16f53f1db85de296cd2144a4e8fdee8c3560a8fe9b6d5d"},
+ {file = "Cython-3.0.4-cp37-cp37m-win32.whl", hash = "sha256:48b35ab009227ee6188991b5666aae1936b82a944f707c042cef267709de12b5"},
+ {file = "Cython-3.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:861979428f749faa9883dc4e11e8c3fc2c29bd0031cf49661604459b53ea7c66"},
+ {file = "Cython-3.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c7a7dd7c50d07718a5ac2bdea166085565f7217cf1e030cc07c22a8b80a406a7"},
+ {file = "Cython-3.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d40d4135f76fb0ed4caa2d422fdb4231616615698709d3c421ecc733f1ac7ca0"},
+ {file = "Cython-3.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:207f53893ca22d8c8f5db533f38382eb7ddc2d0b4ab51699bf052423a6febdad"},
+ {file = "Cython-3.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0422a40a58dcfbb54c8b4e125461d741031ff046bc678475cc7a6c801d2a7721"},
+ {file = "Cython-3.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ef4b144c5b29b4ea0b40c401458b86df8d75382b2e5d03e9f67f607c05b516a9"},
+ {file = "Cython-3.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0612439f810cc281e51fead69de0545c4d9772a1e82149c119d1aafc1f6210ba"},
+ {file = "Cython-3.0.4-cp38-cp38-win32.whl", hash = "sha256:b86871862bd65806ba0d0aa2b9c77fcdcc6cbd8d36196688f4896a34bb626334"},
+ {file = "Cython-3.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:6603a287188dcbc36358a73a7be43e8a2ecf0c6a06835bdfdd1b113943afdd6f"},
+ {file = "Cython-3.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0fc9e974419cc0393072b1e9a669f71c3b34209636d2005ff8620687daa82b8c"},
+ {file = "Cython-3.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e84988d384dfba678387ea7e4f68786c3703543018d473605d9299c69a07f197"},
+ {file = "Cython-3.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36299ffd5663203c25d3a76980f077e23b6d4f574d142f0f43943f57be445639"},
+ {file = "Cython-3.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8529cf09919263a6826adc04c5dde9f1406dd7920929b16be19ee9848110e353"},
+ {file = "Cython-3.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8692249732d62e049df3884fa601b70fad3358703e137aceeb581e5860e7d9b7"},
+ {file = "Cython-3.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f234bc46043856d118ebd94b13ea29df674503bc94ced3d81ca46a1ad5b5b9ae"},
+ {file = "Cython-3.0.4-cp39-cp39-win32.whl", hash = "sha256:c2215f436ce3cce49e6e318cb8f7253cfc4d3bea690777c2a5dd52ae93342504"},
+ {file = "Cython-3.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:003ccc40e0867770db0018274977d1874e4df64983d5e3e36937f107e0b2fdf6"},
+ {file = "Cython-3.0.4-py2.py3-none-any.whl", hash = "sha256:e5e2859f97e9cceb8e70b0934c56157038b8b083245898593008162a70536d7e"},
+ {file = "Cython-3.0.4.tar.gz", hash = "sha256:2e379b491ee985d31e5faaf050f79f4a8f59f482835906efe4477b33b4fbe9ff"},
+]
+
+[[package]]
+name = "dash"
+version = "2.14.0"
+description = "A Python framework for building reactive web-apps. Developed by Plotly."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "dash-2.14.0-py3-none-any.whl", hash = "sha256:b88ffb53cb1bf54c12780ecf89943bb901c8914b3c075002e46519c9b17d7a72"},
+ {file = "dash-2.14.0.tar.gz", hash = "sha256:bd28be70be24ae1d1f764b8217a03da35e9ed895406686d24dfb6ed4e331e5a9"},
+]
+
+[package.dependencies]
+ansi2html = "*"
+dash-core-components = "2.0.0"
+dash-html-components = "2.0.0"
+dash-table = "5.0.0"
+Flask = ">=1.0.4,<2.3.0"
+importlib-metadata = {version = "*", markers = "python_version >= \"3.7\""}
+nest-asyncio = "*"
+plotly = ">=5.0.0"
+requests = "*"
+retrying = "*"
+setuptools = "*"
+typing-extensions = ">=4.1.1"
+Werkzeug = "<2.3.0"
+
+[package.extras]
+celery = ["celery[redis] (>=5.1.2)", "importlib-metadata (<5)", "redis (>=3.5.3)"]
+ci = ["black (==21.6b0)", "black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==3.9.2)", "flaky (==3.7.0)", "flask-talisman (==1.0.0)", "isort (==4.3.21)", "jupyterlab (<4.0.0)", "mimesis", "mock (==4.0.3)", "numpy (<=1.25.2)", "openpyxl", "orjson (==3.5.4)", "orjson (==3.6.7)", "pandas (==1.1.5)", "pandas (>=1.4.0)", "preconditions", "pyarrow", "pyarrow (<3)", "pylint (==2.13.5)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "xlrd (<2)", "xlrd (>=2.0.1)"]
+compress = ["flask-compress"]
+dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"]
+diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"]
+testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"]
+
+[[package]]
+name = "dash-core-components"
+version = "2.0.0"
+description = "Core component suite for Dash"
+optional = false
+python-versions = "*"
+files = [
+ {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"},
+ {file = "dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"},
+]
+
+[[package]]
+name = "dash-html-components"
+version = "2.0.0"
+description = "Vanilla HTML components for Dash"
+optional = false
+python-versions = "*"
+files = [
+ {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"},
+ {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"},
+]
+
+[[package]]
+name = "dash-table"
+version = "5.0.0"
+description = "Dash table"
+optional = false
+python-versions = "*"
+files = [
+ {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"},
+ {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"},
+]
+
[[package]]
name = "decorator"
version = "5.1.1"
@@ -1042,13 +941,13 @@ files = [
[[package]]
name = "distlib"
-version = "0.3.6"
+version = "0.3.7"
description = "Distribution utilities"
optional = false
python-versions = "*"
files = [
- {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"},
- {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"},
+ {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"},
+ {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"},
]

[[package]]
@@ -1064,204 +963,321 @@ files = [
[[package]]
name = "eigenpy"
-version = "2.9.2"
+version = "3.1.1"
description = "Bindings between Numpy and Eigen using Boost.Python"
optional = false
-python-versions = ">= 3.7"
-files = [
- {file = "eigenpy-2.9.2-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1547dbc74bc125f077fcb8550a787418fc433f5996ac0e086e1bd6229f77b9a1"},
- {file = "eigenpy-2.9.2-0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a5f2cb1bd633220a4fd98424ef27ab7f5892448fd1e0edd48206fe4a67d8741a"},
- {file = "eigenpy-2.9.2-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:0e6af292b8a619f67e9a340a771f2288da513b8d7f23cbe659b98b62a11a32f6"},
- {file = "eigenpy-2.9.2-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:51add81d77e3a750fd85f9ca34fa78ecf2ffaa372039872609dfe23cc4565d87"},
- {file = "eigenpy-2.9.2-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:36da1f5090764ff0d1a126dca973c93eaf8dfb4aa17a53503b61e9cc4cc445c7"},
- {file = "eigenpy-2.9.2-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c387e61f6ef99d30c764dbf40c4b2bdb24e2dca059b9e8fe0bde32103ff0e7f3"},
- {file = "eigenpy-2.9.2-0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e40eb0872993daed2a65fd7e03d733e76cee04343844007de69c28fab19a2f0"},
- {file = "eigenpy-2.9.2-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:3bf05f3f75eab9e2caf4335bc087d9110959978f4075a112db3349ba0c0d5881"},
- {file = "eigenpy-2.9.2-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:0881a27411fb2d2b80f0bcc6886fe9195e63e5a365be7db6e197e5b3299e395f"},
- {file = "eigenpy-2.9.2-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:298e30e035506e16313b158e07dbba8321958647998a989e02a6dabfd5ed206b"},
- {file = "eigenpy-2.9.2-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:db5012448f76c4615904159a4abe74f413f099b34dae2da19f22411367b93889"},
- {file = "eigenpy-2.9.2-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:ca43503ca96f76cf852c17b81cf71aa138ebb1649aed0f8b1aa0de305a3a643b"},
- {file = "eigenpy-2.9.2-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:60323d5033e7f44188b5637b864833060a6afcb5316d6f9f0443badd2e1e01b0"},
- {file = "eigenpy-2.9.2-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a60f33ec12a7790bf077a079a1686c84cba42738d5e5f38e1e4c0fca971d123e"},
- {file = "eigenpy-2.9.2-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91e39194ef4a735b624bc1c0a5077b1128308567d40f41b3645cd6ecf4589cbd"},
- {file = "eigenpy-2.9.2-0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1e7b11117039991261e3f7b585d54b8ed0ca4c46fa5650d75ad6a275d8509507"},
- {file = "eigenpy-2.9.2-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:e75a4b69323958e6bc971f2284836bf4c1f1630b5f73801592c1f6d3557977a3"},
- {file = "eigenpy-2.9.2-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:6fc58fc477f6d4b5388c6ebf7117e2a5864e70cc82a44a64724959fd47a2b1a2"},
- {file = "eigenpy-2.9.2-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:105384933ad64647c89afb52a6d7d1db2b81dc6dfa866649bf1309bd61aaa86a"},
- {file = "eigenpy-2.9.2-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a6f758c6d01aef5ca152ea4a8b0b11d956424a2fa0c278566ff03b84a4af65e5"},
- {file = "eigenpy-2.9.2-0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3c6f820b03995b08bca4df844f18306b5fcfc81201a9f45803924e14c6ee037"},
- {file = "eigenpy-2.9.2-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:00c0a8a36f078bcfcd0cdd040bb75f0604a2c0d3193378cb7bd8485491873270"},
- {file = "eigenpy-2.9.2-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:422c01b24fe4d3a8a641dd0ef512283c9e871e5b4164745c3c4dab6624de7e44"},
- {file = "eigenpy-2.9.2-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d07dd9cf5c74787c026b4940247a7a6b56c29c43fce2562d71e4350e41c78c78"},
- {file = "eigenpy-2.9.2-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:08e9e68efe13e6d7a2c1fc04ab93a9f6c073f78dbaed782deac343934bed2721"},
- {file = "eigenpy-2.9.2-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:74e67ce45657ba93b78ab5f13c892e509ee1356e1fb936b13fe7e064b3e763ac"},
- {file = "eigenpy-2.9.2-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:392ad0bc10314d7bcd3bdf282ea594b9519c46d431c64398bf6e9697105e4946"},
- {file = "eigenpy-2.9.2-0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:4a8a24803d693679bd0dd856da852937f3f677944e4713589f157d6e38e23bb3"},
- {file = "eigenpy-2.9.2-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:c04fbc03a8d0772113db1f51601a69062e39fe0f6f57fa0337080e2736013f57"},
- {file = "eigenpy-2.9.2-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a50d87e063b320c72b7633d0f29e9c4fd4a2c674e95b537e64226324bb7d90e4"},
- {file = "eigenpy-2.9.2-0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b7fafe8617b19284d5ece3674c9424dac2c84b18fc65528bb3c87f2919a03ba9"},
- {file = "eigenpy-2.9.2-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:6c20cc4fad759aa4a2076c07ea80c4fdd3885b99479588afea3cd105f85380ff"},
- {file = "eigenpy-2.9.2-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b3c6396e314c6b6aadc0f5d15819192cb5d80dd679605bc6aa882ba0baaf8a33"},
- {file = "eigenpy-2.9.2-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:43522e71af7537468f0ec0aa0ac530ab6cb3c075bc92abe80df14836fb5f8640"},
- {file = "eigenpy-2.9.2-1-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:aa6ff3913db34b6e2fbaf5fbc54421e6c9a16c8ea4e03dd26a8fa56dbe1e222c"},
- {file = "eigenpy-2.9.2-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:3153943498d4dd91ee81b0e0a1cf8f7df4d0ffa7ef133d4d756136c439b0a4c2"},
- {file = "eigenpy-2.9.2-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:37fdfcae853fc82b332ca8fc5baa127f11a0aa392213d3c4c64ad3afcaacc744"},
- {file = "eigenpy-2.9.2-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c861f004ebd7c93ad66886690fd3712d1758f694cea160c6b467a6d2225b847e"},
- {file = "eigenpy-2.9.2-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8ffb237576d67d8971c3e6f6c70a971c8df4a918b1d60b168b0e62cbf9bdc04f"},
- {file = "eigenpy-2.9.2-1-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:71c8ac600338b742c7bf7df403005e8d5516c484031c20963b88acd6a20a1548"},
- {file = "eigenpy-2.9.2-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:d53061c3bfbf0f98d52dee7c99dc171c08321d1d577271fd2237afc7ed9e4f55"},
- {file = "eigenpy-2.9.2-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:731ba6ce470e8b90ff71b2112d3e8dabd49e31404e161bc20a7fa7f9d48ad29d"},
- {file = "eigenpy-2.9.2-1-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:6943050b1db9a09c02f92420f57d00a4d61226a594d471d3351ec59b7a665efa"},
- {file = "eigenpy-2.9.2-1-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:eca1284629feaec8c5b8a88a520b297c64400b2d568dcfb4a593701597e8c6c0"},
- {file = "eigenpy-2.9.2-1-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:beabfbbe09cec072fdd2ac17759e3cf5958fd6ee8989f186bd1e5e450b467fc2"},
- {file = "eigenpy-2.9.2-1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:b03e3af3a3b63d30e55ac395114e78ebccdd3ee8ab530ae3b1a5e07bc6560674"},
- {file = "eigenpy-2.9.2-1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:978a21d8cb15a7c6d30cd24267957e6ea98f164939db8d568cd9b264763b4eae"},
- {file = "eigenpy-2.9.2-1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6a73d1416ba221805c1686087e95d1e17333531cdeccb94e184bf8d89d110805"},
- {file = "eigenpy-2.9.2-1-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:52b6bc623b04848a42e9b1e92d20beab927dfce5087a9cb48654cc0445f90cd0"},
- {file = "eigenpy-2.9.2-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:236ac90e7bfbc4fb6c9e37546cee7a783c91c73c97c9a58afc7f6fbf0fa7fa59"},
- {file = "eigenpy-2.9.2-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4bf1c78a1834948db31afc48e1cd60aaae66e564c66c088fa1b40cb0f48275fc"},
- {file = "eigenpy-2.9.2-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bceb2e446d2688052f5c405ee8d9274846eacefd2709dd92fccad671e7d01cee"},
- {file = "eigenpy-2.9.2-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:910555d0619fa54c3676fb1b3179cd09d53dbc8daea01b78e211629fe53dfc60"},
- {file = "eigenpy-2.9.2-1-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:edca378032a230dc2a3dcc3a70089efd9e0cdf0aca21d6e804d30f7b9d5a2eca"},
- {file = "eigenpy-2.9.2-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:950543a177b62411abb195102f207b787a819030c2dcaa141cdfeecd2fbf3967"},
- {file = "eigenpy-2.9.2-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:65a1106e71939e2149175903e9a0b64d91f988e3d5b0272d560dd119a0324011"},
- {file = "eigenpy-2.9.2-1-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:a19ca57da5a8bcb6cce49165a916eb92c6fd29ea657c4d6db500ccfc2bccf860"},
- {file = "eigenpy-2.9.2-1-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:f3bda586e29913ba0b53f23a77f4d98d3385979240c65d50b43aefbf955e645b"},
- {file = "eigenpy-2.9.2-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e08f3e570f815c2f2f3fd84397f47a80466ddacea653a81d80850374c77849c"},
- {file = "eigenpy-2.9.2-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:743c22e0d447ed2f5c8850e91da662d49e81f79b8bd53a6ee9d9ef5d075bccbe"},
- {file = "eigenpy-2.9.2-1-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:18d6a9e877fb23f1686ed6ee6d64ed069873afc0a0155c27d88c3393b6527da9"},
- {file = "eigenpy-2.9.2-1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:89b0dbd1cb67b5197175ae6aa31b58d48c80b5b0d063dfba1aceb3b906825c57"},
- {file = "eigenpy-2.9.2-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:3fe447a74387bbc0db69c9c1bb9de0c8e072beeaa2ca98afd5e90b6532242a35"},
- {file = "eigenpy-2.9.2-1-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:90c015fca1964ec0521d521d85583a2523211c3f8e69b130f80d8f7854e58147"},
- {file = "eigenpy-2.9.2-3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6f7727236e4db32ab406a579c0ede3870efb6f0f61a63971d328402d33504a3a"},
- {file = "eigenpy-2.9.2-3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d4d82c1c0a9d463fab7704c6df226c08818194acfd4f10bf37d47aa61af9b598"},
- {file = "eigenpy-2.9.2-3-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:16b80b0f635db1034b2a89f9d61ac8d1f7b4fe99745e785c09c973145e547d64"},
- {file = "eigenpy-2.9.2-3-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:0c2095f75bfff71b89c01ecedd318f1bb71fb44f8159175ff5b3651f6e5d900e"},
- {file = "eigenpy-2.9.2-3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:77145f28ab02e9816007fce4ee00173e1603c21c63117bb3e1822aa2e40bdbfd"},
- {file = "eigenpy-2.9.2-3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1fd7131edc2f90359deb40e406e50b2f2f71ddf3b7a514c45bb6ba84e677f9bb"},
- {file = "eigenpy-2.9.2-3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72d4db584082c8d5316f3fc109229c62e05a027f02177dbcbb3fe97a79688ec0"},
- {file = "eigenpy-2.9.2-3-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:a00596770a96a5afb99011e6703c98e54635ab6515d1490158c77a2b95603d88"},
- {file = "eigenpy-2.9.2-3-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:32b37442bdcc73db04c026c9c4177a1ececa38f74b8305b7a0c8cc83636801d9"},
- {file = "eigenpy-2.9.2-3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ffaeb48f3daa17fc2b18d5064bab7164edae826310abb21fd5c34d20882ca18f"},
- {file = "eigenpy-2.9.2-3-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:ea60f1b6b7aa0843a854a5bd9dd049faf596c087c1b9fe9eb4c1ce52453038bf"},
- {file = "eigenpy-2.9.2-3-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:159943e611c8eeefc4e885682f56f33277c17a1cdf91c9875e444da580d1a9a6"},
- {file = "eigenpy-2.9.2-3-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:a2a2c09e2cca059e67114b26f98f5f3c45eb32cad69dc3161c9e807443de3bbf"},
- {file = "eigenpy-2.9.2-3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:7efd34176781629ada2875ccd87be4cabc0e3a2ac9f077a6ad724be1eefa1574"},
- {file = "eigenpy-2.9.2-3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1b9848c1196b82277a132de10439a2c80805e42ecdaa6c4db39fca7b33bedbe6"},
- {file = "eigenpy-2.9.2-3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d3e9873f33b7724e5724e4decde0078296930e2ac31748ec9a8d3eaaa890a8f"},
- {file = "eigenpy-2.9.2-3-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:1fca60b78dc83f1cd26b88908a23656d3a4e86921ee033dbb8cd64437675dd66"},
- {file = "eigenpy-2.9.2-3-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:073022bbda89aa8b76366f300e398e812e66b084b88698d97519f0814b2bb976"},
- {file = "eigenpy-2.9.2-3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:783d8dae1ea27fa8355f014d3b312cd2ec06eaaf1d361482e50d95ec0ef037f0"},
- {file = "eigenpy-2.9.2-3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0d068ab765df4d4c27f9603fcac1c3e5d5915a7a57dadeef503ae3ca349ad173"},
- {file = "eigenpy-2.9.2-3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:133047a53a8f71937130158188342e0b15ee3c23092f63bf47d6df75528136f6"},
- {file = "eigenpy-2.9.2-3-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:7a275d295068e03c7db17464a19567c55582f3c31db8d40bd248e138f762dd87"},
- {file = "eigenpy-2.9.2-3-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:155e1ad762fb83181d529a250922d33f06b0d7c80882fd00289ff28dd715613b"},
- {file = "eigenpy-2.9.2-3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fd74e21c0cbb41da70aabcac92b3fc22de319ead2e1f9d9cf88cf206fc9d00d"},
- {file = "eigenpy-2.9.2-3-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:302e7976533e8fa9feb437df8ca7ce1bc43d3429c1a1b8970170facfffded6f0"},
- {file = "eigenpy-2.9.2-3-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:6ebeaa46f27c0373034383dc83d830ec39c14479c9061712c0887451a43f1339"},
- {file = "eigenpy-2.9.2-3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f3637093f243e14292279d9bf2f82af8aa1a187910580b59c3278b89946a100e"},
- {file = "eigenpy-2.9.2-3-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3e869148cc9b18dd951f6ff6f2fa983a5c908d3a36ba5811a3bf1bd0b3dcf526"},
- {file = "eigenpy-2.9.2-3-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:e899432433dad7e06d68d2087707cea9b674806dfdd02f904a6466e828e1670f"},
- {file = "eigenpy-2.9.2-3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b0ac58a3dfe8d6e305e90dfe7636024b87817fcec333a90aa61df6e0c7c4e739"},
- {file = "eigenpy-2.9.2-3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5ba3694004fb774b9a4725d44c80d8aeda772e343994b8ebb0036ca4b5cb8e9a"},
- {file = "eigenpy-2.9.2-3-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:5fece54dc1c05620ceebf3584be35378e5b313f97500d92c1cc5b077f525161d"},
- {file = "eigenpy-2.9.2-4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3d93e3c526dcd77d122f54f5a7e36be68d9c54cbdbd0a874cb8d6da758fa25e0"},
- {file = "eigenpy-2.9.2-4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6251007e44beb65bb40632488b96006e2999b48ff81f7488bd76cc0fdf2737e8"},
- {file = "eigenpy-2.9.2-4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:215ef59e65f2dd53adc548bdcb2705374598339cc265a1404a5ec112f2b82a5d"},
- {file = "eigenpy-2.9.2-4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af1ce527b780f1db08247b3d2620f8992995bb640b38d6efc4a206e2356d3bb4"},
- {file = "eigenpy-2.9.2-4-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:310029b36e07fc5d895ad53df420b5ce12986a3f990d3b8c93fe89d887c98aab"},
- {file = "eigenpy-2.9.2-4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5799e126c159650909d3e37a72bec908fc42dc2c87369c499fbed4d5e1c2ec60"},
- {file = "eigenpy-2.9.2-5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c25a7c39174f1f73fe8b267ac38b3ef5e7d44cf05074e8d92df7919f6f552916"},
- {file = "eigenpy-2.9.2-5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7729161abf18ddea02d312775f13a28a62b15a0d853e082a8bc724cd6bc29ec4"},
- {file = "eigenpy-2.9.2-5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ac9e642ec760488ffd1f783855121376cb67f542ff5ec64282141d96092d1eb0"},
- {file = "eigenpy-2.9.2-5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f1271b8bcb0926e271e85ffd38d2520f7d8087926141a96774dca36785dc1d54"},
- {file = "eigenpy-2.9.2-5-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:88980c5bb47e83aa5954a1c1c1722dbc0befb3b12e3e073f6515626a686fb847"},
- {file = "eigenpy-2.9.2-5-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:aed39be55585526f0fc934ad5f22ea3848749cde0fba28e66f0e2e82417fd492"},
+python-versions = ">= 3.8"
+files = [
+ {file = "eigenpy-3.1.1-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:836f7c2ac73ab5729d7068a89347cf3173b97cd298219f23651f3c8343827feb"},
+ {file = "eigenpy-3.1.1-0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:448e7f2db0687c19cb4c5894295ec3d734d77ca2c59cee662e0678caccbce48f"},
+ {file = "eigenpy-3.1.1-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:319ae7fbfbdda85602cc9884aeda6f51f4105072e1835c3b91eca430588dd155"},
+ {file = "eigenpy-3.1.1-0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:069ae1a19040236bce54d690ffb1ee8e54bcc7f88ceb4b27f05d8f9dee9c05a9"},
+ {file = "eigenpy-3.1.1-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bce638aa84c3fb5180cce411114b18b2b378228b0282506b32fae444238f3fe9"},
+ {file = "eigenpy-3.1.1-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c246991b8b8dd38a6415188fc5fc1655ef91fc1a26978771fe29337bbfe67229"},
+ {file = "eigenpy-3.1.1-0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:78f9b8b6f5803e5675dd9767f5f9017d511b1c7688f5fa3fe21f26d7cfa92281"},
+ {file = "eigenpy-3.1.1-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:b1f29c7ba8904b92b5382dcffb243eb3e269603b66de804de30fb04427c37620"},
+ {file = "eigenpy-3.1.1-0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:4d9cc6631774e0cc7820131d739f12989f080ba6ace95d4cd4dff306c881094a"},
+ {file = "eigenpy-3.1.1-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2be2e72c208edb15a687e603c8b11bb07fe1415406982b4f8e49c11be7ec7a51"},
+ {file = "eigenpy-3.1.1-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:c5ec2d372b5717ca84d506809d83e0cd9201d586425be6c52c4c01298a3093b1"},
+ {file = "eigenpy-3.1.1-0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:f7ccacdf8a4f9dbf86a1dacaa000f00d2431502b1d9bd6a8aad91625a4cb06fe"},
+ {file = "eigenpy-3.1.1-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6d36079f4c24d76b59bbcb4a188b40c744115c3c54f38e591ec8ef20fbb1026a"},
+ {file = "eigenpy-3.1.1-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:13213fc1a7c90ec4dd3f9c08a161e82b7b00f1ed6134e2c76767b57e18f276fc"},
+ {file = "eigenpy-3.1.1-0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e081b8ffe4b119516281fe425a9832980fbf065b935fae1614affc04d090f69c"},
+ {file = "eigenpy-3.1.1-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:4294e7f579444b3d79194ec1d2e9afbbf33a7f766edde62c271e8df2d64b02cc"},
+ {file = "eigenpy-3.1.1-0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:59cb830716ad46323ef3301c403a4f96fd1402437d7ec0316e0dd0b5a4591d48"},
+ {file = "eigenpy-3.1.1-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:df6df4dc4db2787943c5af15a8db661cac966aa9438c02137140d198744aa8dd"},
+ {file = "eigenpy-3.1.1-0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1200f2c481433ad3dd1ad4d70f01c2275e034d395abd9a49da960c4f7acbf130"},
+ {file = "eigenpy-3.1.1-0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:27ae2ac839672082d8696a0e67d2bcf8b4f9927d87d0f2e61a624512e0642169"},
+ {file = "eigenpy-3.1.1-0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2df1aab9e7a7f5cbf2b208ebd1d3b050b4d6d27f894452b074e2ba8bbcc9958c"},
+ {file = "eigenpy-3.1.1-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ae4ec200b4c71fd60e82bf257d984ba4d28b178e029d87999d1910c80ab91ab2"},
+ {file = "eigenpy-3.1.1-0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:7e8fe6796f075cc802520938e7d9ae62133b750b9c1da6cf88cab8935dce0663"},
+ {file = "eigenpy-3.1.1-0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:10875544d5393d00551528194d0e4a9efaccab2fe07d7ac6dd36ea197989d9e4"},
+ {file = "eigenpy-3.1.1-0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:270fd94f7b30d9d3f0e40b0ba84ca95481259f800d9993702c317a53bf2c2b35"},
+ {file = "eigenpy-3.1.1-0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:6d24c906eaef7ff502a04e56afd797f909b24e2c263f0556be3ff1c451a2e6ab"},
]

[package.dependencies]
cmeel = "*"
-cmeel-boost = "*"
+cmeel-boost = ">=1.82,<2.0"
+
+[package.extras]
+build = ["cmeel-boost (>=1.82.0,<1.83.0)", "cmeel-eigen"]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.1.3"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
+ {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
[[package]]
name = "executing"
-version = "1.2.0"
+version = "2.0.0"
description = "Get the currently executing AST node of a frame, and other information"
optional = false
python-versions = "*"
files = [
- {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"},
- {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"},
+ {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"},
+ {file = "executing-2.0.0.tar.gz", hash = "sha256:0ff053696fdeef426cda5bd18eacd94f82c91f49823a2e9090124212ceea9b08"},
+]
+
+[package.extras]
+tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"]
+
+[[package]]
+name = "fastjsonschema"
+version = "2.18.1"
+description = "Fastest Python implementation of JSON schema"
+optional = false
+python-versions = "*"
+files = [
+ {file = "fastjsonschema-2.18.1-py3-none-any.whl", hash = "sha256:aec6a19e9f66e9810ab371cc913ad5f4e9e479b63a7072a2cd060a9369e329a8"},
+ {file = "fastjsonschema-2.18.1.tar.gz", hash = "sha256:06dc8680d937628e993fa0cd278f196d20449a1adc087640710846b324d422ea"},
]

[package.extras]
-tests = ["asttokens", "littleutils", "pytest", "rich"]
+devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]

[[package]]
name = "filelock"
-version = "3.10.0"
+version = "3.12.4"
description = "A platform independent file lock."
optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.12.4-py3-none-any.whl", hash = "sha256:08c21d87ded6e2b9da6728c3dff51baf1dcecf973b768ef35bcbc3447edb9ad4"},
+ {file = "filelock-3.12.4.tar.gz", hash = "sha256:2e6f249f1f3654291606e046b09f1fd5eac39b360664c27f5aad072012f8bcbd"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"]
+typing = ["typing-extensions (>=4.7.1)"]
+
+[[package]]
+name = "flask"
+version = "2.2.5"
+description = "A simple framework for building complex web applications."
+optional = false
python-versions = ">=3.7"
files = [
- {file = "filelock-3.10.0-py3-none-any.whl", hash = "sha256:e90b34656470756edf8b19656785c5fea73afa1953f3e1b0d645cef11cab3182"},
- {file = "filelock-3.10.0.tar.gz", hash = "sha256:3199fd0d3faea8b911be52b663dfccceb84c95949dd13179aa21436d1a79c4ce"},
+ {file = "Flask-2.2.5-py3-none-any.whl", hash = "sha256:58107ed83443e86067e41eff4631b058178191a355886f8e479e347fa1285fdf"},
+ {file = "Flask-2.2.5.tar.gz", hash = "sha256:edee9b0a7ff26621bd5a8c10ff484ae28737a2410d99b0bb9a6850c7fb977aa0"},
+]
+
+[package.dependencies]
+click = ">=8.0"
+importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""}
+itsdangerous = ">=2.0"
+Jinja2 = ">=3.0"
+Werkzeug = ">=2.2.2"
+
+[package.extras]
+async = ["asgiref (>=3.2)"]
+dotenv = ["python-dotenv"]
+
+[[package]]
+name = "fonttools"
+version = "4.43.1"
+description = "Tools to manipulate font files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273"},
+ {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10b3922875ffcba636674f406f9ab9a559564fdbaa253d66222019d569db869c"},
+ {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f727c3e3d08fd25352ed76cc3cb61486f8ed3f46109edf39e5a60fc9fecf6ca"},
+ {file = "fonttools-4.43.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad0b3f6342cfa14be996971ea2b28b125ad681c6277c4cd0fbdb50340220dfb6"},
+ {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3b7ad05b2beeebafb86aa01982e9768d61c2232f16470f9d0d8e385798e37184"},
+ {file = "fonttools-4.43.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4c54466f642d2116686268c3e5f35ebb10e49b0d48d41a847f0e171c785f7ac7"},
+ {file = "fonttools-4.43.1-cp310-cp310-win32.whl", hash = "sha256:1e09da7e8519e336239fbd375156488a4c4945f11c4c5792ee086dd84f784d02"},
+ {file = "fonttools-4.43.1-cp310-cp310-win_amd64.whl", hash = "sha256:1cf9e974f63b1080b1d2686180fc1fbfd3bfcfa3e1128695b5de337eb9075cef"},
+ {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5db46659cfe4e321158de74c6f71617e65dc92e54980086823a207f1c1c0e24b"},
+ {file = "fonttools-4.43.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1952c89a45caceedf2ab2506d9a95756e12b235c7182a7a0fff4f5e52227204f"},
+ {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c36da88422e0270fbc7fd959dc9749d31a958506c1d000e16703c2fce43e3d0"},
+ {file = "fonttools-4.43.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bbbf8174501285049e64d174e29f9578495e1b3b16c07c31910d55ad57683d8"},
+ {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4071bd1c183b8d0b368cc9ed3c07a0f6eb1bdfc4941c4c024c49a35429ac7cd"},
+ {file = "fonttools-4.43.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d21099b411e2006d3c3e1f9aaf339e12037dbf7bf9337faf0e93ec915991f43b"},
+ {file = "fonttools-4.43.1-cp311-cp311-win32.whl", hash = "sha256:b84a1c00f832feb9d0585ca8432fba104c819e42ff685fcce83537e2e7e91204"},
+ {file = "fonttools-4.43.1-cp311-cp311-win_amd64.whl", hash = "sha256:9a2f0aa6ca7c9bc1058a9d0b35483d4216e0c1bbe3962bc62ce112749954c7b8"},
+ {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4d9740e3783c748521e77d3c397dc0662062c88fd93600a3c2087d3d627cd5e5"},
+ {file = "fonttools-4.43.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:884ef38a5a2fd47b0c1291647b15f4e88b9de5338ffa24ee52c77d52b4dfd09c"},
+ {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9648518ef687ba818db3fcc5d9aae27a369253ac09a81ed25c3867e8657a0680"},
+ {file = "fonttools-4.43.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95e974d70238fc2be5f444fa91f6347191d0e914d5d8ae002c9aa189572cc215"},
+ {file = "fonttools-4.43.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:34f713dad41aa21c637b4e04fe507c36b986a40f7179dcc86402237e2d39dcd3"},
+ {file = "fonttools-4.43.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:360201d46165fc0753229afe785900bc9596ee6974833124f4e5e9f98d0f592b"},
+ {file = "fonttools-4.43.1-cp312-cp312-win32.whl", hash = "sha256:bb6d2f8ef81ea076877d76acfb6f9534a9c5f31dc94ba70ad001267ac3a8e56f"},
+ {file = "fonttools-4.43.1-cp312-cp312-win_amd64.whl", hash = "sha256:25d3da8a01442cbc1106490eddb6d31d7dffb38c1edbfabbcc8db371b3386d72"},
+ {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8da417431bfc9885a505e86ba706f03f598c85f5a9c54f67d63e84b9948ce590"},
+ {file = "fonttools-4.43.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:51669b60ee2a4ad6c7fc17539a43ffffc8ef69fd5dbed186a38a79c0ac1f5db7"},
+ {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748015d6f28f704e7d95cd3c808b483c5fb87fd3eefe172a9da54746ad56bfb6"},
+ {file = "fonttools-4.43.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a58eb5e736d7cf198eee94844b81c9573102ae5989ebcaa1d1a37acd04b33d"},
+ {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6bb5ea9076e0e39defa2c325fc086593ae582088e91c0746bee7a5a197be3da0"},
+ {file = "fonttools-4.43.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5f37e31291bf99a63328668bb83b0669f2688f329c4c0d80643acee6e63cd933"},
+ {file = "fonttools-4.43.1-cp38-cp38-win32.whl", hash = "sha256:9c60ecfa62839f7184f741d0509b5c039d391c3aff71dc5bc57b87cc305cff3b"},
+ {file = "fonttools-4.43.1-cp38-cp38-win_amd64.whl", hash = "sha256:fe9b1ec799b6086460a7480e0f55c447b1aca0a4eecc53e444f639e967348896"},
+ {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13a9a185259ed144def3682f74fdcf6596f2294e56fe62dfd2be736674500dba"},
+ {file = "fonttools-4.43.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2adca1b46d69dce4a37eecc096fe01a65d81a2f5c13b25ad54d5430ae430b13"},
+ {file = "fonttools-4.43.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18eefac1b247049a3a44bcd6e8c8fd8b97f3cad6f728173b5d81dced12d6c477"},
+ {file = "fonttools-4.43.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2062542a7565091cea4cc14dd99feff473268b5b8afdee564f7067dd9fff5860"},
+ {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18a2477c62a728f4d6e88c45ee9ee0229405e7267d7d79ce1f5ce0f3e9f8ab86"},
+ {file = "fonttools-4.43.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a7a06f8d95b7496e53af80d974d63516ffb263a468e614978f3899a6df52d4b3"},
+ {file = "fonttools-4.43.1-cp39-cp39-win32.whl", hash = "sha256:10003ebd81fec0192c889e63a9c8c63f88c7d72ae0460b7ba0cd2a1db246e5ad"},
+ {file = "fonttools-4.43.1-cp39-cp39-win_amd64.whl", hash = "sha256:e117a92b07407a061cde48158c03587ab97e74e7d73cb65e6aadb17af191162a"},
+ {file = "fonttools-4.43.1-py3-none-any.whl", hash = "sha256:4f88cae635bfe4bbbdc29d479a297bb525a94889184bb69fa9560c2d4834ddb9"},
+ {file = "fonttools-4.43.1.tar.gz", hash = "sha256:17dbc2eeafb38d5d0e865dcce16e313c58265a6d2d20081c435f84dc5a9d8212"},
]

[package.extras]
-docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
-testing = ["covdefaults (>=2.3)", "coverage (>=7.2.1)", "pytest (>=7.2.2)", "pytest-cov (>=4)", "pytest-timeout (>=2.1)"]
+all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"]
+graphite = ["lz4 (>=1.7.4.2)"]
+interpolatable = ["munkres", "scipy"]
+lxml = ["lxml (>=4.0,<5)"]
+pathops = ["skia-pathops (>=0.5.0)"]
+plot = ["matplotlib"]
+repacker = ["uharfbuzz (>=0.23.0)"]
+symfont = ["sympy"]
+type1 = ["xattr"]
+ufo = ["fs (>=2.2.0,<3)"]
+unicode = ["unicodedata2 (>=15.0.0)"]
+woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"]
+
+[[package]]
+name = "freetype-py"
+version = "2.4.0"
+description = "Freetype python bindings"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "freetype-py-2.4.0.zip", hash = "sha256:8ad81195d2f8f339aba61700cebfbd77defad149c51f59b75a2a5e37833ae12e"},
+ {file = "freetype_py-2.4.0-py3-none-macosx_10_9_universal2.whl", hash = "sha256:3e0f5a91bc812f42d98a92137e86bac4ed037a29e43dafdb76d716d5732189e8"},
+ {file = "freetype_py-2.4.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9a3abc277f5f6d21575c0093c0c6139c161bf05b91aa6258505ab27c5001c5e"},
+ {file = "freetype_py-2.4.0-py3-none-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ce931f581d5038c4fea1f3d314254e0264e92441a5fdaef6817fe77b7bb888d3"},
+ {file = "freetype_py-2.4.0-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:c6276d92ac401c8ce02ea391fc854de413b01a8d835fb394ee5eb6f04fc947f5"},
+ {file = "freetype_py-2.4.0-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:9614f68876e9c62e821dfa811dd6160e00279d9d98cf60118cb264be48da1472"},
+ {file = "freetype_py-2.4.0-py3-none-win_amd64.whl", hash = "sha256:a2620788d4f0c00bd75fee2dfca61635ab0da856131598c96e2355d5257f70e5"},
+]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]

[[package]]
name = "hpp-fcl"
-version = "2.3.0"
+version = "2.3.6"
description = "An extension of the Flexible Collision Library"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "hpp_fcl-2.3.0-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4e34f87f68558e921e5eb0ad78b21438422386958df0c26289b8a128304511b8"},
- {file = "hpp_fcl-2.3.0-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:fe5b868f086c5964ea68fbd11f9a5f4d3781a6b5f2f870786fb3a7a95d948d82"},
- {file = "hpp_fcl-2.3.0-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:f5d4fd013fa188c70bb757f2d700df45bdfa0aabdcdb2b7c32777f0678b6970e"},
- {file = "hpp_fcl-2.3.0-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b8f3f3e4f0e047034dfe5c0c9976376a3c0ce20b7b066285ab9aee873f6b3cb9"},
- {file = "hpp_fcl-2.3.0-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:95fcf134217d18796f4f9df0b1b2fe63d76151603509cfb0205ad1f2b847b40c"},
- {file = "hpp_fcl-2.3.0-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:c6937bdb4c0d7b655a22dadbcff6ade7611a3816453a775e4b8744516d48f67c"},
- {file = "hpp_fcl-2.3.0-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:1fc8d0d25fb3438c78636ff4393c82899e8582de1c12176aa73dceb55692b1b0"},
- {file = "hpp_fcl-2.3.0-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7e7048c5deeaa2a00091e867a953cf45ca9491648b5fec7dfa95c5adb2cd964d"},
- {file = "hpp_fcl-2.3.0-2-cp37-cp37m-macosx_12_0_x86_64.whl", hash = "sha256:98be1c911517cf45ef6bf89faf380f48f5dddadddaefdcee814863251021d09a"},
- {file = "hpp_fcl-2.3.0-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:40762aef654887f4a3c2fad298e7f3498ce6bf6a5ba4e6283ac41b14e37ae7a4"},
- {file = "hpp_fcl-2.3.0-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:08513fe402ce18c51b5c3b9af6937bc758f9122141d1ae5c93d2f92079caf0b2"},
- {file = "hpp_fcl-2.3.0-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4cc5fb3508ebbf5c2d62d18dc54877d801bbbd865319a019f423c0bd33d6dd84"},
- {file = "hpp_fcl-2.3.0-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8d596d7569a812b1915f2d0ee19eaa7aad107f04472aa9afb75b84f524db5903"},
- {file = "hpp_fcl-2.3.0-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:c707e02d6ac04425137c1f648602351855e9511f6cba3f03f3c7916323c84067"},
- {file = "hpp_fcl-2.3.0-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:793868821495686e882a2e066752295e412e39cc37d818848c39f1cda000f120"},
- {file = "hpp_fcl-2.3.0-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3640aa723823e4adc2abdb6580c48e23c5932dbff8649ee52320c4f540b57954"},
- {file = "hpp_fcl-2.3.0-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b3d81c3c5001db7e2e46deebd8b43e7e33d87c59eba08053f7fc296e6250d870"},
- {file = "hpp_fcl-2.3.0-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:d7feaa498ded442afc5327b2f9156446a26f9ab904dc27d4d4ab78f54b351a0f"},
- {file = "hpp_fcl-2.3.0-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c974c88f4b2dcba1ca5909ece0054a2d99116d9ff7b194232b094a1142f9a23f"},
- {file = "hpp_fcl-2.3.0-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:10f36172c8fc2f9ada72439512260c0c2e57af66d2d3e504a13b501b212f8f0c"},
- {file = "hpp_fcl-2.3.0-2-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:a23a554027fbb6abe0b75b4c4314b077c410b16f8254f3b0b9fea7d9f9cd430e"},
- {file = "hpp_fcl-2.3.0-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:419a6b968b4399baf8e1d3ee9df5c5f5031bc50f0c011695d79a9ecfb176851d"},
- {file = "hpp_fcl-2.3.0-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a4b860396acd9ad203a75152366b03f6b0c8caa02194183b4e11a418fab8bc32"},
- {file = "hpp_fcl-2.3.0-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:7280488094ee4fb682a0236194ba58175ff8c7cdc5e5564b0eedf75f641889f5"},
- {file = "hpp_fcl-2.3.0-2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e29e8192ad9674a79e20d7d4e3b48e91e6bf005a2a885d96b7bb8090b05f097e"},
- {file = "hpp_fcl-2.3.0-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:cc979c22268c93aa3273bdcf8a9a75538911e83a210aa845f2fbd3bfd426311c"},
+ {file = "hpp_fcl-2.3.6-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:64b060824ce2408b64a78997bcb1bc3ff2f3e11f39c906d0d582ba46a8fd473d"},
+ {file = "hpp_fcl-2.3.6-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:5e17deb57b17cdba6f9d5c7b1e0c73920eade8b0776fdf5242c67da61dc170f3"},
+ {file = "hpp_fcl-2.3.6-0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:e1d68def3c3c64b0ccd80bf286d0351a9ec5ee4786c01d65c1fa09035f0ebe4e"},
+ {file = "hpp_fcl-2.3.6-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c88f1ee9e86958d2f8825e85d24d07e0d608acc06e03c7247d1929c48313d517"},
+ {file = "hpp_fcl-2.3.6-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d0b740200e42ed5efcc502bc0074ec538795357cb6fc8d63f6fcbb6e1ff22ddb"},
+ {file = "hpp_fcl-2.3.6-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:ea905ac1922bc1fc7652b2df0cfb4738df6f39edbfd7ea0a4ea9b465b2bb1f2b"},
+ {file = "hpp_fcl-2.3.6-0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d71b0a8725bbd1496058b3482071689f4174918f724a3b9fcb1c7d2c248cb976"},
+ {file = "hpp_fcl-2.3.6-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a9297aa48fa9791fd09fb87c6f5e70339efde841a2dff317d892edf609ac4b14"},
+ {file = "hpp_fcl-2.3.6-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:2685b0122efa9649beb1849b68307d3b3625d616db832d66d69cffaf7beda3ac"},
+ {file = "hpp_fcl-2.3.6-0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a0ed6ac84050ca001f1d7594baed1cada62bf5801dd2d6bd74de90131fd12220"},
+ {file = "hpp_fcl-2.3.6-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2490ff95306d5d2263fc3e814c79ad445bd569840d3faf56a190a178b6ed571e"},
+ {file = "hpp_fcl-2.3.6-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:1129a0371017986dcae0b4482a56c13f93d97b22cb66d79edc53e34bac36a4af"},
+ {file = "hpp_fcl-2.3.6-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:62218062ad56cbce7baf15581e1bbcedafd94a23de409e2f860c871110a9005a"},
+ {file = "hpp_fcl-2.3.6-0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:120bc6bdf2d981deaa92ca5bf7f3f190562505c1e061ece7e60b32a941cef122"},
+ {file = "hpp_fcl-2.3.6-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92ee877c864851c32ba96d9967dd2a868d65479ee8151c4e04b2f1576dd57c2e"},
+ {file = "hpp_fcl-2.3.6-0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9c4d27ed660e7dff72e33abe9490aa1cf72d44841efe293bc98fbd85e975016a"},
+ {file = "hpp_fcl-2.3.6-0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:18450e5b686eb8411fe54e18be4b420f047f54be3db8fb493805d10c8c955d4a"},
+ {file = "hpp_fcl-2.3.6-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3545559da02366cee5e9f06e317afc18ea1f21ff1eaecbbc470a67d84910ff25"},
+ {file = "hpp_fcl-2.3.6-0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b6113300acbf3e936a4d0a0fe8960cc6223a98b51ba0c6f7a0b87ed23aba8606"},
+ {file = "hpp_fcl-2.3.6-0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:26c261efc43dd405d326e92497a14f9b2503ac701cd7caaa8b825b62d6bda73a"},
]

[package.dependencies]
cmeel = "*"
-cmeel-assimp = "*"
-cmeel-octomap = "*"
-eigenpy = "*"
+cmeel-assimp = ">=5.2.5.1,<6"
+cmeel-boost = ">=1.82.0,<1.83.0"
+cmeel-octomap = ">=1.9.8.2,<2"
+eigenpy = ">=3.1,<4"
+
+[package.extras]
+build = ["eigenpy[build] (>=3.1,<4)"]
+
+[[package]]
+name = "hsluv"
+version = "5.0.4"
+description = "Human-friendly HSL"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "hsluv-5.0.4-py2.py3-none-any.whl", hash = "sha256:0138bd10038e2ee1b13eecae9a7d49d4ec8c320b1d7eb4f860832c792e3e4567"},
+ {file = "hsluv-5.0.4.tar.gz", hash = "sha256:2281f946427a882010042844a38c7bbe9e0d0aaf9d46babe46366ed6f169b72e"},
+]
+
+[[package]]
+name = "httpcore"
+version = "0.18.0"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpcore-0.18.0-py3-none-any.whl", hash = "sha256:adc5398ee0a476567bf87467063ee63584a8bce86078bf748e48754f60202ced"},
+ {file = "httpcore-0.18.0.tar.gz", hash = "sha256:13b5e5cd1dca1a6636a6aaea212b19f4f85cd88c366a2b82304181b769aab3c9"},
+]
+
+[package.dependencies]
+anyio = ">=3.0,<5.0"
+certifi = "*"
+h11 = ">=0.13,<0.15"
+sniffio = "==1.*"
+
+[package.extras]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "httpx"
+version = "0.25.0"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpx-0.25.0-py3-none-any.whl", hash = "sha256:181ea7f8ba3a82578be86ef4171554dd45fec26a02556a744db029a0a27b7100"},
+ {file = "httpx-0.25.0.tar.gz", hash = "sha256:47ecda285389cb32bb2691cc6e069e3ab0205956f681c5b2ad2325719751d875"},
+]
+
+[package.dependencies]
+certifi = "*"
+httpcore = ">=0.18.0,<0.19.0"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]

[[package]]
name = "identify"
-version = "2.5.21"
+version = "2.5.30"
description = "File identification library for Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "identify-2.5.21-py2.py3-none-any.whl", hash = "sha256:69edcaffa8e91ae0f77d397af60f148b6b45a8044b2cc6d99cafa5b04793ff00"},
- {file = "identify-2.5.21.tar.gz", hash = "sha256:7671a05ef9cfaf8ff63b15d45a91a1147a03aaccb2976d4e9bd047cbbc508471"},
+ {file = "identify-2.5.30-py2.py3-none-any.whl", hash = "sha256:afe67f26ae29bab007ec21b03d4114f41316ab9dd15aa8736a167481e108da54"},
+ {file = "identify-2.5.30.tar.gz", hash = "sha256:f302a4256a15c849b91cfcdcec052a8ce914634b2f77ae87dad29cd749f2d88d"},
]

[package.extras]
@@ -1278,6 +1294,37 @@ files = [
{file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
]

+[[package]]
+name = "imageio"
+version = "2.31.5"
+description = "Library for reading and writing a wide range of image, video, scientific, and volumetric data formats."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "imageio-2.31.5-py3-none-any.whl", hash = "sha256:97f68e12ba676f2f4b541684ed81f7f3370dc347e8321bc68ee34d37b2dbac9f"},
+ {file = "imageio-2.31.5.tar.gz", hash = "sha256:d8e53f9cd4054880276a3dac0a28c85ba7874084856a55a0294a8ae6ed7f3a8e"},
+]
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=8.3.2"
+
+[package.extras]
+all-plugins = ["astropy", "av", "imageio-ffmpeg", "psutil", "tifffile"]
+all-plugins-pypy = ["av", "imageio-ffmpeg", "psutil", "tifffile"]
+build = ["wheel"]
+dev = ["black", "flake8", "fsspec[github]", "pytest", "pytest-cov"]
+docs = ["numpydoc", "pydata-sphinx-theme", "sphinx (<6)"]
+ffmpeg = ["imageio-ffmpeg", "psutil"]
+fits = ["astropy"]
+full = ["astropy", "av", "black", "flake8", "fsspec[github]", "gdal", "imageio-ffmpeg", "itk", "numpydoc", "psutil", "pydata-sphinx-theme", "pytest", "pytest-cov", "sphinx (<6)", "tifffile", "wheel"]
+gdal = ["gdal"]
+itk = ["itk"]
+linting = ["black", "flake8"]
+pyav = ["av"]
+test = ["fsspec[github]", "pytest", "pytest-cov"]
+tifffile = ["tifffile"]
+
[[package]]
name = "imagesize"
version = "1.4.1"
@@ -1291,13 +1338,13 @@ files = [

[[package]]
name = "importlib-metadata"
-version = "6.1.0"
+version = "6.8.0"
description = "Read metadata from Python packages"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "importlib_metadata-6.1.0-py3-none-any.whl", hash = "sha256:ff80f3b5394912eb1b108fcfd444dc78b7f1f3e16b16188054bd01cb9cb86f09"},
- {file = "importlib_metadata-6.1.0.tar.gz", hash = "sha256:43ce9281e097583d758c2c708c4376371261a02c34682491a8e98352365aad20"},
+ {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"},
+ {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"},
]

[package.dependencies]
@@ -1306,17 +1353,35 @@ zipp = ">=0.5"

[package.extras]
docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
-testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+
+[[package]]
+name = "importlib-resources"
+version = "6.1.0"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"},
+ {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"]
[[package]]
name = "ipython"
-version = "8.12.2"
+version = "8.16.1"
description = "IPython: Productive Interactive Computing"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "ipython-8.12.2-py3-none-any.whl", hash = "sha256:ea8801f15dfe4ffb76dea1b09b847430ffd70d827b41735c64a0638a04103bfc"},
- {file = "ipython-8.12.2.tar.gz", hash = "sha256:c7b80eb7f5a855a88efc971fda506ff7a91c280b42cdae26643e0f601ea281ea"},
+ {file = "ipython-8.16.1-py3-none-any.whl", hash = "sha256:0852469d4d579d9cd613c220af7bf0c9cc251813e12be647cb9d463939db9b1e"},
+ {file = "ipython-8.16.1.tar.gz", hash = "sha256:ad52f58fca8f9f848e256c629eff888efc0528c12fe0f8ec14f33205f23ef938"},
]

[package.dependencies]
@@ -1324,6 +1389,7 @@ appnope = {version = "*", markers = "sys_platform == \"darwin\""}
backcall = "*"
colorama = {version = "*", markers = "sys_platform == \"win32\""}
decorator = "*"
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
jedi = ">=0.16"
matplotlib-inline = "*"
pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""}
@@ -1335,9 +1401,9 @@ traitlets = ">=5"
typing-extensions = {version = "*", markers = "python_version < \"3.10\""}

[package.extras]
-all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
+all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"]
black = ["black"]
-doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
+doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"]
kernel = ["ipykernel"]
nbconvert = ["nbconvert"]
nbformat = ["nbformat"]
@@ -1348,23 +1414,34 @@ test = ["pytest (<7.1)", "pytest-asyncio", "testpath"]
test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"]

[[package]]
-name = "jedi"
-version = "0.18.2"
-description = "An autocompletion tool for Python that can be used for text editors."
+name = "itsdangerous"
+version = "2.1.2"
+description = "Safely pass data to untrusted environments and back."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"},
+ {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"},
+]
+
+[[package]]
+name = "jedi"
+version = "0.19.1"
+description = "An autocompletion tool for Python that can be used for text editors."
optional = false
python-versions = ">=3.6"
files = [
- {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"},
- {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"},
+ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"},
+ {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"},
]

[package.dependencies]
-parso = ">=0.8.0,<0.9.0"
+parso = ">=0.8.3,<0.9.0"

[package.extras]
docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
-qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
-testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"]
+testing = ["Django", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
[[package]]
name = "jinja2"
@@ -1383,15 +1460,209 @@ MarkupSafe = ">=2.0"
[package.extras]
i18n = ["Babel (>=2.7)"]
+[[package]]
+name = "joblib"
+version = "1.3.2"
+description = "Lightweight pipelining with Python functions"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "joblib-1.3.2-py3-none-any.whl", hash = "sha256:ef4331c65f239985f3f2220ecc87db222f08fd22097a3dd5698f693875f8cbb9"},
+ {file = "joblib-1.3.2.tar.gz", hash = "sha256:92f865e621e17784e7955080b6d042489e3b8e294949cc44c6eac304f59772b1"},
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.19.1"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema-4.19.1-py3-none-any.whl", hash = "sha256:cd5f1f9ed9444e554b38ba003af06c0a8c2868131e56bfbef0550fb450c0330e"},
+ {file = "jsonschema-4.19.1.tar.gz", hash = "sha256:ec84cc37cfa703ef7cd4928db24f9cb31428a5d0fa77747b8b51a847458e0bbf"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+jsonschema-specifications = ">=2023.03.6"
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2023.7.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
+ {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
+]
+
+[package.dependencies]
+referencing = ">=0.28.0"
+
+[[package]]
+name = "jupyter-core"
+version = "5.4.0"
+description = "Jupyter core package. A base package on which Jupyter projects rely."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jupyter_core-5.4.0-py3-none-any.whl", hash = "sha256:66e252f675ac04dcf2feb6ed4afb3cd7f68cf92f483607522dc251f32d471571"},
+ {file = "jupyter_core-5.4.0.tar.gz", hash = "sha256:e4b98344bb94ee2e3e6c4519a97d001656009f9cb2b7f2baf15b3c205770011d"},
+]
+
+[package.dependencies]
+platformdirs = ">=2.5"
+pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""}
+traitlets = ">=5.3"
+
+[package.extras]
+docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"]
+test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "kiwisolver"
+version = "1.4.5"
+description = "A fast implementation of the Cassowary constraint solver"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:146d14bebb7f1dc4d5fbf74f8a6cb15ac42baadee8912eb84ac0b3b2a3dc6ac3"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6ef7afcd2d281494c0a9101d5c571970708ad911d028137cd558f02b851c08b4"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9eaa8b117dc8337728e834b9c6e2611f10c79e38f65157c4c38e9400286f5cb1"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ec20916e7b4cbfb1f12380e46486ec4bcbaa91a9c448b97023fde0d5bbf9e4ff"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39b42c68602539407884cf70d6a480a469b93b81b7701378ba5e2328660c847a"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa12042de0171fad672b6c59df69106d20d5596e4f87b5e8f76df757a7c399aa"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a40773c71d7ccdd3798f6489aaac9eee213d566850a9533f8d26332d626b82c"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:19df6e621f6d8b4b9c4d45f40a66839294ff2bb235e64d2178f7522d9170ac5b"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:83d78376d0d4fd884e2c114d0621624b73d2aba4e2788182d286309ebdeed770"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e391b1f0a8a5a10ab3b9bb6afcfd74f2175f24f8975fb87ecae700d1503cdee0"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:852542f9481f4a62dbb5dd99e8ab7aedfeb8fb6342349a181d4036877410f525"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59edc41b24031bc25108e210c0def6f6c2191210492a972d585a06ff246bb79b"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-win32.whl", hash = "sha256:a6aa6315319a052b4ee378aa171959c898a6183f15c1e541821c5c59beaa0238"},
+ {file = "kiwisolver-1.4.5-cp310-cp310-win_amd64.whl", hash = "sha256:d0ef46024e6a3d79c01ff13801cb19d0cad7fd859b15037aec74315540acc276"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:11863aa14a51fd6ec28688d76f1735f8f69ab1fabf388851a595d0721af042f5"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8ab3919a9997ab7ef2fbbed0cc99bb28d3c13e6d4b1ad36e97e482558a91be90"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fcc700eadbbccbf6bc1bcb9dbe0786b4b1cb91ca0dcda336eef5c2beed37b797"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dfdd7c0b105af050eb3d64997809dc21da247cf44e63dc73ff0fd20b96be55a9"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76c6a5964640638cdeaa0c359382e5703e9293030fe730018ca06bc2010c4437"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bbea0db94288e29afcc4c28afbf3a7ccaf2d7e027489c449cf7e8f83c6346eb9"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ceec1a6bc6cab1d6ff5d06592a91a692f90ec7505d6463a88a52cc0eb58545da"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040c1aebeda72197ef477a906782b5ab0d387642e93bda547336b8957c61022e"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:f91de7223d4c7b793867797bacd1ee53bfe7359bd70d27b7b58a04efbb9436c8"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:faae4860798c31530dd184046a900e652c95513796ef51a12bc086710c2eec4d"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0157420efcb803e71d1b28e2c287518b8808b7cf1ab8af36718fd0a2c453eb0"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:06f54715b7737c2fecdbf140d1afb11a33d59508a47bf11bb38ecf21dc9ab79f"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fdb7adb641a0d13bdcd4ef48e062363d8a9ad4a182ac7647ec88f695e719ae9f"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-win32.whl", hash = "sha256:bb86433b1cfe686da83ce32a9d3a8dd308e85c76b60896d58f082136f10bffac"},
+ {file = "kiwisolver-1.4.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c08e1312a9cf1074d17b17728d3dfce2a5125b2d791527f33ffbe805200a355"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:32d5cf40c4f7c7b3ca500f8985eb3fb3a7dfc023215e876f207956b5ea26632a"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f846c260f483d1fd217fe5ed7c173fb109efa6b1fc8381c8b7552c5781756192"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5ff5cf3571589b6d13bfbfd6bcd7a3f659e42f96b5fd1c4830c4cf21d4f5ef45"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7269d9e5f1084a653d575c7ec012ff57f0c042258bf5db0954bf551c158466e7"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da802a19d6e15dffe4b0c24b38b3af68e6c1a68e6e1d8f30148c83864f3881db"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3aba7311af82e335dd1e36ffff68aaca609ca6290c2cb6d821a39aa075d8e3ff"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:763773d53f07244148ccac5b084da5adb90bfaee39c197554f01b286cf869228"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2270953c0d8cdab5d422bee7d2007f043473f9d2999631c86a223c9db56cbd16"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d099e745a512f7e3bbe7249ca835f4d357c586d78d79ae8f1dcd4d8adeb9bda9"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:74db36e14a7d1ce0986fa104f7d5637aea5c82ca6326ed0ec5694280942d1162"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:7e5bab140c309cb3a6ce373a9e71eb7e4873c70c2dda01df6820474f9889d6d4"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:0f114aa76dc1b8f636d077979c0ac22e7cd8f3493abbab152f20eb8d3cda71f3"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:88a2df29d4724b9237fc0c6eaf2a1adae0cdc0b3e9f4d8e7dc54b16812d2d81a"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-win32.whl", hash = "sha256:72d40b33e834371fd330fb1472ca19d9b8327acb79a5821d4008391db8e29f20"},
+ {file = "kiwisolver-1.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:2c5674c4e74d939b9d91dda0fae10597ac7521768fec9e399c70a1f27e2ea2d9"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3a2b053a0ab7a3960c98725cfb0bf5b48ba82f64ec95fe06f1d06c99b552e130"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cd32d6c13807e5c66a7cbb79f90b553642f296ae4518a60d8d76243b0ad2898"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59ec7b7c7e1a61061850d53aaf8e93db63dce0c936db1fda2658b70e4a1be709"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da4cfb373035def307905d05041c1d06d8936452fe89d464743ae7fb8371078b"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2400873bccc260b6ae184b2b8a4fec0e4082d30648eadb7c3d9a13405d861e89"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1b04139c4236a0f3aff534479b58f6f849a8b351e1314826c2d230849ed48985"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:4e66e81a5779b65ac21764c295087de82235597a2293d18d943f8e9e32746265"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:7931d8f1f67c4be9ba1dd9c451fb0eeca1a25b89e4d3f89e828fe12a519b782a"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b3f7e75f3015df442238cca659f8baa5f42ce2a8582727981cbfa15fee0ee205"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:bbf1d63eef84b2e8c89011b7f2235b1e0bf7dacc11cac9431fc6468e99ac77fb"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4c380469bd3f970ef677bf2bcba2b6b0b4d5c75e7a020fb863ef75084efad66f"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-win32.whl", hash = "sha256:9408acf3270c4b6baad483865191e3e582b638b1654a007c62e3efe96f09a9a3"},
+ {file = "kiwisolver-1.4.5-cp37-cp37m-win_amd64.whl", hash = "sha256:5b94529f9b2591b7af5f3e0e730a4e0a41ea174af35a4fd067775f9bdfeee01a"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:11c7de8f692fc99816e8ac50d1d1aef4f75126eefc33ac79aac02c099fd3db71"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:53abb58632235cd154176ced1ae8f0d29a6657aa1aa9decf50b899b755bc2b93"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:88b9f257ca61b838b6f8094a62418421f87ac2a1069f7e896c36a7d86b5d4c29"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3195782b26fc03aa9c6913d5bad5aeb864bdc372924c093b0f1cebad603dd712"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fc579bf0f502e54926519451b920e875f433aceb4624a3646b3252b5caa9e0b6"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5a580c91d686376f0f7c295357595c5a026e6cbc3d77b7c36e290201e7c11ecb"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cfe6ab8da05c01ba6fbea630377b5da2cd9bcbc6338510116b01c1bc939a2c18"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:d2e5a98f0ec99beb3c10e13b387f8db39106d53993f498b295f0c914328b1333"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a51a263952b1429e429ff236d2f5a21c5125437861baeed77f5e1cc2d2c7c6da"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:3edd2fa14e68c9be82c5b16689e8d63d89fe927e56debd6e1dbce7a26a17f81b"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:74d1b44c6cfc897df648cc9fdaa09bc3e7679926e6f96df05775d4fb3946571c"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:76d9289ed3f7501012e05abb8358bbb129149dbd173f1f57a1bf1c22d19ab7cc"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:92dea1ffe3714fa8eb6a314d2b3c773208d865a0e0d35e713ec54eea08a66250"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-win32.whl", hash = "sha256:5c90ae8c8d32e472be041e76f9d2f2dbff4d0b0be8bd4041770eddb18cf49a4e"},
+ {file = "kiwisolver-1.4.5-cp38-cp38-win_amd64.whl", hash = "sha256:c7940c1dc63eb37a67721b10d703247552416f719c4188c54e04334321351ced"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:9407b6a5f0d675e8a827ad8742e1d6b49d9c1a1da5d952a67d50ef5f4170b18d"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15568384086b6df3c65353820a4473575dbad192e35010f622c6ce3eebd57af9"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0dc9db8e79f0036e8173c466d21ef18e1befc02de8bf8aa8dc0813a6dc8a7046"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cdc8a402aaee9a798b50d8b827d7ecf75edc5fb35ea0f91f213ff927c15f4ff0"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6c3bd3cde54cafb87d74d8db50b909705c62b17c2099b8f2e25b461882e544ff"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:955e8513d07a283056b1396e9a57ceddbd272d9252c14f154d450d227606eb54"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:346f5343b9e3f00b8db8ba359350eb124b98c99efd0b408728ac6ebf38173958"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9098e0049e88c6a24ff64545cdfc50807818ba6c1b739cae221bbbcbc58aad3"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:00bd361b903dc4bbf4eb165f24d1acbee754fce22ded24c3d56eec268658a5cf"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7b8b454bac16428b22560d0a1cf0a09875339cab69df61d7805bf48919415901"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:f1d072c2eb0ad60d4c183f3fb44ac6f73fb7a8f16a2694a91f988275cbf352f9"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:31a82d498054cac9f6d0b53d02bb85811185bcb477d4b60144f915f3b3126342"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6512cb89e334e4700febbffaaa52761b65b4f5a3cf33f960213d5656cea36a77"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-win32.whl", hash = "sha256:9db8ea4c388fdb0f780fe91346fd438657ea602d58348753d9fb265ce1bca67f"},
+ {file = "kiwisolver-1.4.5-cp39-cp39-win_amd64.whl", hash = "sha256:59415f46a37f7f2efeec758353dd2eae1b07640d8ca0f0c42548ec4125492635"},
+ {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5c7b3b3a728dc6faf3fc372ef24f21d1e3cee2ac3e9596691d746e5a536de920"},
+ {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:620ced262a86244e2be10a676b646f29c34537d0d9cc8eb26c08f53d98013390"},
+ {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:378a214a1e3bbf5ac4a8708304318b4f890da88c9e6a07699c4ae7174c09a68d"},
+ {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf7be1207676ac608a50cd08f102f6742dbfc70e8d60c4db1c6897f62f71523"},
+ {file = "kiwisolver-1.4.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ba55dce0a9b8ff59495ddd050a0225d58bd0983d09f87cfe2b6aec4f2c1234e4"},
+ {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd32ea360bcbb92d28933fc05ed09bffcb1704ba3fc7942e81db0fd4f81a7892"},
+ {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7139af55d1688f8b960ee9ad5adafc4ac17c1c473fe07133ac092310d76544"},
+ {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:dced8146011d2bc2e883f9bd68618b8247387f4bbec46d7392b3c3b032640126"},
+ {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9bf3325c47b11b2e51bca0824ea217c7cd84491d8ac4eefd1e409705ef092bd"},
+ {file = "kiwisolver-1.4.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5794cf59533bc3f1b1c821f7206a3617999db9fbefc345360aafe2e067514929"},
+ {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e368f200bbc2e4f905b8e71eb38b3c04333bddaa6a2464a6355487b02bb7fb09"},
+ {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5d706eba36b4c4d5bc6c6377bb6568098765e990cfc21ee16d13963fab7b3e7"},
+ {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85267bd1aa8880a9c88a8cb71e18d3d64d2751a790e6ca6c27b8ccc724bcd5ad"},
+ {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:210ef2c3a1f03272649aff1ef992df2e724748918c4bc2d5a90352849eb40bea"},
+ {file = "kiwisolver-1.4.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:11d011a7574eb3b82bcc9c1a1d35c1d7075677fdd15de527d91b46bd35e935ee"},
+ {file = "kiwisolver-1.4.5.tar.gz", hash = "sha256:e57e563a57fb22a142da34f38acc2fc1a5c864bc29ca1517a88abc963e60d6ec"},
+]
+
+[[package]]
+name = "lazy-loader"
+version = "0.3"
+description = "lazy_loader"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lazy_loader-0.3-py3-none-any.whl", hash = "sha256:1e9e76ee8631e264c62ce10006718e80b2cfc74340d17d1031e0f84af7478554"},
+ {file = "lazy_loader-0.3.tar.gz", hash = "sha256:3b68898e34f5b2a29daaaac172c6555512d0f32074f147e2254e4a6d9d838f37"},
+]
+
+[package.extras]
+lint = ["pre-commit (>=3.3)"]
+test = ["pytest (>=7.4)", "pytest-cov (>=4.1)"]
+
[[package]]
name = "markdown-it-py"
-version = "2.2.0"
+version = "3.0.0"
description = "Python port of markdown-it. Markdown parsing, done right!"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "markdown-it-py-2.2.0.tar.gz", hash = "sha256:7c9a5e412688bc771c67432cbfebcdd686c93ce6484913dccf06cb5a0bea35a1"},
- {file = "markdown_it_py-2.2.0-py3-none-any.whl", hash = "sha256:5a35f8d1870171d9acc47b99612dc146129b631baf04970128b568f190d0cc30"},
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
]

[package.dependencies]
@@ -1404,68 +1675,128 @@ compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0
linkify = ["linkify-it-py (>=1,<3)"]
plugins = ["mdit-py-plugins"]
profiling = ["gprof2dot"]
-rtd = ["attrs", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
name = "markupsafe"
-version = "2.1.2"
+version = "2.1.3"
description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.7"
files = [
- {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"},
- {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"},
- {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"},
- {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"},
- {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"},
- {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"},
- {file = "MarkupSafe-2.1.2.tar.gz", hash = "sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"},
+ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"},
+]
+
+[[package]]
+name = "matplotlib"
+version = "3.8.0"
+description = "Python plotting package"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "matplotlib-3.8.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a"},
+ {file = "matplotlib-3.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a33bd3045c7452ca1fa65676d88ba940867880e13e2546abb143035fa9072a9d"},
+ {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ea6886e93401c22e534bbfd39201ce8931b75502895cfb115cbdbbe2d31f287"},
+ {file = "matplotlib-3.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d670b9348e712ec176de225d425f150dc8e37b13010d85233c539b547da0be39"},
+ {file = "matplotlib-3.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7b37b74f00c4cb6af908cb9a00779d97d294e89fd2145ad43f0cdc23f635760c"},
+ {file = "matplotlib-3.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:0e723f5b96f3cd4aad99103dc93e9e3cdc4f18afdcc76951f4857b46f8e39d2d"},
+ {file = "matplotlib-3.8.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:5dc945a9cb2deb7d197ba23eb4c210e591d52d77bf0ba27c35fc82dec9fa78d4"},
+ {file = "matplotlib-3.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8b5a1bf27d078453aa7b5b27f52580e16360d02df6d3dc9504f3d2ce11f6309"},
+ {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f25ffb6ad972cdffa7df8e5be4b1e3cadd2f8d43fc72085feb1518006178394"},
+ {file = "matplotlib-3.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eee482731c8c17d86d9ddb5194d38621f9b0f0d53c99006275a12523ab021732"},
+ {file = "matplotlib-3.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:36eafe2128772195b373e1242df28d1b7ec6c04c15b090b8d9e335d55a323900"},
+ {file = "matplotlib-3.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:061ee58facb3580cd2d046a6d227fb77e9295599c5ec6ad069f06b5821ad1cfc"},
+ {file = "matplotlib-3.8.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:3cc3776836d0f4f22654a7f2d2ec2004618d5cf86b7185318381f73b80fd8a2d"},
+ {file = "matplotlib-3.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c49a2bd6981264bddcb8c317b6bd25febcece9e2ebfcbc34e7f4c0c867c09dc"},
+ {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ed11654fc83cd6cfdf6170b453e437674a050a452133a064d47f2f1371f8d3"},
+ {file = "matplotlib-3.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae97fdd6996b3a25da8ee43e3fc734fff502f396801063c6b76c20b56683196"},
+ {file = "matplotlib-3.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:87df75f528020a6299f76a1d986c0ed4406e3b2bd44bc5e306e46bca7d45e53e"},
+ {file = "matplotlib-3.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:90d74a95fe055f73a6cd737beecc1b81c26f2893b7a3751d52b53ff06ca53f36"},
+ {file = "matplotlib-3.8.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c3499c312f5def8f362a2bf761d04fa2d452b333f3a9a3f58805273719bf20d9"},
+ {file = "matplotlib-3.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:31e793c8bd4ea268cc5d3a695c27b30650ec35238626961d73085d5e94b6ab68"},
+ {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d5ee602ef517a89d1f2c508ca189cfc395dd0b4a08284fb1b97a78eec354644"},
+ {file = "matplotlib-3.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5de39dc61ca35342cf409e031f70f18219f2c48380d3886c1cf5ad9f17898e06"},
+ {file = "matplotlib-3.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:dd386c80a98b5f51571b9484bf6c6976de383cd2a8cd972b6a9562d85c6d2087"},
+ {file = "matplotlib-3.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:f691b4ef47c7384d0936b2e8ebdeb5d526c81d004ad9403dfb9d4c76b9979a93"},
+ {file = "matplotlib-3.8.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:0b11f354aae62a2aa53ec5bb09946f5f06fc41793e351a04ff60223ea9162955"},
+ {file = "matplotlib-3.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f54b9fb87ca5acbcdd0f286021bedc162e1425fa5555ebf3b3dfc167b955ad9"},
+ {file = "matplotlib-3.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:60a6e04dfd77c0d3bcfee61c3cd335fff1b917c2f303b32524cd1235e194ef99"},
+ {file = "matplotlib-3.8.0.tar.gz", hash = "sha256:df8505e1c19d5c2c26aff3497a7cbd3ccfc2e97043d1e4db3e76afa399164b69"},
]

+[package.dependencies]
+contourpy = ">=1.0.1"
+cycler = ">=0.10"
+fonttools = ">=4.22.0"
+importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""}
+kiwisolver = ">=1.0.1"
+numpy = ">=1.21,<2"
+packaging = ">=20.0"
+pillow = ">=6.2.0"
+pyparsing = ">=2.3.1"
+python-dateutil = ">=2.7"
+setuptools_scm = ">=7"
+
[[package]]
name = "matplotlib-inline"
version = "0.1.6"
@@ -1482,21 +1813,21 @@ traitlets = "*"

[[package]]
name = "mdit-py-plugins"
-version = "0.3.5"
+version = "0.4.0"
description = "Collection of plugins for markdown-it-py"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "mdit-py-plugins-0.3.5.tar.gz", hash = "sha256:eee0adc7195e5827e17e02d2a258a2ba159944a0748f59c5099a4a27f78fcf6a"},
- {file = "mdit_py_plugins-0.3.5-py3-none-any.whl", hash = "sha256:ca9a0714ea59a24b2b044a1831f48d817dd0c817e84339f20e7889f392d77c4e"},
+ {file = "mdit_py_plugins-0.4.0-py3-none-any.whl", hash = "sha256:b51b3bb70691f57f974e257e367107857a93b36f322a9e6d44ca5bf28ec2def9"},
+ {file = "mdit_py_plugins-0.4.0.tar.gz", hash = "sha256:d8ab27e9aed6c38aa716819fedfde15ca275715955f8a185a8e1cf90fb1d2c1b"},
]

[package.dependencies]
-markdown-it-py = ">=1.0.0,<3.0.0"
+markdown-it-py = ">=1.0.0,<4.0.0"

[package.extras]
code-style = ["pre-commit"]
-rtd = ["attrs", "myst-parser (>=0.16.1,<0.17.0)", "sphinx-book-theme (>=0.1.0,<0.2.0)"]
+rtd = ["myst-parser", "sphinx-book-theme"]
testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
[[package]]
@@ -1523,39 +1854,88 @@

[[package]]
name = "myst-parser"
-version = "1.0.0"
+version = "2.0.0"
description = "An extended [CommonMark](https://spec.commonmark.org/) compliant parser,"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "myst-parser-1.0.0.tar.gz", hash = "sha256:502845659313099542bd38a2ae62f01360e7dd4b1310f025dd014dfc0439cdae"},
- {file = "myst_parser-1.0.0-py3-none-any.whl", hash = "sha256:69fb40a586c6fa68995e6521ac0a525793935db7e724ca9bac1d33be51be9a4c"},
+ {file = "myst_parser-2.0.0-py3-none-any.whl", hash = "sha256:7c36344ae39c8e740dad7fdabf5aa6fc4897a813083c6cc9990044eb93656b14"},
+ {file = "myst_parser-2.0.0.tar.gz", hash = "sha256:ea929a67a6a0b1683cdbe19b8d2e724cd7643f8aa3e7bb18dd65beac3483bead"},
]

[package.dependencies]
-docutils = ">=0.15,<0.20"
+docutils = ">=0.16,<0.21"
jinja2 = "*"
-markdown-it-py = ">=1.0.0,<3.0.0"
-mdit-py-plugins = ">=0.3.4,<0.4.0"
+markdown-it-py = ">=3.0,<4.0"
+mdit-py-plugins = ">=0.4,<1.0"
pyyaml = "*"
-sphinx = ">=5,<7"
+sphinx = ">=6,<8"

[package.extras]
code-style = ["pre-commit (>=3.0,<4.0)"]
-linkify = ["linkify-it-py (>=1.0,<2.0)"]
-rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.7.5,<0.8.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"]
+linkify = ["linkify-it-py (>=2.0,<3.0)"]
+rtd = ["ipython", "pydata-sphinx-theme (==v0.13.0rc4)", "sphinx-autodoc2 (>=0.4.2,<0.5.0)", "sphinx-book-theme (==1.0.0rc2)", "sphinx-copybutton", "sphinx-design2", "sphinx-pyscript", "sphinx-tippy (>=0.3.1)", "sphinx-togglebutton", "sphinxext-opengraph (>=0.8.2,<0.9.0)", "sphinxext-rediraffe (>=0.2.7,<0.3.0)"]
testing = ["beautifulsoup4", "coverage[toml]", "pytest (>=7,<8)", "pytest-cov", "pytest-param-files (>=0.3.4,<0.4.0)", "pytest-regressions", "sphinx-pytest"]
testing-docutils = ["pygments", "pytest (>=7,<8)", "pytest-param-files (>=0.3.4,<0.4.0)"]

+[[package]]
+name = "nbformat"
+version = "5.7.0"
+description = "The Jupyter Notebook format"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "nbformat-5.7.0-py3-none-any.whl", hash = "sha256:1b05ec2c552c2f1adc745f4eddce1eac8ca9ffd59bb9fd859e827eaa031319f9"},
+ {file = "nbformat-5.7.0.tar.gz", hash = "sha256:1d4760c15c1a04269ef5caf375be8b98dd2f696e5eb9e603ec2bf091f9b0d3f3"},
+]
+
+[package.dependencies]
+fastjsonschema = "*"
+jsonschema = ">=2.6"
+jupyter-core = "*"
+traitlets = ">=5.1"
+
+[package.extras]
+test = ["check-manifest", "pep440", "pre-commit", "pytest", "testpath"]
+
+[[package]]
+name = "nest-asyncio"
+version = "1.5.8"
+description = "Patch asyncio to allow nested event loops"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "nest_asyncio-1.5.8-py3-none-any.whl", hash = "sha256:accda7a339a70599cb08f9dd09a67e0c2ef8d8d6f4c07f96ab203f2ae254e48d"},
+ {file = "nest_asyncio-1.5.8.tar.gz", hash = "sha256:25aa2ca0d2a5b5531956b9e273b45cf664cae2b145101d73b86b199978d48fdb"},
+]
+
+[[package]]
+name = "networkx"
+version = "3.2"
+description = "Python package for creating and manipulating graphs and networks"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "networkx-3.2-py3-none-any.whl", hash = "sha256:8b25f564bd28f94ac821c58b04ae1a3109e73b001a7d476e4bb0d00d63706bf8"},
+ {file = "networkx-3.2.tar.gz", hash = "sha256:bda29edf392d9bfa5602034c767d28549214ec45f620081f0b74dc036a1fbbc1"},
+]
+
+[package.extras]
+default = ["matplotlib (>=3.5)", "numpy (>=1.22)", "pandas (>=1.4)", "scipy (>=1.9,!=1.11.0,!=1.11.1)"]
+developer = ["changelist (==0.4)", "mypy (>=1.1)", "pre-commit (>=3.2)", "rtoml"]
+doc = ["nb2plots (>=0.7)", "nbconvert (<7.9)", "numpydoc (>=1.6)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.14)", "sphinx (>=7)", "sphinx-gallery (>=0.14)", "texext (>=0.6.7)"]
+extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.11)", "sympy (>=1.10)"]
+test = ["pytest (>=7.2)", "pytest-cov (>=4.0)"]
+
[[package]]
name = "nodeenv"
-version = "1.7.0"
+version = "1.8.0"
description = "Node.js virtual environment builder"
optional = false
python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*"
files = [
- {file = "nodeenv-1.7.0-py2.py3-none-any.whl", hash = "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e"},
- {file = "nodeenv-1.7.0.tar.gz", hash = "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b"},
+ {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"},
+ {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"},
]

[package.dependencies]
@@ -1563,50 +1943,151 @@ setuptools = "*"

[[package]]
name = "numpy"
-version = "1.24.2"
+version = "1.24.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.8"
files = [
- {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"},
- {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"},
- {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"},
- {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"},
- {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = "sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"},
- {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"},
- {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"},
- {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"},
- {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"},
- {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"},
- {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"},
- {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"},
- {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"},
- {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"},
- {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"},
- {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"},
- {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"},
- {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"},
- {file = "numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"},
- {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"},
- {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"},
- {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"},
- {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"},
- {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"},
- {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"},
- {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"},
+ {file = "numpy-1.24.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c0bfb52d2169d58c1cdb8cc1f16989101639b34c7d3ce60ed70b19c63eba0b64"},
+ {file = "numpy-1.24.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ed094d4f0c177b1b8e7aa9cba7d6ceed51c0e569a5318ac0ca9a090680a6a1b1"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79fc682a374c4a8ed08b331bef9c5f582585d1048fa6d80bc6c35bc384eee9b4"},
+ {file = "numpy-1.24.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ffe43c74893dbf38c2b0a1f5428760a1a9c98285553c89e12d70a96a7f3a4d6"},
+ {file = "numpy-1.24.4-cp310-cp310-win32.whl", hash = "sha256:4c21decb6ea94057331e111a5bed9a79d335658c27ce2adb580fb4d54f2ad9bc"},
+ {file = "numpy-1.24.4-cp310-cp310-win_amd64.whl", hash = "sha256:b4bea75e47d9586d31e892a7401f76e909712a0fd510f58f5337bea9572c571e"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f136bab9c2cfd8da131132c2cf6cc27331dd6fae65f95f69dcd4ae3c3639c810"},
+ {file = "numpy-1.24.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e2926dac25b313635e4d6cf4dc4e51c8c0ebfed60b801c799ffc4c32bf3d1254"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:222e40d0e2548690405b0b3c7b21d1169117391c2e82c378467ef9ab4c8f0da7"},
+ {file = "numpy-1.24.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7215847ce88a85ce39baf9e89070cb860c98fdddacbaa6c0da3ffb31b3350bd5"},
+ {file = "numpy-1.24.4-cp311-cp311-win32.whl", hash = "sha256:4979217d7de511a8d57f4b4b5b2b965f707768440c17cb70fbf254c4b225238d"},
+ {file = "numpy-1.24.4-cp311-cp311-win_amd64.whl", hash = "sha256:b7b1fc9864d7d39e28f41d089bfd6353cb5f27ecd9905348c24187a768c79694"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1452241c290f3e2a312c137a9999cdbf63f78864d63c79039bda65ee86943f61"},
+ {file = "numpy-1.24.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:04640dab83f7c6c85abf9cd729c5b65f1ebd0ccf9de90b270cd61935eef0197f"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5425b114831d1e77e4b5d812b69d11d962e104095a5b9c3b641a218abcc050e"},
+ {file = "numpy-1.24.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd80e219fd4c71fc3699fc1dadac5dcf4fd882bfc6f7ec53d30fa197b8ee22dc"},
+ {file = "numpy-1.24.4-cp38-cp38-win32.whl", hash = "sha256:4602244f345453db537be5314d3983dbf5834a9701b7723ec28923e2889e0bb2"},
+ {file = "numpy-1.24.4-cp38-cp38-win_amd64.whl", hash = "sha256:692f2e0f55794943c5bfff12b3f56f99af76f902fc47487bdfe97856de51a706"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2541312fbf09977f3b3ad449c4e5f4bb55d0dbf79226d7724211acc905049400"},
+ {file = "numpy-1.24.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9667575fb6d13c95f1b36aca12c5ee3356bf001b714fc354eb5465ce1609e62f"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3a86ed21e4f87050382c7bc96571755193c4c1392490744ac73d660e8f564a9"},
+ {file = "numpy-1.24.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d11efb4dbecbdf22508d55e48d9c8384db795e1b7b51ea735289ff96613ff74d"},
+ {file = "numpy-1.24.4-cp39-cp39-win32.whl", hash = "sha256:6620c0acd41dbcb368610bb2f4d83145674040025e5536954782467100aa8835"},
+ {file = "numpy-1.24.4-cp39-cp39-win_amd64.whl", hash = "sha256:befe2bf740fd8373cf56149a5c23a0f601e82869598d41f8e188a0e9869926f8"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:31f13e25b4e304632a4619d0e0777662c2ffea99fcae2029556b17d8ff958aef"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95f7ac6540e95bc440ad77f56e520da5bf877f87dca58bd095288dce8940532a"},
+ {file = "numpy-1.24.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e98f220aa76ca2a977fe435f5b04d7b3470c0a2e6312907b37ba6068f26787f2"},
+ {file = "numpy-1.24.4.tar.gz", hash = "sha256:80f5e3a4e498641401868df4208b74581206afbee7cf7b8329daae82676d9463"},
+]
+
+[[package]]
+name = "omegaconf"
+version = "2.3.0"
+description = "A flexible configuration library"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b"},
+ {file = "omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7"},
+]
+
+[package.dependencies]
+antlr4-python3-runtime = "==4.9.*"
+PyYAML = ">=5.1.0"
+
+[[package]]
+name = "open3d"
+version = "0.17.0"
+description = "Open3D: A Modern Library for 3D Data Processing."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "open3d-0.17.0-1fix6008-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:428dad435f74d143e6d0526d030c9737fc01fe981bf9c5e1d4daa436807984e1"},
+ {file = "open3d-0.17.0-1fix6008-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:88671846d8a2c699786a0f4312aa47abc916d6c76fb815e0752f6c65970a6668"},
+ {file = "open3d-0.17.0-1fix6008-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:20cd8ed07f1364fe54b15e993d011e70cff85959de7157017f7bc2b139788a89"},
+ {file = "open3d-0.17.0-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:1847d72bfb2d037a84e54c466d227f7374c59e9eb2ebe8ac728b0b9605062dc4"},
+ {file = "open3d-0.17.0-cp310-cp310-macosx_13_0_arm64.whl", hash = "sha256:cf7aac7eda4c922867c4dd9b1641b10d3d0b1494879251ed17b3118f9df140f2"},
+ {file = "open3d-0.17.0-cp310-cp310-manylinux_2_27_aarch64.whl", hash = "sha256:e28e515f4850380ab4f25842bce5f3d4c63b5565f9f8d2df141059b62250ff9e"},
+ {file = "open3d-0.17.0-cp310-cp310-manylinux_2_27_x86_64.whl", hash = "sha256:3dc300697320bb688246c5d09f681045815c33222568573f1964b2d756431657"},
+ {file = "open3d-0.17.0-cp310-cp310-win_amd64.whl", hash = "sha256:114caef89b04168e635f38edd509208023390b241ea4a50fd47b52e9f69b8a76"},
+ {file = "open3d-0.17.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:57e3dfe8800bc4c53a75998969bc271423676ffe518d5e1a37dc4b69dc201d06"},
+ {file = "open3d-0.17.0-cp37-cp37m-manylinux_2_27_aarch64.whl", hash = "sha256:c5dca18602ca92f2c0d53023f17159b38abdbb16ae3eecb858299118aaa88099"},
+ {file = "open3d-0.17.0-cp37-cp37m-manylinux_2_27_x86_64.whl", hash = "sha256:454b693c3af7b03839e76bf3abd7a15df1454bf2c2753bbc58934f9d44d1f8db"},
+ {file = "open3d-0.17.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ec7d7c0bc33040bf700a36d26fdca5d1f0aad11b376f859712165a8cb91fa6c5"},
+ {file = "open3d-0.17.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:1200d7549dcc0957276b5ea7d9aafc593339638e2c84b97433a20832bf4f0b72"},
+ {file = "open3d-0.17.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:843b01743cebd8bb8b66a1e1d471e8c834ed494817d092874a9f2914c94ab6cc"},
+ {file = "open3d-0.17.0-cp38-cp38-manylinux_2_27_aarch64.whl", hash = "sha256:6182f52c8313885d0e4ba3dfafbb4be255bc7a7881e0ae27917cd12a0ce89442"},
+ {file = "open3d-0.17.0-cp38-cp38-manylinux_2_27_x86_64.whl", hash = "sha256:50f0b476a41276cc315ac3871806cc192a33e6a3635b12f8e1d3f00ab18a4656"},
+ {file = "open3d-0.17.0-cp38-cp38-win_amd64.whl", hash = "sha256:01257c73fc8b6257b0e2413c0707d569f356efc3ceb9b9764b55cac47f614563"},
+ {file = "open3d-0.17.0-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:9010a9b23efd309535472bbb1428fb565ec2d0aaa0767d9f99a878edc9061280"},
+ {file = "open3d-0.17.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:73c1f7e44e6254c5211fb0b5a315c353c3aaca16cb7eda4e2526ded1e6e41cd8"},
+ {file = "open3d-0.17.0-cp39-cp39-manylinux_2_27_aarch64.whl", hash = "sha256:60ad0ec46e27366fcc48260767790aea6dc398b12f690910c728c1367064470c"},
+ {file = "open3d-0.17.0-cp39-cp39-manylinux_2_27_x86_64.whl", hash = "sha256:5ab9235a6b2a31396643e2f0cadc665738d433545738d80f85be3d3cfb2ecc12"},
+ {file = "open3d-0.17.0-cp39-cp39-win_amd64.whl", hash = "sha256:35ae11a287b71df6e0a723135e83402d08883e5e708ab2051b92447af025121c"},
+]
+
+[package.dependencies]
+configargparse = "*"
+dash = ">=2.6.0"
+nbformat = "5.7.0"
+numpy = ">=1.18.0"
+werkzeug = ">=2.2.3"
+
+[[package]]
+name = "opencv-contrib-python"
+version = "4.8.1.78"
+description = "Wrapper package for OpenCV python bindings."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "opencv-contrib-python-4.8.1.78.tar.gz", hash = "sha256:81804332299d656905d4f404fcec5f400d692c652d7a47926b7a441272ce795b"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:8d97192471c7d42532103ecebf8ad9d9534b7cd655ffadbccacb9ff3d4d49b40"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:8d6feb39d4af2cd1e8919110229bfedd13d4798a089bbe88fbd1a001b664d552"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cad1720ac701cb3742f48f95bef5cfa288b916b6ac5700f63d5809e3ad5999e"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3fb62bc5967a79bce7c576ef94dea0b9172e52bab91630103e042cb5f29a148a"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-win32.whl", hash = "sha256:f8737cf3055a6156c66c75432ed28ee3c1d52532b17d91ed73d508ae351b3e66"},
+ {file = "opencv_contrib_python-4.8.1.78-cp37-abi3-win_amd64.whl", hash = "sha256:377936b02dcf82dc70261101381a8ad82b03d1298f185886be298d74fe35c328"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
+ {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""},
+ {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""},
+ {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""},
+]
+
+[[package]]
+name = "opencv-python"
+version = "4.8.1.78"
+description = "Wrapper package for OpenCV python bindings."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "opencv-python-4.8.1.78.tar.gz", hash = "sha256:cc7adbbcd1112877a39274106cb2752e04984bc01a031162952e97450d6117f6"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:91d5f6f5209dc2635d496f6b8ca6573ecdad051a09e6b5de4c399b8e673c60da"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:bc31f47e05447da8b3089faa0a07ffe80e114c91ce0b171e6424f9badbd1c5cd"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9814beca408d3a0eca1bae7e3e5be68b07c17ecceb392b94170881216e09b319"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4c406bdb41eb21ea51b4e90dfbc989c002786c3f601c236a99c59a54670a394"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-win32.whl", hash = "sha256:a7aac3900fbacf55b551e7b53626c3dad4c71ce85643645c43e91fcb19045e47"},
+ {file = "opencv_python-4.8.1.78-cp37-abi3-win_amd64.whl", hash = "sha256:b983197f97cfa6fcb74e1da1802c7497a6f94ed561aba6980f1f33123f904956"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
+ {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""},
+ {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\""},
+ {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_machine != \"arm64\" and python_version < \"3.10\""},
]

[[package]]
name = "packaging"
-version = "23.0"
+version = "23.2"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.7"
files = [
- {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"},
- {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"},
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
]

[[package]]
@@ -1694,6 +2175,70 @@ files = [
{file = "panda3d-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:839d8b2724a149d56a31f621c6397a1fed597a7dacee478922c711426160990e"},
]

+[[package]]
+name = "pandas"
+version = "2.1.1"
+description = "Powerful data structures for data analysis, time series, and statistics"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "pandas-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58d997dbee0d4b64f3cb881a24f918b5f25dd64ddf31f467bb9b67ae4c63a1e4"},
+ {file = "pandas-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:02304e11582c5d090e5a52aec726f31fe3f42895d6bfc1f28738f9b64b6f0614"},
+ {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffa8f0966de2c22de408d0e322db2faed6f6e74265aa0856f3824813cf124363"},
+ {file = "pandas-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1f84c144dee086fe4f04a472b5cd51e680f061adf75c1ae4fc3a9275560f8f4"},
+ {file = "pandas-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75ce97667d06d69396d72be074f0556698c7f662029322027c226fd7a26965cb"},
+ {file = "pandas-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:4c3f32fd7c4dccd035f71734df39231ac1a6ff95e8bdab8d891167197b7018d2"},
+ {file = "pandas-2.1.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e2959720b70e106bb1d8b6eadd8ecd7c8e99ccdbe03ee03260877184bb2877d"},
+ {file = "pandas-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25e8474a8eb258e391e30c288eecec565bfed3e026f312b0cbd709a63906b6f8"},
+ {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8bd1685556f3374520466998929bade3076aeae77c3e67ada5ed2b90b4de7f0"},
+ {file = "pandas-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc3657869c7902810f32bd072f0740487f9e030c1a3ab03e0af093db35a9d14e"},
+ {file = "pandas-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:05674536bd477af36aa2effd4ec8f71b92234ce0cc174de34fd21e2ee99adbc2"},
+ {file = "pandas-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:b407381258a667df49d58a1b637be33e514b07f9285feb27769cedb3ab3d0b3a"},
+ {file = "pandas-2.1.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c747793c4e9dcece7bb20156179529898abf505fe32cb40c4052107a3c620b49"},
+ {file = "pandas-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3bcad1e6fb34b727b016775bea407311f7721db87e5b409e6542f4546a4951ea"},
+ {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f5ec7740f9ccb90aec64edd71434711f58ee0ea7f5ed4ac48be11cfa9abf7317"},
+ {file = "pandas-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29deb61de5a8a93bdd033df328441a79fcf8dd3c12d5ed0b41a395eef9cd76f0"},
+ {file = "pandas-2.1.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4f99bebf19b7e03cf80a4e770a3e65eee9dd4e2679039f542d7c1ace7b7b1daa"},
+ {file = "pandas-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:84e7e910096416adec68075dc87b986ff202920fb8704e6d9c8c9897fe7332d6"},
+ {file = "pandas-2.1.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:366da7b0e540d1b908886d4feb3d951f2f1e572e655c1160f5fde28ad4abb750"},
+ {file = "pandas-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e50e72b667415a816ac27dfcfe686dc5a0b02202e06196b943d54c4f9c7693e"},
+ {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc1ab6a25da197f03ebe6d8fa17273126120874386b4ac11c1d687df288542dd"},
+ {file = "pandas-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0dbfea0dd3901ad4ce2306575c54348d98499c95be01b8d885a2737fe4d7a98"},
+ {file = "pandas-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0489b0e6aa3d907e909aef92975edae89b1ee1654db5eafb9be633b0124abe97"},
+ {file = "pandas-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:4cdb0fab0400c2cb46dafcf1a0fe084c8bb2480a1fa8d81e19d15e12e6d4ded2"},
+ {file = "pandas-2.1.1.tar.gz", hash = "sha256:fecb198dc389429be557cde50a2d46da8434a17fe37d7d41ff102e3987fd947b"},
+]
+
+[package.dependencies]
+numpy = {version = ">=1.22.4", markers = "python_version < \"3.11\""}
+python-dateutil = ">=2.8.2"
+pytz = ">=2020.1"
+tzdata = ">=2022.1"
+
+[package.extras]
+all = ["PyQt5 (>=5.15.6)", "SQLAlchemy (>=1.4.36)", "beautifulsoup4 (>=4.11.1)", "bottleneck (>=1.3.4)", "dataframe-api-compat (>=0.1.7)", "fastparquet (>=0.8.1)", "fsspec (>=2022.05.0)", "gcsfs (>=2022.05.0)", "html5lib (>=1.1)", "hypothesis (>=6.46.1)", "jinja2 (>=3.1.2)", "lxml (>=4.8.0)", "matplotlib (>=3.6.1)", "numba (>=0.55.2)", "numexpr (>=2.8.0)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pandas-gbq (>=0.17.5)", "psycopg2 (>=2.9.3)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.5)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "pyxlsb (>=1.0.9)", "qtpy (>=2.2.0)", "s3fs (>=2022.05.0)", "scipy (>=1.8.1)", "tables (>=3.7.0)", "tabulate (>=0.8.10)", "xarray (>=2022.03.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)", "zstandard (>=0.17.0)"]
+aws = ["s3fs (>=2022.05.0)"]
+clipboard = ["PyQt5 (>=5.15.6)", "qtpy (>=2.2.0)"]
+compression = ["zstandard (>=0.17.0)"]
+computation = ["scipy (>=1.8.1)", "xarray (>=2022.03.0)"]
+consortium-standard = ["dataframe-api-compat (>=0.1.7)"]
+excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.10)", "pyxlsb (>=1.0.9)", "xlrd (>=2.0.1)", "xlsxwriter (>=3.0.3)"]
+feather = ["pyarrow (>=7.0.0)"]
+fss = ["fsspec (>=2022.05.0)"]
+gcp = ["gcsfs (>=2022.05.0)", "pandas-gbq (>=0.17.5)"]
+hdf5 = ["tables (>=3.7.0)"]
+html = ["beautifulsoup4 (>=4.11.1)", "html5lib (>=1.1)", "lxml (>=4.8.0)"]
+mysql = ["SQLAlchemy (>=1.4.36)", "pymysql (>=1.0.2)"]
+output-formatting = ["jinja2 (>=3.1.2)", "tabulate (>=0.8.10)"]
+parquet = ["pyarrow (>=7.0.0)"]
+performance = ["bottleneck (>=1.3.4)", "numba (>=0.55.2)", "numexpr (>=2.8.0)"]
+plot = ["matplotlib (>=3.6.1)"]
+postgresql = ["SQLAlchemy (>=1.4.36)", "psycopg2 (>=2.9.3)"]
+spss = ["pyreadstat (>=1.1.5)"]
+sql-other = ["SQLAlchemy (>=1.4.36)"]
+test = ["hypothesis (>=6.46.1)", "pytest (>=7.3.2)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"]
+xml = ["lxml (>=4.8.0)"]
+
[[package]]
name = "parso"
version = "0.8.3"
@@ -1711,13 +2256,13 @@ testing = ["docopt", "pytest (<6.0.0)"]

[[package]]
name = "pathspec"
-version = "0.11.1"
+version = "0.11.2"
description = "Utility library for gitignore style pattern matching of file paths."
optional = false
python-versions = ">=3.7"
files = [
- {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
- {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
+ {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"},
+ {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"},
]

[[package]]
@@ -1745,96 +2290,192 @@ files = [
{file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"},
]

+[[package]]
+name = "pillow"
+version = "10.1.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"},
+ {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"},
+ {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"},
+ {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"},
+ {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"},
+ {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"},
+ {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"},
+ {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"},
+ {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"},
+ {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"},
+ {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"},
+ {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"},
+ {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"},
+ {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"},
+ {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"},
+ {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"},
+ {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"},
+ {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"},
+ {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"},
+ {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"},
+ {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"},
+ {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"},
+ {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"},
+ {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"},
+ {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"},
+ {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
[[package]]
name = "pin"
-version = "2.6.17"
+version = "2.6.20"
description = "A fast and flexible implementation of Rigid Body Dynamics algorithms and their analytical derivatives"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pin-2.6.17-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:727578d93b5ed8d9860076f5177772e23b0dde04a5dc4f8125c863f042881a48"},
- {file = "pin-2.6.17-0-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:f677effd16cad143dedb223330414d726da26a8ef6cc5fb923c60d80c306adc4"},
- {file = "pin-2.6.17-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:883936f7d17f46812a8f1a59355193c3f67b49a432fb8770e4db5cf94c87be2c"},
- {file = "pin-2.6.17-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:13b366b82260acf60c2de1a08638bcc2f4a1036fba39360d339c3bface461f9d"},
- {file = "pin-2.6.17-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc657f8043a708182262fac6e84f6613fce942dd82c187c9c4efe4c4e40a97b2"},
- {file = "pin-2.6.17-0-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:b051353d97640966cb090612921d4f1589b699cd99aead99937c34eda188ced2"},
- {file = "pin-2.6.17-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5fd26ce39e91b56d11f2b3fb09e4abfe03d511cdc4df7af5e7fc9288aa6e3812"},
- {file = "pin-2.6.17-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dcedd41c32a0912e85386f145e1d1eb7b1cc2cdb755613e8e3c9f39d3bf4dcd"},
- {file = "pin-2.6.17-0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:24c9372b5ce1e5fbfec94c5934fc7e8d770fa86d0afa95b8a9912302ed133a42"},
- {file = "pin-2.6.17-0-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:bd611b751e5e3270dca6d4ea64c2666cdef12da4475a7d6cdab4191a9d73608f"},
- {file = "pin-2.6.17-0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:61eeecb45191bcfb2867b12958d9c9fa5004fcadfd3c2b33d25c248bf426417c"},
- {file = "pin-2.6.17-0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:061f81ea35f622be58ca727789e508b662b90f32caf4f66d4a7c814119067c59"},
- {file = "pin-2.6.17-0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec286a3236504952d09ea2b863e1bc55b8b52a2cab66e1b22f84ed28f9501578"},
- {file = "pin-2.6.17-0-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:c59e4fbbabcbcdb7577644de00b5484bf34c142490e024760d53da5252638e80"},
- {file = "pin-2.6.17-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:9722d96902b4bf24555b52015337662e1c543b8d6ac6bf9adbb73710cdd07b33"},
- {file = "pin-2.6.17-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d6bfac0e4b3cbe09b7e9d003754cb39095af67417981a5a70b46441e03bee038"},
- {file = "pin-2.6.17-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a127677895962f6b4da71e72ec1056fa014644ed8e4146b79eb5fe2bfb8963d6"},
- {file = "pin-2.6.17-0-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:421106a04c2675d99d5c8f73d04d36596ed0326cc0de756c1ef49ce4620f981b"},
- {file = "pin-2.6.17-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:928b118295502e330b608b0de4a26f8dd6e1c64cf9c1ec5a19bc80a7b35bd7eb"},
- {file = "pin-2.6.17-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:aed7ce9c811b337020c96c1b4ac8851f846f03221894dedf38c44392a197e234"},
- {file = "pin-2.6.17-0-pp37-pypy37_pp73-macosx_12_0_x86_64.whl", hash = "sha256:68bfa660edcf20c1eb58e7ccb7a850c5c55ab3613213857b5aa84735dfb07d9c"},
- {file = "pin-2.6.17-0-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:34b6119582d00a45158ba2d4499174d084649f9f4ade5945c1fc654e21a72329"},
- {file = "pin-2.6.17-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8c317060f3abe10af8c378226feeb5d341ddeb1207acd06224f7e2aa505faf88"},
- {file = "pin-2.6.17-0-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:6d6fa8ad32473f04fd60891fe394af2d187c1e0415ee0f8a8d4d408546d9084b"},
- {file = "pin-2.6.17-0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8199aac50baa64217944e8c81d23679f7f944311586ad7a141df76d32eb8b31b"},
- {file = "pin-2.6.17-0-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:4557725e70918c135df63f8a9dc764e29ddb7c018c289f761441b770d456d16d"},
- {file = "pin-2.6.17-2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1935f18cd30de2be7b32f4ea76ceb0cdd265e93f3a92095d31a392df98f9c907"},
- {file = "pin-2.6.17-2-cp310-cp310-manylinux_2_17_x86_64.whl", hash = "sha256:cc603e72bd7f5cfd60fae7f63136cc0d7f895d709b6a9e9ab354dac4a8c79833"},
- {file = "pin-2.6.17-2-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:9f6af1c7f2d604d053745065c03d674532f44b642b3fffb223945116aecb0217"},
- {file = "pin-2.6.17-2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0a85cc5551590242d3867fe7e20107c803b91d4062123e8bc6f51808ffacdc3e"},
- {file = "pin-2.6.17-2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8c9fd9d911fa25706e9ce4a4589979a26543d387d459c7f2d456e219272a1fb3"},
- {file = "pin-2.6.17-2-cp311-cp311-manylinux_2_17_x86_64.whl", hash = "sha256:17c397bc6ab568d527bcdd1506fcd2b070db00b998d1afac28062d12691f11e8"},
- {file = "pin-2.6.17-2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:a980887aa3b0153b230f22905efc7187b36697eccf91c2ac7cbafc9b27597c90"},
- {file = "pin-2.6.17-2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bc3361ad2348e5d06a4857bcbf2a8df3422df447a729a13593b3f7e38bcf8daf"},
- {file = "pin-2.6.17-2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:61abb1d4833f5900863c04661713dcc1397bfd06d43d7a28f8892aa86c66f663"},
- {file = "pin-2.6.17-2-cp37-cp37m-manylinux_2_17_x86_64.whl", hash = "sha256:058b1634913cacd974c005b5d8c61f3d0a2bba75fab0ac134e9982483bee34e4"},
- {file = "pin-2.6.17-2-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:feea84fcba1ff8ecaa9602dd341f6b43d2101478906526853cc574f9ec2a5322"},
- {file = "pin-2.6.17-2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:57b4ab4a1c2c1db72c984736d14b1f2e63b2275da84da98e182bc0af623d11fc"},
- {file = "pin-2.6.17-2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c64783f80916013b57dcf8209a2aa4ce15cfe023dbd098054f2eee03c86f5f25"},
- {file = "pin-2.6.17-2-cp38-cp38-manylinux_2_17_x86_64.whl", hash = "sha256:1f127533605622a9d97fa5dcd1bb6f367198440789cdd966d0f00096e54e628e"},
- {file = "pin-2.6.17-2-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:12d57e71a9d278e891cc9c115cf71104f56c1d6666bb235c980de2870d526225"},
- {file = "pin-2.6.17-2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:68067ece49596c9c1ab5939e0d6d2474f5d7dddee1e796c2ae14d4e41b03ba52"},
- {file = "pin-2.6.17-2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bfac96b83de6bc5a6a37927d40e8b67701028138bb393ff8f88ecd3fe38ac5e4"},
- {file = "pin-2.6.17-2-cp39-cp39-manylinux_2_17_x86_64.whl", hash = "sha256:8c91cf6aeb9415493a0aaf47c687bc93439f7940034b6f8e2d1cb9493335acfa"},
- {file = "pin-2.6.17-2-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c1f4f3eed52e6ccf72ebc10125db9a11230bbdba9a8660aedba5c81913bb4594"},
- {file = "pin-2.6.17-2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f784452b74d2f01bf9a46ab02b38a55b50e143995b515ce7a4ca44eb77d8e4eb"},
- {file = "pin-2.6.17-2-pp37-pypy37_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:fad6a5f5fb2c29422a118dfd22371ea276ed86dbcbd1f817883b7cebce1a42ef"},
- {file = "pin-2.6.17-2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:26f6dc44d798d1c36565f6879f83124ef32057913e5e9ec023395b0a1a06a6d6"},
- {file = "pin-2.6.17-2-pp38-pypy38_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:ee7658e46dbedccfc8d6651c89e89a40d594ca7519d716a07d3afcaa85db61bf"},
- {file = "pin-2.6.17-2-pp39-pypy39_pp73-manylinux_2_17_x86_64.whl", hash = "sha256:1f518048ceb973e95dc51f14c63c30c7ff828543555921484c4a7577e8b25a78"},
+ {file = "pin-2.6.20-0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:fed2deb3f68d4a1244376c5fc9587692e1a791ec5dcd9d5fc8ba49f3630b9d73"},
+ {file = "pin-2.6.20-0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:f132edeb647ac541368758896221eea387c84548e7934ad4f3a809e43989362e"},
+ {file = "pin-2.6.20-0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:859dbb50c5c4236bbfb2bc995b46a5bf80b93bbdebcaf2cfc7c9a1607dd62817"},
+ {file = "pin-2.6.20-0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7d698477b8057bb24654dfab924779fc21614afb2aab4c08359046a78b2ccfa5"},
+ {file = "pin-2.6.20-0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:830b2c03e2b797ae60c9be401b34cda8b2de3785ec9fc75d96b42e5a3454db33"},
+ {file = "pin-2.6.20-0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:3e1661e219bac285215465f726f9affcfbc777eda853638e224e5f9bec023e7e"},
+ {file = "pin-2.6.20-0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:d22de4fa2a00d9b77000b59bd878b30e4ca9f547713450d460f9331335e8e5c1"},
+ {file = "pin-2.6.20-0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:aa6c88fe0f87d0ff02f59e295c06a53f822cccc7be19fd37c1c36ce6387b02ff"},
+ {file = "pin-2.6.20-0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:82081c7e266684318364068ecb965fe8b930fc856c151d49d53bbe1ec2c1e9db"},
+ {file = "pin-2.6.20-0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:2bf28e403ce0945d6854d004ff88e41c2d258ca8d7ffd7ec06377ddd4c2e52aa"},
+ {file = "pin-2.6.20-0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b51d824131437dd62aa62a236cbe8893090366720a8e82a457ff31f5df410d26"},
+ {file = "pin-2.6.20-0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fd7bc26ca6f76418edd636441e02d139f6c837fea08460ae8f498dbf917b1d5f"},
+ {file = "pin-2.6.20-0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:26631d3a6068482e477190aeb952d69736bbb151cc2e550545bfa15228fe4d23"},
+ {file = "pin-2.6.20-0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e998ccc98f93a2941735706a245be62875824c586d23f0b3307abc083f84755a"},
+ {file = "pin-2.6.20-0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7e1efedc6fb56dad076490f04bc0d46f53b3b1ed320e09ea5373eaed44d3c50a"},
+ {file = "pin-2.6.20-0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:150ece3325a7b4ba4b9b065f1a8ead309d2cf4c0a1120324abb48573d2bdcae4"},
+ {file = "pin-2.6.20-0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0ce9d40fdd7e075bec98e5269e6a2a4bb725c0a22ba978aae92d9bdb9071d8d5"},
+ {file = "pin-2.6.20-0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:777bf0c1124de203c4f6c3e1a22aa188e238644c914a0bd5c68631c2f47f428f"},
+ {file = "pin-2.6.20-0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:a3076505c9ffd1102df73756cd43d8ca2708fa1c2b5ff32bc076fd5e23b655f5"},
+ {file = "pin-2.6.20-0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ebff0d4ecfcfcefaa9f6e0aaaa6bc7907e845c9411ca968c377382104586727d"},
+ {file = "pin-2.6.20-1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72c95cea598bf08e45ff70d6841581b0b45a8ae6b45a5cbd18472fdb343e47bd"},
+ {file = "pin-2.6.20-1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ebbc9ce0c1397edf4bc6c6e45fdf547c5878dc2a486bb9b7faae42d36f9d5cd5"},
+ {file = "pin-2.6.20-1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:f3e041d80ea9ce14121aa22934a9cdb9873908d2f694527a262b53cec7f944eb"},
+ {file = "pin-2.6.20-1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:031912fd5fc17f0a0cabeda232348bd10238ba89463461ca42f97894c90ce0af"},
+ {file = "pin-2.6.20-1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:efacbca1ab4b9448f389eb27f768cefa3dc776f0e74764ed90ef4c6cbccf8e7c"},
+ {file = "pin-2.6.20-1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b9fbbe3d67ddda918c749e9abed35945780d2abe71f4163e05f01245d7117ec6"},
+ {file = "pin-2.6.20-1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08fdbb99f59b06f6fc29623b32bd8f051e9f69be55bf485f15bfd72ffe7c24f5"},
+ {file = "pin-2.6.20-1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:9f7b32525a2bcff61d59a96d9cb29a4ec759407b1cf94508c1ec43f8612fd304"},
+ {file = "pin-2.6.20-1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:05d4081a7ac57d374f4764bed7f14e63ab286d99d5b6cc9ff5e0e0fa1cbc348c"},
+ {file = "pin-2.6.20-1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:10f2abb19674aaf1a8df998fe38a805a45fbee321a98dd8a3ede909a946a5697"},
+ {file = "pin-2.6.20-1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:1f2b0589b334fb6785f9175dab06e55b456e65778937548311c3c351dcdc3b6a"},
+ {file = "pin-2.6.20-1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:d9b2d4f513efd84a62227b990634ee731cc31448f0bd2fda24d4f224848425ca"},
+ {file = "pin-2.6.20-1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f1a51350e13becdb5de95d3bc0238b25201652e90f877062b9efcbe3cfe3a1a3"},
+ {file = "pin-2.6.20-1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c6e21d63068bbe75d519c7e61fa8aa8b853992bbd35538c1439438a5151bfccf"},
+ {file = "pin-2.6.20-1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:60ddfe38ebf7ed7bc7b28e38c67ac4ec0a2498a7b68db5f023cb106f5af18a4a"},
+ {file = "pin-2.6.20-1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8870d53d5f590a9540ac33693456f54f6d2ebc3c6c9de607b3dea7487277e2f5"},
+ {file = "pin-2.6.20-1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:ab9e368a332535d72b31bf62313fe53d261656b9079a11bd2ba0ebec53777598"},
+ {file = "pin-2.6.20-1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:881cc63c90e591086fd97a26719e79f750b060163a21c7358c2943961fd084ae"},
+ {file = "pin-2.6.20-1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0c432eff833029593d55ed57711ecbd5215382436f9080a402b480ef399b069f"},
+ {file = "pin-2.6.20-1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:90c179194a60b0605da2a828a9ebe07e2e48fb4ac6e601fac68c9b05d6dd5dbe"},
+ {file = "pin-2.6.20-1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e511820ad288d2fa4820c41556efefce52c9a640fe4c5f4a275d2352188c9dad"},
+ {file = "pin-2.6.20-1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abe836e7fbe9166b9f6b9ab2ce47e5a39de87f71fa4f88a54850933ab55a8961"},
+ {file = "pin-2.6.20-1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:d8ef02e487a480eb21935c117f9cdd29e52f13becf9090829b42cd8dffa73454"},
+ {file = "pin-2.6.20-1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:9bcfeacd73762b2ebfffff32c76d9934086d3e12a525868933ddd23262635712"},
+ {file = "pin-2.6.20-1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:18db5d2cb29b332ad418a9292393697d17312c70b1b0ea49cf3ed9e2029d67b1"},
+ {file = "pin-2.6.20-1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:60252cd5e51282b8e938b21f96ee8b3e7d9a0a842ad7584f980a86af1ee0596b"},
]

[package.dependencies]
cmeel = "*"
-cmeel-console-bridge = "*"
-cmeel-tinyxml = "*"
-cmeel-urdfdom = "*"
-hpp-fcl = "*"
+cmeel-boost = ">=1.82.0,<1.83.0"
+cmeel-console-bridge = ">=1.0.2.2,<2"
+cmeel-tinyxml = ">=2.6.2.2,<3"
+cmeel-urdfdom = ">=3.1.0.3,<4"
+hpp-fcl = ">=2.3.4,<4"
+
+[package.extras]
+build = ["cmeel-urdfdom[build] (>=3.1.0.3,<4)", "hpp-fcl[build] (>=2.3.4,<4)"]
[[package]]
name = "platformdirs"
-version = "3.1.1"
+version = "3.11.0"
description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
optional = false
python-versions = ">=3.7"
files = [
- {file = "platformdirs-3.1.1-py3-none-any.whl", hash = "sha256:e5986afb596e4bb5bde29a79ac9061aa955b94fca2399b7aaac4090860920dd8"},
- {file = "platformdirs-3.1.1.tar.gz", hash = "sha256:024996549ee88ec1a9aa99ff7f8fc819bb59e2c3477b410d90a16d32d6e707aa"},
+ {file = "platformdirs-3.11.0-py3-none-any.whl", hash = "sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e"},
+ {file = "platformdirs-3.11.0.tar.gz", hash = "sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3"},
]

[package.extras]
-docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
-test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
+
+[[package]]
+name = "plotly"
+version = "5.18.0"
+description = "An open-source, interactive data visualization library for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "plotly-5.18.0-py3-none-any.whl", hash = "sha256:23aa8ea2f4fb364a20d34ad38235524bd9d691bf5299e800bca608c31e8db8de"},
+ {file = "plotly-5.18.0.tar.gz", hash = "sha256:360a31e6fbb49d12b007036eb6929521343d6bee2236f8459915821baefa2cbb"},
+]
+
+[package.dependencies]
+packaging = "*"
+tenacity = ">=6.2.0"
+
+[[package]]
+name = "plyfile"
+version = "1.0.1"
+description = "PLY file reader/writer"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "plyfile-1.0.1-py3-none-any.whl", hash = "sha256:57620f14269c6ef41c5782f4547894e0bac2473e2c6f326c4d7fc75ad1ae6fea"},
+ {file = "plyfile-1.0.1.tar.gz", hash = "sha256:4ceaedf1ed92b3a26b766fc8c56cda1b9b2390ec299b16dede2e5fd45097261a"},
+]
+
+[package.dependencies]
+numpy = ">=1.17"
[[package]]
name = "pre-commit"
-version = "3.2.1"
+version = "3.5.0"
description = "A framework for managing and maintaining multi-language pre-commit hooks."
optional = false
python-versions = ">=3.8"
files = [
- {file = "pre_commit-3.2.1-py2.py3-none-any.whl", hash = "sha256:a06a7fcce7f420047a71213c175714216498b49ebc81fe106f7716ca265f5bb6"},
- {file = "pre_commit-3.2.1.tar.gz", hash = "sha256:b5aee7d75dbba21ee161ba641b01e7ae10c5b91967ebf7b2ab0dfae12d07e1f1"},
+ {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"},
+ {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"},
]

[package.dependencies]
@@ -1846,13 +2487,13 @@ virtualenv = ">=20.10.0"

[[package]]
name = "prompt-toolkit"
-version = "3.0.38"
+version = "3.0.39"
description = "Library for building powerful interactive command lines in Python"
optional = false
python-versions = ">=3.7.0"
files = [
- {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"},
- {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"},
+ {file = "prompt_toolkit-3.0.39-py3-none-any.whl", hash = "sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88"},
+ {file = "prompt_toolkit-3.0.39.tar.gz", hash = "sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac"},
]

[package.dependencies]
@@ -1883,6 +2524,20 @@ files = [
[package.extras]
tests = ["pytest"]
+[[package]]
+name = "pybind11"
+version = "2.11.1"
+description = "Seamless operability between C++11 and Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pybind11-2.11.1-py3-none-any.whl", hash = "sha256:33cdd02a6453380dd71cc70357ce388ad1ee8d32bd0e38fc22b273d050aa29b3"},
+ {file = "pybind11-2.11.1.tar.gz", hash = "sha256:00cd59116a6e8155aecd9174f37ba299d1d397ed4a6b86ac1dfe01b3e40f2cc4"},
+]
+
+[package.extras]
+global = ["pybind11-global (==2.11.1)"]
+
[[package]]
name = "pybullet"
version = "3.2.5"
@@ -1902,140 +2557,697 @@ files = [

[[package]]
name = "pygments"
-version = "2.14.0"
+version = "2.16.1"
description = "Pygments is a syntax highlighting package written in Python."
optional = false
-python-versions = ">=3.6"
+python-versions = ">=3.7"
files = [
- {file = "Pygments-2.14.0-py3-none-any.whl", hash = "sha256:fa7bd7bd2771287c0de303af8bfdfc731f51bd2c6a47ab69d117138893b82717"},
- {file = "Pygments-2.14.0.tar.gz", hash = "sha256:b3ed06a9e8ac9a9aae5a6f5dbe78a8a58655d17b43b93c078f094ddc476ae297"},
+ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"},
+ {file = "Pygments-2.16.1.tar.gz", hash = "sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29"},
]

[package.extras]
plugins = ["importlib-metadata"]
+[[package]]
+name = "pyopengl"
+version = "3.1.0"
+description = "Standard OpenGL bindings for Python"
+optional = true
+python-versions = "*"
+files = [
+ {file = "PyOpenGL-3.1.0.tar.gz", hash = "sha256:9b47c5c3a094fa518ca88aeed35ae75834d53e4285512c61879f67a48c94ddaf"},
+ {file = "PyOpenGL-3.1.0.zip", hash = "sha256:efa4e39a49b906ccbe66758812ca81ced13a6f26931ab2ba2dba2750c016c0d0"},
+]
+
+[[package]]
+name = "pyparsing"
+version = "3.1.1"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"},
+ {file = "pyparsing-3.1.1.tar.gz", hash = "sha256:ede28a1a32462f5a9705e07aea48001a08f7cf81a021585011deba701581a0db"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pypng"
+version = "0.20220715.0"
+description = "Pure Python library for saving and loading PNG images"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pypng-0.20220715.0-py3-none-any.whl", hash = "sha256:4a43e969b8f5aaafb2a415536c1a8ec7e341cd6a3f957fd5b5f32a4cfeed902c"},
+ {file = "pypng-0.20220715.0.tar.gz", hash = "sha256:739c433ba96f078315de54c0db975aee537cbc3e1d0ae4ed9aab0ca1e427e2c1"},
+]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
[[package]]
name = "pytz"
-version = "2022.7.1"
+version = "2023.3.post1"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
files = [
- {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"},
- {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"},
+ {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"},
+ {file = "pytz-2023.3.post1.tar.gz", hash = "sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b"},
+]
+
+[[package]]
+name = "pywavelets"
+version = "1.4.1"
+description = "PyWavelets, wavelet transform module"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "PyWavelets-1.4.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:d854411eb5ee9cb4bc5d0e66e3634aeb8f594210f6a1bed96dbed57ec70f181c"},
+ {file = "PyWavelets-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:231b0e0b1cdc1112f4af3c24eea7bf181c418d37922a67670e9bf6cfa2d544d4"},
+ {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:754fa5085768227c4f4a26c1e0c78bc509a266d9ebd0eb69a278be7e3ece943c"},
+ {file = "PyWavelets-1.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da7b9c006171be1f9ddb12cc6e0d3d703b95f7f43cb5e2c6f5f15d3233fcf202"},
+ {file = "PyWavelets-1.4.1-cp310-cp310-win32.whl", hash = "sha256:67a0d28a08909f21400cb09ff62ba94c064882ffd9e3a6b27880a111211d59bd"},
+ {file = "PyWavelets-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:91d3d393cffa634f0e550d88c0e3f217c96cfb9e32781f2960876f1808d9b45b"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:64c6bac6204327321db30b775060fbe8e8642316e6bff17f06b9f34936f88875"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3f19327f2129fb7977bc59b966b4974dfd72879c093e44a7287500a7032695de"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad987748f60418d5f4138db89d82ba0cb49b086e0cbb8fd5c3ed4a814cfb705e"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:875d4d620eee655346e3589a16a73790cf9f8917abba062234439b594e706784"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-win32.whl", hash = "sha256:7231461d7a8eb3bdc7aa2d97d9f67ea5a9f8902522818e7e2ead9c2b3408eeb1"},
+ {file = "PyWavelets-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:daf0aa79842b571308d7c31a9c43bc99a30b6328e6aea3f50388cd8f69ba7dbc"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:ab7da0a17822cd2f6545626946d3b82d1a8e106afc4b50e3387719ba01c7b966"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:578af438a02a86b70f1975b546f68aaaf38f28fb082a61ceb799816049ed18aa"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb5ca8d11d3f98e89e65796a2125be98424d22e5ada360a0dbabff659fca0fc"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:058b46434eac4c04dd89aeef6fa39e4b6496a951d78c500b6641fd5b2cc2f9f4"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-win32.whl", hash = "sha256:de7cd61a88a982edfec01ea755b0740e94766e00a1ceceeafef3ed4c85c605cd"},
+ {file = "PyWavelets-1.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:7ab8d9db0fe549ab2ee0bea61f614e658dd2df419d5b75fba47baa761e95f8f2"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:23bafd60350b2b868076d976bdd92f950b3944f119b4754b1d7ff22b7acbf6c6"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0e56cd7a53aed3cceca91a04d62feb3a0aca6725b1912d29546c26f6ea90426"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:030670a213ee8fefa56f6387b0c8e7d970c7f7ad6850dc048bd7c89364771b9b"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71ab30f51ee4470741bb55fc6b197b4a2b612232e30f6ac069106f0156342356"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-win32.whl", hash = "sha256:47cac4fa25bed76a45bc781a293c26ac63e8eaae9eb8f9be961758d22b58649c"},
+ {file = "PyWavelets-1.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:88aa5449e109d8f5e7f0adef85f7f73b1ab086102865be64421a3a3d02d277f4"},
+ {file = "PyWavelets-1.4.1.tar.gz", hash = "sha256:6437af3ddf083118c26d8f97ab43b0724b956c9f958e9ea788659f6a2834ba93"},
+]
+
+[package.dependencies]
+numpy = ">=1.17.3"
+
+[[package]]
+name = "pywin32"
+version = "306"
+description = "Python for Window Extensions"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"},
+ {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"},
+ {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"},
+ {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"},
+ {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"},
+ {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"},
+ {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"},
+ {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"},
+ {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"},
+ {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"},
+ {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"},
+ {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"},
+ {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"},
+ {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"},
]

[[package]]
name = "pyyaml"
-version = "6.0"
+version = "6.0.1"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.6"
files = [
- {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
- {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
- {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
- {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
- {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
- {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
- {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
- {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
- {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
- {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
- {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
- {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
- {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
- {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
- {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
- {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
- {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
- {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
- {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
- {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
- {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
- {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
- {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
- {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
- {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
- {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
- {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "referencing"
+version = "0.30.2"
+description = "JSON Referencing + Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"},
+ {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"},
]

+[package.dependencies]
+attrs = ">=22.2.0"
+rpds-py = ">=0.7.0"
+
[[package]]
name = "requests"
-version = "2.28.2"
+version = "2.31.0"
description = "Python HTTP for Humans."
optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
files = [
- {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
- {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
]

[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"

[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

+[[package]]
+name = "retrying"
+version = "1.3.4"
+description = "Retrying"
+optional = false
+python-versions = "*"
+files = [
+ {file = "retrying-1.3.4-py3-none-any.whl", hash = "sha256:8cc4d43cb8e1125e0ff3344e9de678fefd85db3b750b81b2240dc0183af37b35"},
+ {file = "retrying-1.3.4.tar.gz", hash = "sha256:345da8c5765bd982b1d1915deb9102fd3d1f7ad16bd84a9700b85f64d24e8f3e"},
+]
+
+[package.dependencies]
+six = ">=1.7.0"
+
+[[package]]
+name = "roma"
+version = "1.4.1"
+description = "A lightweight library to deal with 3D rotations in PyTorch."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "roma-1.4.1-py3-none-any.whl", hash = "sha256:cbee6090533d26dc5abd27f894b6eeb794f356c1aef9bcb27b62abb42a3e10a0"},
+ {file = "roma-1.4.1.tar.gz", hash = "sha256:76e3f580a63281ce0fdcc671da4e49a79b068ad59f3a5cf0fd2d6cba1d2a09f1"},
+]
+
+[[package]]
+name = "rpds-py"
+version = "0.10.6"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rpds_py-0.10.6-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:6bdc11f9623870d75692cc33c59804b5a18d7b8a4b79ef0b00b773a27397d1f6"},
+ {file = "rpds_py-0.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26857f0f44f0e791f4a266595a7a09d21f6b589580ee0585f330aaccccb836e3"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7f5e15c953ace2e8dde9824bdab4bec50adb91a5663df08d7d994240ae6fa31"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:61fa268da6e2e1cd350739bb61011121fa550aa2545762e3dc02ea177ee4de35"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c48f3fbc3e92c7dd6681a258d22f23adc2eb183c8cb1557d2fcc5a024e80b094"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0503c5b681566e8b722fe8c4c47cce5c7a51f6935d5c7012c4aefe952a35eed"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:734c41f9f57cc28658d98270d3436dba65bed0cfc730d115b290e970150c540d"},
+ {file = "rpds_py-0.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a5d7ed104d158c0042a6a73799cf0eb576dfd5fc1ace9c47996e52320c37cb7c"},
+ {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e3df0bc35e746cce42579826b89579d13fd27c3d5319a6afca9893a9b784ff1b"},
+ {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:73e0a78a9b843b8c2128028864901f55190401ba38aae685350cf69b98d9f7c9"},
+ {file = "rpds_py-0.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:5ed505ec6305abd2c2c9586a7b04fbd4baf42d4d684a9c12ec6110deefe2a063"},
+ {file = "rpds_py-0.10.6-cp310-none-win32.whl", hash = "sha256:d97dd44683802000277bbf142fd9f6b271746b4846d0acaf0cefa6b2eaf2a7ad"},
+ {file = "rpds_py-0.10.6-cp310-none-win_amd64.whl", hash = "sha256:b455492cab07107bfe8711e20cd920cc96003e0da3c1f91297235b1603d2aca7"},
+ {file = "rpds_py-0.10.6-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:e8cdd52744f680346ff8c1ecdad5f4d11117e1724d4f4e1874f3a67598821069"},
+ {file = "rpds_py-0.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:66414dafe4326bca200e165c2e789976cab2587ec71beb80f59f4796b786a238"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc435d059f926fdc5b05822b1be4ff2a3a040f3ae0a7bbbe672babb468944722"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8e7f2219cb72474571974d29a191714d822e58be1eb171f229732bc6fdedf0ac"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3953c6926a63f8ea5514644b7afb42659b505ece4183fdaaa8f61d978754349e"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bb2e4826be25e72013916eecd3d30f66fd076110de09f0e750163b416500721"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bf347b495b197992efc81a7408e9a83b931b2f056728529956a4d0858608b80"},
+ {file = "rpds_py-0.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:102eac53bb0bf0f9a275b438e6cf6904904908562a1463a6fc3323cf47d7a532"},
+ {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40f93086eef235623aa14dbddef1b9fb4b22b99454cb39a8d2e04c994fb9868c"},
+ {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:e22260a4741a0e7a206e175232867b48a16e0401ef5bce3c67ca5b9705879066"},
+ {file = "rpds_py-0.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f4e56860a5af16a0fcfa070a0a20c42fbb2012eed1eb5ceeddcc7f8079214281"},
+ {file = "rpds_py-0.10.6-cp311-none-win32.whl", hash = "sha256:0774a46b38e70fdde0c6ded8d6d73115a7c39d7839a164cc833f170bbf539116"},
+ {file = "rpds_py-0.10.6-cp311-none-win_amd64.whl", hash = "sha256:4a5ee600477b918ab345209eddafde9f91c0acd931f3776369585a1c55b04c57"},
+ {file = "rpds_py-0.10.6-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:5ee97c683eaface61d38ec9a489e353d36444cdebb128a27fe486a291647aff6"},
+ {file = "rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0713631d6e2d6c316c2f7b9320a34f44abb644fc487b77161d1724d883662e31"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5a53f5998b4bbff1cb2e967e66ab2addc67326a274567697379dd1e326bded7"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6a555ae3d2e61118a9d3e549737bb4a56ff0cec88a22bd1dfcad5b4e04759175"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:945eb4b6bb8144909b203a88a35e0a03d22b57aefb06c9b26c6e16d72e5eb0f0"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52c215eb46307c25f9fd2771cac8135d14b11a92ae48d17968eda5aa9aaf5071"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1b3cd23d905589cb205710b3988fc8f46d4a198cf12862887b09d7aaa6bf9b9"},
+ {file = "rpds_py-0.10.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64ccc28683666672d7c166ed465c09cee36e306c156e787acef3c0c62f90da5a"},
+ {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:516a611a2de12fbea70c78271e558f725c660ce38e0006f75139ba337d56b1f6"},
+ {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9ff93d3aedef11f9c4540cf347f8bb135dd9323a2fc705633d83210d464c579d"},
+ {file = "rpds_py-0.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d858532212f0650be12b6042ff4378dc2efbb7792a286bee4489eaa7ba010586"},
+ {file = "rpds_py-0.10.6-cp312-none-win32.whl", hash = "sha256:3c4eff26eddac49d52697a98ea01b0246e44ca82ab09354e94aae8823e8bda02"},
+ {file = "rpds_py-0.10.6-cp312-none-win_amd64.whl", hash = "sha256:150eec465dbc9cbca943c8e557a21afdcf9bab8aaabf386c44b794c2f94143d2"},
+ {file = "rpds_py-0.10.6-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:cf693eb4a08eccc1a1b636e4392322582db2a47470d52e824b25eca7a3977b53"},
+ {file = "rpds_py-0.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4134aa2342f9b2ab6c33d5c172e40f9ef802c61bb9ca30d21782f6e035ed0043"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e782379c2028a3611285a795b89b99a52722946d19fc06f002f8b53e3ea26ea9"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2f6da6d842195fddc1cd34c3da8a40f6e99e4a113918faa5e60bf132f917c247"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a9fe992887ac68256c930a2011255bae0bf5ec837475bc6f7edd7c8dfa254e"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b788276a3c114e9f51e257f2a6f544c32c02dab4aa7a5816b96444e3f9ffc336"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa1afc70a02645809c744eefb7d6ee8fef7e2fad170ffdeacca267fd2674f13"},
+ {file = "rpds_py-0.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bddd4f91eede9ca5275e70479ed3656e76c8cdaaa1b354e544cbcf94c6fc8ac4"},
+ {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:775049dfa63fb58293990fc59473e659fcafd953bba1d00fc5f0631a8fd61977"},
+ {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:c6c45a2d2b68c51fe3d9352733fe048291e483376c94f7723458cfd7b473136b"},
+ {file = "rpds_py-0.10.6-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0699ab6b8c98df998c3eacf51a3b25864ca93dab157abe358af46dc95ecd9801"},
+ {file = "rpds_py-0.10.6-cp38-none-win32.whl", hash = "sha256:ebdab79f42c5961682654b851f3f0fc68e6cc7cd8727c2ac4ffff955154123c1"},
+ {file = "rpds_py-0.10.6-cp38-none-win_amd64.whl", hash = "sha256:24656dc36f866c33856baa3ab309da0b6a60f37d25d14be916bd3e79d9f3afcf"},
+ {file = "rpds_py-0.10.6-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:0898173249141ee99ffcd45e3829abe7bcee47d941af7434ccbf97717df020e5"},
+ {file = "rpds_py-0.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9e9184fa6c52a74a5521e3e87badbf9692549c0fcced47443585876fcc47e469"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5752b761902cd15073a527b51de76bbae63d938dc7c5c4ad1e7d8df10e765138"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:99a57006b4ec39dbfb3ed67e5b27192792ffb0553206a107e4aadb39c5004cd5"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09586f51a215d17efdb3a5f090d7cbf1633b7f3708f60a044757a5d48a83b393"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e225a6a14ecf44499aadea165299092ab0cba918bb9ccd9304eab1138844490b"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2039f8d545f20c4e52713eea51a275e62153ee96c8035a32b2abb772b6fc9e5"},
+ {file = "rpds_py-0.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:34ad87a831940521d462ac11f1774edf867c34172010f5390b2f06b85dcc6014"},
+ {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dcdc88b6b01015da066da3fb76545e8bb9a6880a5ebf89e0f0b2e3ca557b3ab7"},
+ {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:25860ed5c4e7f5e10c496ea78af46ae8d8468e0be745bd233bab9ca99bfd2647"},
+ {file = "rpds_py-0.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7854a207ef77319ec457c1eb79c361b48807d252d94348305db4f4b62f40f7f3"},
+ {file = "rpds_py-0.10.6-cp39-none-win32.whl", hash = "sha256:e6fcc026a3f27c1282c7ed24b7fcac82cdd70a0e84cc848c0841a3ab1e3dea2d"},
+ {file = "rpds_py-0.10.6-cp39-none-win_amd64.whl", hash = "sha256:e98c4c07ee4c4b3acf787e91b27688409d918212dfd34c872201273fdd5a0e18"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:68fe9199184c18d997d2e4293b34327c0009a78599ce703e15cd9a0f47349bba"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:3339eca941568ed52d9ad0f1b8eb9fe0958fa245381747cecf2e9a78a5539c42"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a360cfd0881d36c6dc271992ce1eda65dba5e9368575663de993eeb4523d895f"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:031f76fc87644a234883b51145e43985aa2d0c19b063e91d44379cd2786144f8"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f36a9d751f86455dc5278517e8b65580eeee37d61606183897f122c9e51cef3"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:052a832078943d2b2627aea0d19381f607fe331cc0eb5df01991268253af8417"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023574366002bf1bd751ebaf3e580aef4a468b3d3c216d2f3f7e16fdabd885ed"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:defa2c0c68734f4a82028c26bcc85e6b92cced99866af118cd6a89b734ad8e0d"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:879fb24304ead6b62dbe5034e7b644b71def53c70e19363f3c3be2705c17a3b4"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:53c43e10d398e365da2d4cc0bcaf0854b79b4c50ee9689652cdc72948e86f487"},
+ {file = "rpds_py-0.10.6-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:3777cc9dea0e6c464e4b24760664bd8831738cc582c1d8aacf1c3f546bef3f65"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:40578a6469e5d1df71b006936ce95804edb5df47b520c69cf5af264d462f2cbb"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:cf71343646756a072b85f228d35b1d7407da1669a3de3cf47f8bbafe0c8183a4"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10f32b53f424fc75ff7b713b2edb286fdbfc94bf16317890260a81c2c00385dc"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:81de24a1c51cfb32e1fbf018ab0bdbc79c04c035986526f76c33e3f9e0f3356c"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac17044876e64a8ea20ab132080ddc73b895b4abe9976e263b0e30ee5be7b9c2"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5e8a78bd4879bff82daef48c14d5d4057f6856149094848c3ed0ecaf49f5aec2"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78ca33811e1d95cac8c2e49cb86c0fb71f4d8409d8cbea0cb495b6dbddb30a55"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c63c3ef43f0b3fb00571cff6c3967cc261c0ebd14a0a134a12e83bdb8f49f21f"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:7fde6d0e00b2fd0dbbb40c0eeec463ef147819f23725eda58105ba9ca48744f4"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:79edd779cfc46b2e15b0830eecd8b4b93f1a96649bcb502453df471a54ce7977"},
+ {file = "rpds_py-0.10.6-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9164ec8010327ab9af931d7ccd12ab8d8b5dc2f4c6a16cbdd9d087861eaaefa1"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:d29ddefeab1791e3c751e0189d5f4b3dbc0bbe033b06e9c333dca1f99e1d523e"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:30adb75ecd7c2a52f5e76af50644b3e0b5ba036321c390b8e7ec1bb2a16dd43c"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd609fafdcdde6e67a139898196698af37438b035b25ad63704fd9097d9a3482"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6eef672de005736a6efd565577101277db6057f65640a813de6c2707dc69f396"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cf4393c7b41abbf07c88eb83e8af5013606b1cdb7f6bc96b1b3536b53a574b8"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad857f42831e5b8d41a32437f88d86ead6c191455a3499c4b6d15e007936d4cf"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d7360573f1e046cb3b0dceeb8864025aa78d98be4bb69f067ec1c40a9e2d9df"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d08f63561c8a695afec4975fae445245386d645e3e446e6f260e81663bfd2e38"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:f0f17f2ce0f3529177a5fff5525204fad7b43dd437d017dd0317f2746773443d"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:442626328600bde1d09dc3bb00434f5374948838ce75c41a52152615689f9403"},
+ {file = "rpds_py-0.10.6-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:e9616f5bd2595f7f4a04b67039d890348ab826e943a9bfdbe4938d0eba606971"},
+ {file = "rpds_py-0.10.6.tar.gz", hash = "sha256:4ce5a708d65a8dbf3748d2474b580d606b1b9f91b5c6ab2a316e0b0cf7a4ba50"},
+]
+
[[package]]
name = "ruff"
-version = "0.0.260"
+version = "0.0.290"
description = "An extremely fast Python linter, written in Rust."
optional = false
python-versions = ">=3.7"
files = [
- {file = "ruff-0.0.260-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:c559650b623f3fbdc39c7ed1bcb064765c666a53ee738c53d1461afbf3f23db2"},
- {file = "ruff-0.0.260-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:90ff1479e292a84c388a8a035d223247ddeea5f6760752a9142b88b6d59ac334"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25584d1b9f445fde72651caab97e7430a4c5bfd2a0ce9af39868753826cba10d"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8032e35357384a29791c75194a71e314031171eb0731fcaa872dfaf4c1f4470a"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e4fa7293f97c021825b3b72f2bf53f0eb4f59625608a889678c1fc6660f412d"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:8bec0271e2c8cd36bcf915cb9f6a93e40797a3ff3d2cda4ca87b7bed9e598472"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e075a61aaff8ebe56172217f0ac14c5df9637b289bf161ac697445a9003d5c2"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f8678f54eb2696481618902a10c3cb28325f3323799af99997ad6f06005ea4f5"},
- {file = "ruff-0.0.260-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57d9f0bfdef739b76aa3112b9182a214f0f34589a2659f88353492c7670fe2fe"},
- {file = "ruff-0.0.260-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ec1f77219ba5adaa194289cb82ba924ff2ed931fd00b8541d66a1724c89fbc9"},
- {file = "ruff-0.0.260-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:aae2170a7ec6f7fc4a73db30aa7aa7fce936176bf66bf85f77f69ddd1dd4a665"},
- {file = "ruff-0.0.260-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5f847b72ef994ab88e9da250c7eb5cbb3f1555b92a9f22c5ed1c27a44b7e98d6"},
- {file = "ruff-0.0.260-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:6dd705d4eff405c2b70513188fbff520f49db6df91f0d5e8258c5d469efa58bc"},
- {file = "ruff-0.0.260-py3-none-win32.whl", hash = "sha256:3866a96b2ef92c7d837ba6bf8fc9dd125a67886f1c5512ad6fa5d5fefaceff87"},
- {file = "ruff-0.0.260-py3-none-win_amd64.whl", hash = "sha256:0733d524946decbd4f1e63f7dc26820f5c1e6c31da529ba20fb995057f8e79b1"},
- {file = "ruff-0.0.260-py3-none-win_arm64.whl", hash = "sha256:12542a26f189a5a10c719bfa14d415d0511ac05e5c9ff5e79cc9d5cc50b81bc8"},
- {file = "ruff-0.0.260.tar.gz", hash = "sha256:ea8f94262f33b81c47ee9d81f455b144e94776f5c925748cb0c561a12206eae1"},
+ {file = "ruff-0.0.290-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:0e2b09ac4213b11a3520221083866a5816616f3ae9da123037b8ab275066fbac"},
+ {file = "ruff-0.0.290-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:4ca6285aa77b3d966be32c9a3cd531655b3d4a0171e1f9bf26d66d0372186767"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35e3550d1d9f2157b0fcc77670f7bb59154f223bff281766e61bdd1dd854e0c5"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d748c8bd97874f5751aed73e8dde379ce32d16338123d07c18b25c9a2796574a"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:982af5ec67cecd099e2ef5e238650407fb40d56304910102d054c109f390bf3c"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:bbd37352cea4ee007c48a44c9bc45a21f7ba70a57edfe46842e346651e2b995a"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d9be6351b7889462912e0b8185a260c0219c35dfd920fb490c7f256f1d8313e"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75cdc7fe32dcf33b7cec306707552dda54632ac29402775b9e212a3c16aad5e6"},
+ {file = "ruff-0.0.290-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb07f37f7aecdbbc91d759c0c09870ce0fb3eed4025eebedf9c4b98c69abd527"},
+ {file = "ruff-0.0.290-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:2ab41bc0ba359d3f715fc7b705bdeef19c0461351306b70a4e247f836b9350ed"},
+ {file = "ruff-0.0.290-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:150bf8050214cea5b990945b66433bf9a5e0cef395c9bc0f50569e7de7540c86"},
+ {file = "ruff-0.0.290-py3-none-musllinux_1_2_i686.whl", hash = "sha256:75386ebc15fe5467248c039f5bf6a0cfe7bfc619ffbb8cd62406cd8811815fca"},
+ {file = "ruff-0.0.290-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ac93eadf07bc4ab4c48d8bb4e427bf0f58f3a9c578862eb85d99d704669f5da0"},
+ {file = "ruff-0.0.290-py3-none-win32.whl", hash = "sha256:461fbd1fb9ca806d4e3d5c745a30e185f7cf3ca77293cdc17abb2f2a990ad3f7"},
+ {file = "ruff-0.0.290-py3-none-win_amd64.whl", hash = "sha256:f1f49f5ec967fd5778813780b12a5650ab0ebcb9ddcca28d642c689b36920796"},
+ {file = "ruff-0.0.290-py3-none-win_arm64.whl", hash = "sha256:ae5a92dfbdf1f0c689433c223f8dac0782c2b2584bd502dfdbc76475669f1ba1"},
+ {file = "ruff-0.0.290.tar.gz", hash = "sha256:949fecbc5467bb11b8db810a7fa53c7e02633856ee6bd1302b2f43adcd71b88d"},
+]
+
+[[package]]
+name = "scikit-image"
+version = "0.21.0"
+description = "Image processing in Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "scikit_image-0.21.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:978ac3302252155a8556cdfe067bad2d18d5ccef4e91c2f727bc564ed75566bc"},
+ {file = "scikit_image-0.21.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:82c22e008527e5ee26ab08e3ce919998ef164d538ff30b9e5764b223cfda06b1"},
+ {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fd29d2631d3e975c377066acfc1f4cb2cc95e2257cf70e7fedfcb96441096e88"},
+ {file = "scikit_image-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c6c12925ceb9f3aede555921e26642d601b2d37d1617002a2636f2cb5178ae2f"},
+ {file = "scikit_image-0.21.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f538d4de77e4f3225d068d9ea2965bed3f7dda7f457a8f89634fa22ffb9ad8c"},
+ {file = "scikit_image-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ec9bab6920ac43037d7434058b67b5778d42c60f67b8679239f48a471e7ed6f8"},
+ {file = "scikit_image-0.21.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:a54720430dba833ffbb6dedd93d9f0938c5dd47d20ab9ba3e4e61c19d95f6f19"},
+ {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e40dd102da14cdadc09210f930b4556c90ff8f99cd9d8bcccf9f73a86c44245"},
+ {file = "scikit_image-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff5719c7eb99596a39c3e1d9b564025bae78ecf1da3ee6842d34f6965b5f1474"},
+ {file = "scikit_image-0.21.0-cp311-cp311-win_amd64.whl", hash = "sha256:146c3824253eee9ff346c4ad10cb58376f91aefaf4a4bb2fe11aa21691f7de76"},
+ {file = "scikit_image-0.21.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4e1b09f81a99c9c390215929194847b3cd358550b4b65bb6e42c5393d69cb74a"},
+ {file = "scikit_image-0.21.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:9f7b5fb4a22f0d5ae0fa13beeb887c925280590145cd6d8b2630794d120ff7c7"},
+ {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4814033717f0b6491fee252facb9df92058d6a72ab78dd6408a50f3915a88b8"},
+ {file = "scikit_image-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b0d6ed6502cca0c9719c444caafa0b8cda0f9e29e01ca42f621a240073284be"},
+ {file = "scikit_image-0.21.0-cp38-cp38-win_amd64.whl", hash = "sha256:9194cb7bc21215fde6c1b1e9685d312d2aa8f65ea9736bc6311126a91c860032"},
+ {file = "scikit_image-0.21.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54df1ddc854f37a912d42bc724e456e86858107e94048a81a27720bc588f9937"},
+ {file = "scikit_image-0.21.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c01e3ab0a1fabfd8ce30686d4401b7ed36e6126c9d4d05cb94abf6bdc46f7ac9"},
+ {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ef5d8d1099317b7b315b530348cbfa68ab8ce32459de3c074d204166951025c"},
+ {file = "scikit_image-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78b1e96c59cab640ca5c5b22c501524cfaf34cbe0cb51ba73bd9a9ede3fb6e1d"},
+ {file = "scikit_image-0.21.0-cp39-cp39-win_amd64.whl", hash = "sha256:9cffcddd2a5594c0a06de2ae3e1e25d662745a26f94fda31520593669677c010"},
+ {file = "scikit_image-0.21.0.tar.gz", hash = "sha256:b33e823c54e6f11873ea390ee49ef832b82b9f70752c8759efd09d5a4e3d87f0"},
]

+[package.dependencies]
+imageio = ">=2.27"
+lazy_loader = ">=0.2"
+networkx = ">=2.8"
+numpy = ">=1.21.1"
+packaging = ">=21"
+pillow = ">=9.0.1"
+PyWavelets = ">=1.1.1"
+scipy = ">=1.8"
+tifffile = ">=2022.8.12"
+
+[package.extras]
+build = ["Cython (>=0.29.32)", "build", "meson-python (>=0.13)", "ninja", "numpy (>=1.21.1)", "packaging (>=21)", "pythran", "setuptools (>=67)", "spin (==0.3)", "wheel"]
+data = ["pooch (>=1.6.0)"]
+default = ["PyWavelets (>=1.1.1)", "imageio (>=2.27)", "lazy_loader (>=0.2)", "networkx (>=2.8)", "numpy (>=1.21.1)", "packaging (>=21)", "pillow (>=9.0.1)", "scipy (>=1.8)", "tifffile (>=2022.8.12)"]
+developer = ["pre-commit", "rtoml"]
+docs = ["dask[array] (>=2022.9.2)", "ipykernel", "ipywidgets", "kaleido", "matplotlib (>=3.5)", "myst-parser", "numpydoc (>=1.5)", "pandas (>=1.5)", "plotly (>=5.10)", "pooch (>=1.6)", "pydata-sphinx-theme (>=0.13)", "pytest-runner", "scikit-learn (>=0.24.0)", "seaborn (>=0.11)", "sphinx (>=5.0)", "sphinx-copybutton", "sphinx-gallery (>=0.11)", "sphinx_design (>=0.3)", "tifffile (>=2022.8.12)"]
+optional = ["SimpleITK", "astropy (>=5.0)", "cloudpickle (>=0.2.1)", "dask[array] (>=2021.1.0)", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pyamg", "scikit-learn (>=0.24.0)"]
+test = ["asv", "matplotlib (>=3.5)", "pooch (>=1.6.0)", "pytest (>=7.0)", "pytest-cov (>=2.11.0)", "pytest-faulthandler", "pytest-localserver"]
+
+[[package]]
+name = "scikit-learn"
+version = "1.3.2"
+description = "A set of python modules for machine learning and data mining"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"},
+ {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"},
+ {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"},
+ {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"},
+ {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"},
+ {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"},
+ {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"},
+ {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"},
+ {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"},
+ {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"},
+ {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"},
+ {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"},
+ {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"},
+ {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"},
+ {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"},
+ {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"},
+ {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"},
+ {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"},
+ {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"},
+ {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"},
+ {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"},
+ {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"},
+ {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"},
+ {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"},
+ {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"},
+ {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"},
+]
+
+[package.dependencies]
+joblib = ">=1.1.1"
+numpy = ">=1.17.3,<2.0"
+scipy = ">=1.5.0"
+threadpoolctl = ">=2.0.0"
+
+[package.extras]
+benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"]
+docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"]
+tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"]
+
+[[package]]
+name = "scipy"
+version = "1.11.3"
+description = "Fundamental algorithms for scientific computing in Python"
+optional = false
+python-versions = "<3.13,>=3.9"
+files = [
+ {file = "scipy-1.11.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:370f569c57e1d888304052c18e58f4a927338eafdaef78613c685ca2ea0d1fa0"},
+ {file = "scipy-1.11.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:9885e3e4f13b2bd44aaf2a1a6390a11add9f48d5295f7a592393ceb8991577a3"},
+ {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e04aa19acc324a1a076abb4035dabe9b64badb19f76ad9c798bde39d41025cdc"},
+ {file = "scipy-1.11.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e1a8a4657673bfae1e05e1e1d6e94b0cabe5ed0c7c144c8aa7b7dbb774ce5c1"},
+ {file = "scipy-1.11.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7abda0e62ef00cde826d441485e2e32fe737bdddee3324e35c0e01dee65e2a88"},
+ {file = "scipy-1.11.3-cp310-cp310-win_amd64.whl", hash = "sha256:033c3fd95d55012dd1148b201b72ae854d5086d25e7c316ec9850de4fe776929"},
+ {file = "scipy-1.11.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:925c6f09d0053b1c0f90b2d92d03b261e889b20d1c9b08a3a51f61afc5f58165"},
+ {file = "scipy-1.11.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:5664e364f90be8219283eeb844323ff8cd79d7acbd64e15eb9c46b9bc7f6a42a"},
+ {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00f325434b6424952fbb636506f0567898dca7b0f7654d48f1c382ea338ce9a3"},
+ {file = "scipy-1.11.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5f290cf561a4b4edfe8d1001ee4be6da60c1c4ea712985b58bf6bc62badee221"},
+ {file = "scipy-1.11.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:91770cb3b1e81ae19463b3c235bf1e0e330767dca9eb4cd73ba3ded6c4151e4d"},
+ {file = "scipy-1.11.3-cp311-cp311-win_amd64.whl", hash = "sha256:e1f97cd89c0fe1a0685f8f89d85fa305deb3067d0668151571ba50913e445820"},
+ {file = "scipy-1.11.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dfcc1552add7cb7c13fb70efcb2389d0624d571aaf2c80b04117e2755a0c5d15"},
+ {file = "scipy-1.11.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:0d3a136ae1ff0883fffbb1b05b0b2fea251cb1046a5077d0b435a1839b3e52b7"},
+ {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bae66a2d7d5768eaa33008fa5a974389f167183c87bf39160d3fefe6664f8ddc"},
+ {file = "scipy-1.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2f6dee6cbb0e263b8142ed587bc93e3ed5e777f1f75448d24fb923d9fd4dce6"},
+ {file = "scipy-1.11.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:74e89dc5e00201e71dd94f5f382ab1c6a9f3ff806c7d24e4e90928bb1aafb280"},
+ {file = "scipy-1.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:90271dbde4be191522b3903fc97334e3956d7cfb9cce3f0718d0ab4fd7d8bfd6"},
+ {file = "scipy-1.11.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a63d1ec9cadecce838467ce0631c17c15c7197ae61e49429434ba01d618caa83"},
+ {file = "scipy-1.11.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:5305792c7110e32ff155aed0df46aa60a60fc6e52cd4ee02cdeb67eaccd5356e"},
+ {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ea7f579182d83d00fed0e5c11a4aa5ffe01460444219dedc448a36adf0c3917"},
+ {file = "scipy-1.11.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c77da50c9a91e23beb63c2a711ef9e9ca9a2060442757dffee34ea41847d8156"},
+ {file = "scipy-1.11.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:15f237e890c24aef6891c7d008f9ff7e758c6ef39a2b5df264650eb7900403c0"},
+ {file = "scipy-1.11.3-cp39-cp39-win_amd64.whl", hash = "sha256:4b4bb134c7aa457e26cc6ea482b016fef45db71417d55cc6d8f43d799cdf9ef2"},
+ {file = "scipy-1.11.3.tar.gz", hash = "sha256:bba4d955f54edd61899776bad459bf7326e14b9fa1c552181f0479cc60a568cd"},
+]
+
+[package.dependencies]
+numpy = ">=1.21.6,<1.28.0"
+
+[package.extras]
+dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"]
+doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
+test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "seaborn"
+version = "0.12.2"
+description = "Statistical data visualization"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "seaborn-0.12.2-py3-none-any.whl", hash = "sha256:ebf15355a4dba46037dfd65b7350f014ceb1f13c05e814eda2c9f5fd731afc08"},
+ {file = "seaborn-0.12.2.tar.gz", hash = "sha256:374645f36509d0dcab895cba5b47daf0586f77bfe3b36c97c607db7da5be0139"},
+]
+
+[package.dependencies]
+matplotlib = ">=3.1,<3.6.1 || >3.6.1"
+numpy = ">=1.17,<1.24.0 || >1.24.0"
+pandas = ">=0.25"
+
+[package.extras]
+dev = ["flake8", "flit", "mypy", "pandas-stubs", "pre-commit", "pytest", "pytest-cov", "pytest-xdist"]
+docs = ["ipykernel", "nbconvert", "numpydoc", "pydata_sphinx_theme (==0.10.0rc2)", "pyyaml", "sphinx-copybutton", "sphinx-design", "sphinx-issues"]
+stats = ["scipy (>=1.3)", "statsmodels (>=0.10)"]
+
[[package]]
name = "setuptools"
-version = "67.6.0"
+version = "68.2.2"
description = "Easily download, build, install, upgrade, and uninstall Python packages"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
+files = [
+ {file = "setuptools-68.2.2-py3-none-any.whl", hash = "sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a"},
+ {file = "setuptools-68.2.2.tar.gz", hash = "sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "setuptools-scm"
+version = "8.0.4"
+description = "the blessed package to manage your versions by scm tags"
+optional = false
+python-versions = ">=3.8"
files = [
- {file = "setuptools-67.6.0-py3-none-any.whl", hash = "sha256:b78aaa36f6b90a074c1fa651168723acbf45d14cb1196b6f02c0fd07f17623b2"},
- {file = "setuptools-67.6.0.tar.gz", hash = "sha256:2ee892cd5f29f3373097f5a814697e397cf3ce313616df0af11231e2ad118077"},
+ {file = "setuptools-scm-8.0.4.tar.gz", hash = "sha256:b5f43ff6800669595193fd09891564ee9d1d7dcb196cab4b2506d53a2e1c95c7"},
+ {file = "setuptools_scm-8.0.4-py3-none-any.whl", hash = "sha256:b47844cd2a84b83b3187a5782c71128c28b4c94cad8bfb871da2784a5cb54c4f"},
]

+[package.dependencies]
+packaging = ">=20"
+setuptools = "*"
+tomli = {version = ">=1", markers = "python_version < \"3.11\""}
+typing-extensions = "*"
+
[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
-testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
-testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+docs = ["entangled-cli[rich]", "mkdocs", "mkdocs-entangled-plugin", "mkdocs-material", "mkdocstrings[python]", "pygments"]
+rich = ["rich"]
+test = ["build", "pytest", "rich", "wheel"]
+
+[[package]]
+name = "simplejson"
+version = "3.19.2"
+description = "Simple, fast, extensible JSON encoder/decoder for Python"
+optional = false
+python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "simplejson-3.19.2-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a"},
+ {file = "simplejson-3.19.2-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835"},
+ {file = "simplejson-3.19.2-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402"},
+ {file = "simplejson-3.19.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5"},
+ {file = "simplejson-3.19.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3"},
+ {file = "simplejson-3.19.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672"},
+ {file = "simplejson-3.19.2-cp310-cp310-win32.whl", hash = "sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7"},
+ {file = "simplejson-3.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4"},
+ {file = "simplejson-3.19.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf"},
+ {file = "simplejson-3.19.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3"},
+ {file = "simplejson-3.19.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f"},
+ {file = "simplejson-3.19.2-cp311-cp311-win32.whl", hash = "sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b"},
+ {file = "simplejson-3.19.2-cp311-cp311-win_amd64.whl", hash = "sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378"},
+ {file = "simplejson-3.19.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a"},
+ {file = "simplejson-3.19.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b"},
+ {file = "simplejson-3.19.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb"},
+ {file = "simplejson-3.19.2-cp312-cp312-win32.whl", hash = "sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917"},
+ {file = "simplejson-3.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f"},
+ {file = "simplejson-3.19.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b"},
+ {file = "simplejson-3.19.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28"},
+ {file = "simplejson-3.19.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc"},
+ {file = "simplejson-3.19.2-cp36-cp36m-win32.whl", hash = "sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50"},
+ {file = "simplejson-3.19.2-cp36-cp36m-win_amd64.whl", hash = "sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f"},
+ {file = "simplejson-3.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80"},
+ {file = "simplejson-3.19.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2"},
+ {file = "simplejson-3.19.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b"},
+ {file = "simplejson-3.19.2-cp37-cp37m-win32.whl", hash = "sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693"},
+ {file = "simplejson-3.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087"},
+ {file = "simplejson-3.19.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0"},
+ {file = "simplejson-3.19.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded"},
+ {file = "simplejson-3.19.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f"},
+ {file = "simplejson-3.19.2-cp38-cp38-win32.whl", hash = "sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637"},
+ {file = "simplejson-3.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358"},
+ {file = "simplejson-3.19.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c"},
+ {file = "simplejson-3.19.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd"},
+ {file = "simplejson-3.19.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff"},
+ {file = "simplejson-3.19.2-cp39-cp39-win32.whl", hash = "sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23"},
+ {file = "simplejson-3.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4"},
+ {file = "simplejson-3.19.2-py3-none-any.whl", hash = "sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb"},
+ {file = "simplejson-3.19.2.tar.gz", hash = "sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c"},
+]

[[package]]
name = "six"
@@ -2048,6 +3260,17 @@ files = [
{file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
]

+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
[[package]]
name = "snowballstemmer"
version = "2.2.0"
@@ -2059,6 +3282,17 @@ files = [
{file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
]

+[[package]]
+name = "soupsieve"
+version = "2.5"
+description = "A modern CSS selector implementation for Beautiful Soup."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"},
+ {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"},
+]
+
[[package]]
name = "sphinx"
version = "6.2.1"
@@ -2096,45 +3330,54 @@ test = ["cython", "filelock", "html5lib", "pytest (>=4.6)"]

[[package]]
name = "sphinxcontrib-applehelp"
-version = "1.0.4"
+version = "1.0.7"
description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"},
- {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"},
+ {file = "sphinxcontrib_applehelp-1.0.7-py3-none-any.whl", hash = "sha256:094c4d56209d1734e7d252f6e0b3ccc090bd52ee56807a5d9315b19c122ab15d"},
+ {file = "sphinxcontrib_applehelp-1.0.7.tar.gz", hash = "sha256:39fdc8d762d33b01a7d8f026a3b7d71563ea3b72787d5f00ad8465bd9d6dfbfa"},
]

+[package.dependencies]
+Sphinx = ">=5"
+
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-devhelp"
-version = "1.0.2"
-description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
+version = "1.0.5"
+description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp documents"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
- {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
+ {file = "sphinxcontrib_devhelp-1.0.5-py3-none-any.whl", hash = "sha256:fe8009aed765188f08fcaadbb3ea0d90ce8ae2d76710b7e29ea7d047177dae2f"},
+ {file = "sphinxcontrib_devhelp-1.0.5.tar.gz", hash = "sha256:63b41e0d38207ca40ebbeabcf4d8e51f76c03e78cd61abe118cf4435c73d4212"},
]

+[package.dependencies]
+Sphinx = ">=5"
+
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-htmlhelp"
-version = "2.0.1"
+version = "2.0.4"
description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"},
- {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"},
+ {file = "sphinxcontrib_htmlhelp-2.0.4-py3-none-any.whl", hash = "sha256:8001661c077a73c29beaf4a79968d0726103c5605e27db92b9ebed8bab1359e9"},
+ {file = "sphinxcontrib_htmlhelp-2.0.4.tar.gz", hash = "sha256:6c26a118a05b76000738429b724a0568dbde5b72391a688577da08f11891092a"},
]

+[package.dependencies]
+Sphinx = ">=5"
+
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["html5lib", "pytest"]
@@ -2155,43 +3398,49 @@ test = ["flake8", "mypy", "pytest"]

[[package]]
name = "sphinxcontrib-qthelp"
-version = "1.0.3"
-description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
+version = "1.0.6"
+description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp documents"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
- {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
+ {file = "sphinxcontrib_qthelp-1.0.6-py3-none-any.whl", hash = "sha256:bf76886ee7470b934e363da7a954ea2825650013d367728588732c7350f49ea4"},
+ {file = "sphinxcontrib_qthelp-1.0.6.tar.gz", hash = "sha256:62b9d1a186ab7f5ee3356d906f648cacb7a6bdb94d201ee7adf26db55092982d"},
]

+[package.dependencies]
+Sphinx = ">=5"
+
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]

[[package]]
name = "sphinxcontrib-serializinghtml"
-version = "1.1.5"
-description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
+version = "1.1.9"
+description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.9"
files = [
- {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
- {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
+ {file = "sphinxcontrib_serializinghtml-1.1.9-py3-none-any.whl", hash = "sha256:9b36e503703ff04f20e9675771df105e58aa029cfcbc23b8ed716019b7416ae1"},
+ {file = "sphinxcontrib_serializinghtml-1.1.9.tar.gz", hash = "sha256:0c64ff898339e1fac29abd2bf5f11078f3ec413cfe9c046d3120d7ca65530b54"},
]

+[package.dependencies]
+Sphinx = ">=5"
+
[package.extras]
lint = ["docutils-stubs", "flake8", "mypy"]
test = ["pytest"]

[[package]]
name = "stack-data"
-version = "0.6.2"
+version = "0.6.3"
description = "Extract data from python stack frames and tracebacks for informative displays"
optional = false
python-versions = "*"
files = [
- {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"},
- {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"},
+ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"},
+ {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"},
]

[package.dependencies]
@@ -2202,6 +3451,48 @@ pure-eval = "*"
[package.extras]
tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"]
+[[package]]
+name = "tenacity"
+version = "8.2.3"
+description = "Retry code until it succeeds"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"},
+ {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"},
+]
+
+[package.extras]
+doc = ["reno", "sphinx", "tornado (>=4.5)"]
+
+[[package]]
+name = "threadpoolctl"
+version = "3.2.0"
+description = "threadpoolctl"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"},
+ {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"},
+]
+
+[[package]]
+name = "tifffile"
+version = "2023.9.26"
+description = "Read and write TIFF files"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "tifffile-2023.9.26-py3-none-any.whl", hash = "sha256:1de47fa945fddaade256e25ad4f375ae65547f3c1354063aded881c32a64cf89"},
+ {file = "tifffile-2023.9.26.tar.gz", hash = "sha256:67e355e4595aab397f8405d04afe1b4ae7c6f62a44e22d933fee1a571a48c7ae"},
+]
+
+[package.dependencies]
+numpy = "*"
+
+[package.extras]
+all = ["defusedxml", "fsspec", "imagecodecs (>=2023.8.12)", "lxml", "matplotlib", "zarr"]
+
[[package]]
name = "tomli"
version = "2.0.1"
@@ -2213,98 +3504,341 @@ files = [
{file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
]

+[[package]]
+name = "torch"
+version = "1.11.0+cpu"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+optional = true
+python-versions = ">=3.7.0"
+files = [
+ {file = "torch-1.11.0+cpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:32fa00d974707c0183bc4dd0c1d69e853d0f15cc60f157b71ac5718847808943"},
+ {file = "torch-1.11.0+cpu-cp310-cp310-win_amd64.whl", hash = "sha256:bd984fa8676b2f7c9611b40af3a7c168fb90be3e29028219f822696bb357f472"},
+ {file = "torch-1.11.0+cpu-cp37-cp37m-linux_x86_64.whl", hash = "sha256:50008b82004b9d91e036cc199a57f863b6f8978b8a222176f9a4435fce181dd8"},
+ {file = "torch-1.11.0+cpu-cp37-cp37m-win_amd64.whl", hash = "sha256:7bbd8b77a59e628a7cb84289a3a26adc7e28dd7213c7f666537f26e714fb1721"},
+ {file = "torch-1.11.0+cpu-cp38-cp38-linux_x86_64.whl", hash = "sha256:22997df8f3a3f9faed40ef9e7964d1869cafa0317cc4a5b115bfdf69323e8884"},
+ {file = "torch-1.11.0+cpu-cp38-cp38-win_amd64.whl", hash = "sha256:0dbdddc7452a2c42250df369e4968b62589ab0ac1b9d14e27701eb4fc3839ad1"},
+ {file = "torch-1.11.0+cpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:544c13ef120531ec2f28a3c858c06e600d514a6dfe09b4dd6fd0262088dd2fa3"},
+ {file = "torch-1.11.0+cpu-cp39-cp39-win_amd64.whl", hash = "sha256:7198bf5c69464459bd79526c6a4eaad2806db886443ee2f4e8e7a492bccf03ef"},
+]
+
+[package.dependencies]
+typing-extensions = "*"
+
+[package.source]
+type = "legacy"
+url = "https://download.pytorch.org/whl/cpu"
+reference = "torch_cpu"
+
+[[package]]
+name = "torchvision"
+version = "0.12.0+cpu"
+description = "image and video datasets and models for torch deep learning"
+optional = true
+python-versions = ">=3.7"
+files = [
+ {file = "torchvision-0.12.0+cpu-cp310-cp310-linux_x86_64.whl", hash = "sha256:1f4da68f79207e7204f77afeb8271b41e7f7b602c7a3f832c8103948c37a8a9a"},
+ {file = "torchvision-0.12.0+cpu-cp310-cp310-win_amd64.whl", hash = "sha256:adacab338dcc26690a56d6dbd8a7a7ac300b5df370231fd92c15f5be60c77352"},
+ {file = "torchvision-0.12.0+cpu-cp37-cp37m-linux_x86_64.whl", hash = "sha256:86eb9a798545e910e94af306c1802a84b1a0f090253d79ba2c6a3ce32e06bb80"},
+ {file = "torchvision-0.12.0+cpu-cp37-cp37m-win_amd64.whl", hash = "sha256:28d69e2be5823795926d098447bd49abdb686aecc33ec76b2669ee4e3167a9c3"},
+ {file = "torchvision-0.12.0+cpu-cp38-cp38-linux_x86_64.whl", hash = "sha256:f3e382460d0dc8752ab2b97f943da996f3254ee868bc9592259ab55d7e1a5852"},
+ {file = "torchvision-0.12.0+cpu-cp38-cp38-win_amd64.whl", hash = "sha256:9fcd952ae1195093a6c07a9763946fec2af869f0d4a5d0d0978f5dfddc7ea949"},
+ {file = "torchvision-0.12.0+cpu-cp39-cp39-linux_x86_64.whl", hash = "sha256:96b50c5dc8b63a5d40f07eef6f9d56a676e30dfd56b45a48c00ff60b4dd1b141"},
+ {file = "torchvision-0.12.0+cpu-cp39-cp39-win_amd64.whl", hash = "sha256:f4da5f047614f5353c0c3a3be602a021afbfacaded30fa5a41ae605cfc1545d6"},
+]
+
+[package.dependencies]
+numpy = "*"
+pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0"
+requests = "*"
+torch = "1.11.0"
+typing-extensions = "*"
+
+[package.extras]
+scipy = ["scipy"]
+
+[package.source]
+type = "legacy"
+url = "https://download.pytorch.org/whl/cpu"
+reference = "torch_cpu"
+
+[[package]]
+name = "tornado"
+version = "6.3.3"
+description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
+optional = false
+python-versions = ">= 3.8"
+files = [
+ {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:502fba735c84450974fec147340016ad928d29f1e91f49be168c0a4c18181e1d"},
+ {file = "tornado-6.3.3-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:805d507b1f588320c26f7f097108eb4023bbaa984d63176d1652e184ba24270a"},
+ {file = "tornado-6.3.3-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd19ca6c16882e4d37368e0152f99c099bad93e0950ce55e71daed74045908f"},
+ {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ac51f42808cca9b3613f51ffe2a965c8525cb1b00b7b2d56828b8045354f76a"},
+ {file = "tornado-6.3.3-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:71a8db65160a3c55d61839b7302a9a400074c9c753040455494e2af74e2501f2"},
+ {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ceb917a50cd35882b57600709dd5421a418c29ddc852da8bcdab1f0db33406b0"},
+ {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:7d01abc57ea0dbb51ddfed477dfe22719d376119844e33c661d873bf9c0e4a16"},
+ {file = "tornado-6.3.3-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:9dc4444c0defcd3929d5c1eb5706cbe1b116e762ff3e0deca8b715d14bf6ec17"},
+ {file = "tornado-6.3.3-cp38-abi3-win32.whl", hash = "sha256:65ceca9500383fbdf33a98c0087cb975b2ef3bfb874cb35b8de8740cf7f41bd3"},
+ {file = "tornado-6.3.3-cp38-abi3-win_amd64.whl", hash = "sha256:22d3c2fa10b5793da13c807e6fc38ff49a4f6e1e3868b0a6f4164768bb8e20f5"},
+ {file = "tornado-6.3.3.tar.gz", hash = "sha256:e7d8db41c0181c80d76c982aacc442c0783a2c54d6400fe028954201a2e032fe"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.66.1"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
+ {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
[[package]]
name = "traitlets"
-version = "5.9.0"
+version = "5.12.0"
description = "Traitlets Python configuration system"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"},
- {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"},
+ {file = "traitlets-5.12.0-py3-none-any.whl", hash = "sha256:81539f07f7aebcde2e4b5ab76727f53eabf18ad155c6ed7979a681411602fa47"},
+ {file = "traitlets-5.12.0.tar.gz", hash = "sha256:833273bf645d8ce31dcb613c56999e2e055b1ffe6d09168a164bcd91c36d5d35"},
]

[package.extras]
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
-test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
+test = ["argcomplete (>=3.0.3)", "mypy (>=1.6.0)", "pre-commit", "pytest (>=7.0,<7.5)", "pytest-mock", "pytest-mypy-testing"]
+
+[[package]]
+name = "transforms3d"
+version = "0.4.1"
+description = "Functions for 3D coordinate transformations"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "transforms3d-0.4.1-py3-none-any.whl", hash = "sha256:aea08776c1c915c8b424418994202aced8e46301c375ce63423d14f1d0045aa7"},
+ {file = "transforms3d-0.4.1.tar.gz", hash = "sha256:31c755266a0b0a222488b8d039f6f325cf486c52728c03e307ce047b2fad1179"},
+]
+
+[[package]]
+name = "trimesh"
+version = "3.23.5"
+description = "Import, export, process, analyze and view triangular meshes."
+optional = false
+python-versions = "*"
+files = [
+ {file = "trimesh-3.23.5-py3-none-any.whl", hash = "sha256:9cfc592c7ad6475ebfe51c90b0a1e686d627735cea6e6e18e40745be3ecfaab9"},
+ {file = "trimesh-3.23.5.tar.gz", hash = "sha256:bdfd669eccc4b3faff2328200a49408cd5ecad9f19b6022c4adb554bbb3a2621"},
+]
+
+[package.dependencies]
+numpy = "*"
+
+[package.extras]
+all = ["chardet", "colorlog", "embreex", "jsonschema", "lxml", "mapbox-earcut", "networkx", "pillow", "psutil", "pycollada", "pyglet (<2)", "python-fcl", "requests", "rtree", "scikit-image", "scipy", "setuptools", "shapely", "svg.path", "xatlas", "xxhash"]
+easy = ["chardet", "colorlog", "embreex", "jsonschema", "lxml", "mapbox-earcut", "networkx", "pillow", "pycollada", "requests", "rtree", "scipy", "setuptools", "shapely", "svg.path", "xxhash"]
+recommends = ["glooey", "meshio", "sympy"]
+test = ["autopep8 (<2)", "coveralls", "ezdxf", "pyinstrument", "pymeshlab", "pytest", "pytest-cov", "ruff"]
[[package]]
name = "typing-extensions"
-version = "4.5.0"
-description = "Backported and Experimental Type Hints for Python 3.7+"
+version = "4.8.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
+ {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+]
+
+[[package]]
+name = "tzdata"
+version = "2023.3"
+description = "Provider of IANA time zone data"
+optional = false
+python-versions = ">=2"
files = [
- {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"},
- {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"},
+ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"},
+ {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"},
]

[[package]]
name = "urllib3"
-version = "1.26.15"
+version = "2.0.7"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+python-versions = ">=3.7"
files = [
- {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"},
- {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"},
+ {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"},
+ {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"},
]

[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
-secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
-socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"]
+socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
+zstd = ["zstandard (>=0.18.0)"]

[[package]]
name = "virtualenv"
-version = "20.21.0"
+version = "20.24.6"
description = "Virtual Python Environment builder"
optional = false
python-versions = ">=3.7"
files = [
- {file = "virtualenv-20.21.0-py3-none-any.whl", hash = "sha256:31712f8f2a17bd06234fa97fdf19609e789dd4e3e4bf108c3da71d710651adbc"},
- {file = "virtualenv-20.21.0.tar.gz", hash = "sha256:f50e3e60f990a0757c9b68333c9fdaa72d7188caa417f96af9e52407831a3b68"},
+ {file = "virtualenv-20.24.6-py3-none-any.whl", hash = "sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381"},
+ {file = "virtualenv-20.24.6.tar.gz", hash = "sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.7,<1"
+filelock = ">=3.12.2,<4"
+platformdirs = ">=3.9.1,<4"
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
+
+[[package]]
+name = "vispy"
+version = "0.14.1"
+description = "Interactive visualization in Python"
+optional = true
+python-versions = ">=3.8"
+files = [
+ {file = "vispy-0.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:629274e84fd823ce81abe3024c2675323ccb8aab5a696556a0cce656d96397bb"},
+ {file = "vispy-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:27c100b647461e171b36090a6c68d852328d6b4e1fcc3a326a7c8119fed9b4b1"},
+ {file = "vispy-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f8872ec2d33e2f8579055aa37cd4427e2334ee9e6a2b6a1abc1d82c3f741f1c"},
+ {file = "vispy-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b828784549dfb5cbc6df02f3bc96d4d8cef49e752676ba12939f4400e0d6570"},
+ {file = "vispy-0.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a4cc6b9e161051673909178892e4a30bbce1ef09782e03f90f39758d83b8694f"},
+ {file = "vispy-0.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a0ffcf972ca5d7578db8d065b00a223bfff734c6f04716ac24e8c37ee6508187"},
+ {file = "vispy-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e15f6448bd66ea62407e362d205a4b8cf81ecf201c626742cfd06c81d59703c1"},
+ {file = "vispy-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d6ff102fbb6578aa94eac893dcec95dc9d962d7fca4ec0e3e3b05a63ca8860e0"},
+ {file = "vispy-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62f6cdf1e318f1d64000d97727cd8f291f846cb7b4469f9302d42ecab451aa03"},
+ {file = "vispy-0.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:15b32d62ae648f439977f72173a580ba9ff922adfe156ecccccc77cc508c04c5"},
+ {file = "vispy-0.14.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d4850ab3f441e5d242338a1ceb778b76cbb0ccef004f2c44c5831bea3b68ae5a"},
+ {file = "vispy-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:15a4e155d27bbcffeb00f2ddff2c38d27d6bf6e0f1727361cc94a2726cc01d7f"},
+ {file = "vispy-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fcb84178ed7a3e24aba49c9a054405eddd87a7f6bdfaac90f57366213f3f5898"},
+ {file = "vispy-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4537a927365392470ab337eaf7e36511a748c24460fb7ae3c56a86512dca7feb"},
+ {file = "vispy-0.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:ac9bd77cc8ca01dd7b72280677f4b6e12623e901dffd71be776110dc34cb2a30"},
+ {file = "vispy-0.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:985718f1f17b07fd9848e31269465c81fb21cc2552e1ce965c50dbf5e466d6d7"},
+ {file = "vispy-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1f3fcae30a8b4b03c190e627f7af7f43e3ab9be43370d9697eed09c988d2ca4d"},
+ {file = "vispy-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa1234671e2a0232ee7b2a4e0d23c2c38eba7f1f92d6466f54f94ca94a7449ca"},
+ {file = "vispy-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9700e6b7e610276f41bfe2aaf34a856049ee5b7c43e03347b068f78042659451"},
+ {file = "vispy-0.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:a009cdcf2b6aa5bf3a4891988b3f4d92c05f0e68526b1fde727c358403a0aa12"},
+ {file = "vispy-0.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:60e1cd308f0c894faec68ba8c209f1e967f3c67e103c26768c47ea8f0d1b046b"},
+ {file = "vispy-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:364f6df46008bcea8bba8b7bc98be7c4d6803c32838bdea5d6ee46974ebd2436"},
+ {file = "vispy-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c430e2be26006a3ea30e775db88c1e6c02885d141f3243b2107c92793e4fc5b0"},
+ {file = "vispy-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:23ccbf235ef9bc654c49774062b9d7478b7c7fb9d21abbc64c9210ccf4821676"},
+ {file = "vispy-0.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:4f4f3f48ed18deed51578e5f05f9ecbe7a37f0e563d4b98fa159d5900bb83504"},
+ {file = "vispy-0.14.1.tar.gz", hash = "sha256:249a50979fc00a8b65109283354dcf12cf415c1a5dcf9821e113f6e590b9b93c"},
]

[package.dependencies]
-distlib = ">=0.3.6,<1"
-filelock = ">=3.4.1,<4"
-platformdirs = ">=2.4,<4"
+freetype-py = "*"
+hsluv = "*"
+kiwisolver = "*"
+numpy = "*"
+packaging = "*"

[package.extras]
-docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"]
-test = ["covdefaults (>=2.2.2)", "coverage (>=7.1)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23)", "pytest (>=7.2.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)"]
+doc = ["myst-parser", "numpydoc", "pillow", "pydata-sphinx-theme", "pyopengl", "pytest", "sphinx-gallery", "sphinxcontrib-apidoc"]
+io = ["Pillow", "meshio"]
+ipython-static = ["ipython"]
+pyglet = ["pyglet (>=1.2)"]
+pyqt5 = ["pyqt5"]
+pyqt6 = ["pyqt6"]
+pyside = ["PySide"]
+pyside2 = ["PySide2"]
+pyside6 = ["PySide6"]
+sdl2 = ["PySDL2"]
+tk = ["pyopengltk"]
+wx = ["wxPython"]

[[package]]
name = "wcwidth"
-version = "0.2.6"
+version = "0.2.8"
description = "Measures the displayed width of unicode strings in a terminal"
optional = false
python-versions = "*"
files = [
- {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"},
- {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"},
+ {file = "wcwidth-0.2.8-py2.py3-none-any.whl", hash = "sha256:77f719e01648ed600dfa5402c347481c0992263b81a027344f3e1ba25493a704"},
+ {file = "wcwidth-0.2.8.tar.gz", hash = "sha256:8705c569999ffbb4f6a87c6d1b80f324bd6db952f5eb0b95bc07517f4c1813d4"},
+]
+
+[[package]]
+name = "webdataset"
+version = "0.2.65"
+description = "Record sequential storage for deep learning."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "webdataset-0.2.65-py3-none-any.whl", hash = "sha256:37e0d2e0638dd93be74b485def01f56400422826f5dac5b3b1ca3c2789d445ee"},
+ {file = "webdataset-0.2.65.tar.gz", hash = "sha256:5c8b3b5c98957ea70255dd63321dfe5372a12a943489679a37d7e013c471bd8b"},
+]
+
+[package.dependencies]
+braceexpand = "*"
+numpy = "*"
+pyyaml = "*"
+
+[[package]]
+name = "werkzeug"
+version = "2.2.3"
+description = "The comprehensive WSGI web application library."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"},
+ {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.1.1"
+
+[package.extras]
+watchdog = ["watchdog"]
+
+[[package]]
+name = "xyzservices"
+version = "2023.10.0"
+description = "Source of XYZ tiles providers"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "xyzservices-2023.10.0-py3-none-any.whl", hash = "sha256:70b9910f6c8e46f6ca92dea21e9b8cf89edf0ead35a870198fb59a7d63579525"},
+ {file = "xyzservices-2023.10.0.tar.gz", hash = "sha256:eee203e91955782fd8bfc2f05846830c289139dc0ab4eaf733bfa8f0be71861f"},
]

[[package]]
name = "zipp"
-version = "3.15.0"
+version = "3.17.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
- {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
+ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
+ {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
]

[package.extras]
-docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]

[extras]
+cpu = ["torch", "torchvision"]
+evaluation = ["bop_toolkit_lib"]
+multiview = ["cosypose"]
render = ["panda3d", "pybullet"]

[metadata]
lock-version = "2.0"
-python-versions = "^3.8"
-content-hash = "1da69d66568c3b8dd6f0745a216ec1dbda76fb5f4bd4e9f4a4878640590e25c9"
+python-versions = ">=3.9,<3.11"
+content-hash = "98e8efd9ad682e321c6d18632d7fbd548e1589d54b48b040726ab8f0d17ac4cb"
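The lock now resolves torch 1.11.0+cpu and torchvision 0.12.0+cpu as optional packages served from the PyTorch wheel index (the `torch_cpu` legacy source). A quick sanity check after installing with the `cpu` extra, e.g. `poetry install --extras cpu` (a hypothetical invocation; adjust to your workflow):

    # Sketch: confirm the CPU-only wheels from the torch_cpu source are in use.
    import torch
    import torchvision

    assert torch.__version__ == "1.11.0+cpu"
    assert torchvision.__version__ == "0.12.0+cpu"
    assert not torch.cuda.is_available()  # CPU wheels ship without CUDA support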
diff --git a/pyproject.toml b/pyproject.toml
index 297bfbe9..a068e7a4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,39 +24,87 @@ readme = "README.md"
version = "0.1.0"
[tool.poetry.dependencies]
+addict = "^2.4.0"
+beautifulsoup4 = "^4.12.2"
+bokeh = "^3.2.2"
+bop_toolkit_lib = {git = "https://github.com/thodan/bop_toolkit", optional = true}
+cosypose = {optional = true, path = "happypose/pose_estimators/cosypose"}
+httpx = "^0.25.0"
+imageio = "^2.31.4"
+ipython = "^8.12.2"
+joblib = "^1.3.2"
+omegaconf = "^2.3.0"
+open3d = "^0.17.0"
+opencv-contrib-python = "^4.8.0.76"
+opencv-python = "^4.8.0.76"
panda3d = {optional = true, version = "=1.10.13"}
-pin = "^2.6.17"
+pin = "^2.6.20"
+plyfile = "^1.0.1"
+pybind11 = "^2.10.4"
pybullet = {optional = true, version = "^3.2.5"}
-python = "^3.8"
+pypng = "^0.20220715.0"
+python = ">=3.9,<3.11"
+pyyaml = "^6.0.1"
+roma = "^1.4.0"
+scikit-image = "^0.21.0"
+scikit-learn = "^1.3.1"
+scipy = "^1.11.2"
+seaborn = "^0.12.2"
+simplejson = "^3.19.1"
+torch = [
+ {markers = "extra=='cpu'", optional = true, source = "torch_cpu", version = "1.11.0+cpu"}
+]
+torchvision = [
+ {markers = "extra=='cpu'", optional = true, source = "torch_cpu", version = "0.12.0+cpu"}
+]
+tqdm = "^4.66.1"
+transforms3d = "^0.4.1"
+trimesh = "^3.23.5"
+webdataset = "^0.2.57"

[tool.poetry.extras]
+cpu = ["torch", "torchvision"]
+evaluation = ["bop_toolkit_lib"]
+multiview = ["cosypose"]
render = ["panda3d", "pybullet"]

[tool.poetry.group.dev]
optional = true

[tool.poetry.group.dev.dependencies]
-black = "^23.3.0"
+black = "^23.9.1"
coverage = {extras = ["toml"], version = "^7.2.5"}
-ipython = "^8.12.2"
pre-commit = "^3.2.1"
-ruff = "^0.0.260"
+ruff = "^0.0.290"

[tool.poetry.group.docs]
optional = true

[tool.poetry.group.docs.dependencies]
-myst-parser = "^1.0.0"
+myst-parser = "^2.0.0"
sphinx = "^6.2.1"

+[[tool.poetry.source]]
+name = "torch_cpu"
+priority = "supplemental"
+url = "https://download.pytorch.org/whl/cpu"
+
+[[tool.poetry.source]]
+name = "torch_cu113"
+priority = "supplemental"
+url = "https://download.pytorch.org/whl/cu113"
+
+[[tool.poetry.source]]
+name = "PyPI"
+priority = "primary"
+
[tool.poetry.urls]
changelog = "https://github.com/agimus-project/happypose/blob/main/CHANGELOG.md"

[tool.ruff]
-extend-exclude = ["cosypose", "megapose6d"]
extend-ignore = ["D203", "D213"]
-extend-select = ["A", "B", "C", "COM", "D", "EM", "EXE", "G", "N", "PTH", "RET", "RUF", "UP", "W", "YTT"]
-target-version = "py38"
+# extend-select = ["A", "B", "C", "COM", "D", "EM", "EXE", "G", "N", "PTH", "RET", "RUF", "UP", "W", "YTT"]
+target-version = "py39"

[tool.tomlsort]
all = true
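The torch entries above use the environment marker `extra=='cpu'` so the heavy wheels are only resolved when the `cpu` extra is requested, with `torch_cpu` / `torch_cu113` registered as supplemental sources and PyPI kept primary. A minimal sketch of how such a marker evaluates, using `packaging` (the marker implementation that pip and Poetry build on; the dictionaries are illustrative):

    from packaging.markers import Marker

    marker = Marker("extra == 'cpu'")
    print(marker.evaluate({"extra": "cpu"}))     # True: the +cpu wheel is requested
    print(marker.evaluate({"extra": "render"}))  # False: the dependency is skipped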
diff --git a/rclone.conf b/rclone.conf
deleted file mode 100644
index b914a7bd..00000000
--- a/rclone.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[happypose]
-type = http
-url = https://www.paris.inria.fr/archive_ylabbeprojectsdata/
-
diff --git a/tests/test_cosypose_inference.py b/tests/test_cosypose_inference.py
index d32f929c..358ba33a 100644
--- a/tests/test_cosypose_inference.py
+++ b/tests/test_cosypose_inference.py
@@ -1,35 +1,35 @@
"""Set of unit tests for testing inference example for CosyPose."""
import unittest

+import numpy as np
+import pinocchio as pin
import torch
import yaml
-import numpy as np
-
from PIL import Image

-from pathlib import Path
-import pinocchio as pin
-
-
-from happypose.toolbox.datasets.bop_object_datasets import BOPObjectDataset
-from happypose.toolbox.datasets.scene_dataset import CameraData
-from happypose.toolbox.inference.types import ObservationTensor
-from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
-from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
-from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR
+from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, LOCAL_DATA_DIR
from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector
+from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
+ PoseEstimator,
+)
from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
- create_model_detector,
check_update_config as check_update_config_detector,
)
+from happypose.pose_estimators.cosypose.cosypose.training.detector_models_cfg import (
+ create_model_detector,
+)
from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
- create_model_refiner,
- create_model_coarse,
check_update_config as check_update_config_pose,
)
-from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import (
- PoseEstimator,
+from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import (
+ create_model_coarse,
+ create_model_refiner,
)
+from happypose.toolbox.datasets.bop_object_datasets import BOPObjectDataset
+from happypose.toolbox.datasets.scene_dataset import CameraData
+from happypose.toolbox.inference.types import ObservationTensor
+from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
+from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer


class TestCosyPoseInference(unittest.TestCase):
@@ -37,13 +37,15 @@ class TestCosyPoseInference(unittest.TestCase):
@staticmethod
def _load_detector(
- device="cpu", ds_name="ycbv", run_id="detector-bop-ycbv-pbr--970850"
+ device="cpu",
+ ds_name="ycbv",
+ run_id="detector-bop-ycbv-pbr--970850",
):
"""Load CosyPose detector."""
run_dir = EXP_DIR / run_id
assert run_dir.exists(), "The run_id is invalid, or you forgot to download data"
cfg = check_update_config_detector(
- yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
+ yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader),
)
label_to_category_id = cfg.label_to_category_id
ckpt = torch.load(run_dir / "checkpoint.pth.tar", map_location=device)[
@@ -58,7 +60,7 @@ def _load_detector(
@staticmethod
def _load_pose_model(run_id, renderer, mesh_db, device):
- """Load either coarse or refiner model (decided based on run_id/config)"""
+ """Load either coarse or refiner model (decided based on run_id/config)."""
run_dir = EXP_DIR / run_id
cfg = yaml.load((run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader)
cfg = check_update_config_pose(cfg)
@@ -83,24 +85,26 @@ def _load_pose_models(
):
"""Load coarse and refiner for the crackers example renderer."""
object_dataset = BOPObjectDataset(
- Path(__file__).parent / "data" / "crackers_example" / "models",
+ LOCAL_DATA_DIR / "examples" / "crackers_example" / "models",
label_format="ycbv-{label}",
)
renderer = Panda3dBatchRenderer(
- object_dataset, n_workers=n_workers, preload_cache=False
+ object_dataset,
+ n_workers=n_workers,
+ preload_cache=False,
)
mesh_db = MeshDataBase.from_object_ds(object_dataset)
mesh_db_batched = mesh_db.batched().to(device)
- kwargs = dict(renderer=renderer, mesh_db=mesh_db_batched, device=device)
+ kwargs = {"renderer": renderer, "mesh_db": mesh_db_batched, "device": device}
coarse_model = TestCosyPoseInference._load_pose_model(coarse_run_id, **kwargs)
refiner_model = TestCosyPoseInference._load_pose_model(refiner_run_id, **kwargs)
return coarse_model, refiner_model

@staticmethod
def _load_crackers_example_observation():
- """Load cracker example observation tensor"""
- data_dir = Path(__file__).parent.joinpath("data").joinpath("crackers_example")
+ """Load cracker example observation tensor."""
+ data_dir = LOCAL_DATA_DIR / "examples" / "crackers_example"
camera_data = CameraData.from_json((data_dir / "camera_data.json").read_text())
rgb = np.array(Image.open(data_dir / "image_rgb.png"), dtype=np.uint8)
assert rgb.shape[:2] == camera_data.resolution
@@ -133,7 +137,7 @@ def test_detector(self):
self.assertFalse(xmin < 500 < xmax and ymin < 450 < ymax)

def test_cosypose_pipeline(self):
- """Run detector with coarse and refiner"""
+ """Run detector with coarse and refiner."""
observation = self._load_crackers_example_observation()
detector = self._load_detector()
coarse_model, refiner_model = self._load_pose_models()
@@ -143,7 +147,9 @@ def test_cosypose_pipeline(self):
detector_model=detector,
)
preds, _ = pose_estimator.run_inference_pipeline(
- observation=observation, detection_th=0.8, run_detector=True
+ observation=observation,
+ detection_th=0.8,
+ run_detector=True,
)
self.assertEqual(len(preds), 1)
@@ -151,7 +157,8 @@ def test_cosypose_pipeline(self):
pose = pin.SE3(preds.poses[0].numpy())
exp_pose = pin.SE3(
- pin.exp3(np.array([1.44, 1.19, -0.91])), np.array([0, 0, 0.52])
+ pin.exp3(np.array([1.44, 1.19, -0.91])),
+ np.array([0, 0, 0.52]),
)
diff = pose.inverse() * exp_pose
self.assertLess(np.linalg.norm(pin.log6(diff).vector), 0.1)
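The test now loads the crackers example from `LOCAL_DATA_DIR / "examples"` (populated by the download step) rather than a repository-local `tests/data` clone. A condensed, standalone sketch of the observation-loading flow, assuming `ObservationTensor.from_numpy` keeps its usual `(rgb, depth, K)` signature:

    import numpy as np
    from PIL import Image

    from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
    from happypose.toolbox.datasets.scene_dataset import CameraData
    from happypose.toolbox.inference.types import ObservationTensor

    data_dir = LOCAL_DATA_DIR / "examples" / "crackers_example"
    camera_data = CameraData.from_json((data_dir / "camera_data.json").read_text())
    rgb = np.array(Image.open(data_dir / "image_rgb.png"), dtype=np.uint8)
    # Depth is optional for this pipeline, so it is omitted (None) here.
    observation = ObservationTensor.from_numpy(rgb, None, camera_data.K)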
diff --git a/tests/test_megapose_inference.py b/tests/test_megapose_inference.py
index c723ca07..bc5dae4a 100644
--- a/tests/test_megapose_inference.py
+++ b/tests/test_megapose_inference.py
@@ -3,18 +3,19 @@
import numpy as np
import pinocchio as pin
-from pathlib import Path
-from .test_cosypose_inference import TestCosyPoseInference
+from happypose.pose_estimators.cosypose.cosypose.config import LOCAL_DATA_DIR
from happypose.toolbox.datasets.bop_object_datasets import BOPObjectDataset
from happypose.toolbox.utils.load_model import NAMED_MODELS, load_named_model

+from .test_cosypose_inference import TestCosyPoseInference
+

class TestMegaPoseInference(unittest.TestCase):
"""Unit tests for MegaPose inference example."""
def test_megapose_pipeline(self):
- """Run detector from CosyPose with coarse and refiner from MegaPose"""
+ """Run detector from CosyPose with coarse and refiner from MegaPose."""
observation = TestCosyPoseInference._load_crackers_example_observation()
detector = TestCosyPoseInference._load_detector()
@@ -24,7 +25,7 @@ def test_megapose_pipeline(self):
detections = detections[:1]  # let's keep the most promising one only.

object_dataset = BOPObjectDataset(
- Path(__file__).parent / "data" / "crackers_example" / "models",
+ LOCAL_DATA_DIR / "examples" / "crackers_example" / "models",
label_format="ycbv-{label}",
)
@@ -33,7 +34,9 @@ def test_megapose_pipeline(self):
# let's limit the grid, 278 is the most promising one, 477 the least one
pose_estimator._SO3_grid = pose_estimator._SO3_grid[[278, 477]]
preds, data = pose_estimator.run_inference_pipeline(
- observation, detections=detections, **model_info["inference_parameters"]
+ observation,
+ detections=detections,
+ **model_info["inference_parameters"],
)
scores = data["coarse"]["data"]["logits"]
@@ -44,7 +47,8 @@ def test_megapose_pipeline(self):
pose = pin.SE3(preds.poses[0].numpy())
exp_pose = pin.SE3(
- pin.exp3(np.array([1.44, 1.19, -0.91])), np.array([0, 0, 0.52])
+ pin.exp3(np.array([1.44, 1.19, -0.91])),
+ np.array([0, 0, 0.52]),
)
diff = pose.inverse() * exp_pose
self.assertLess(np.linalg.norm(pin.log6(diff).vector), 0.3)
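Both test files end with the same pose assertion: the predicted and expected poses are compared through the log map of their relative transform, so a single threshold bounds rotation and translation jointly. A standalone sketch with pinocchio (rotation vector and translation copied from the tests; identical poses give a zero norm):

    import numpy as np
    import pinocchio as pin

    rot = pin.exp3(np.array([1.44, 1.19, -0.91]))  # rotation matrix from a rotation vector
    pose = pin.SE3(rot, np.array([0.0, 0.0, 0.52]))
    exp_pose = pin.SE3(rot, np.array([0.0, 0.0, 0.52]))

    diff = pose.inverse() * exp_pose
    err = np.linalg.norm(pin.log6(diff).vector)  # norm of the 6D twist; 0.0 here
    assert err < 0.3  # the tests use 0.1 (CosyPose) and 0.3 (MegaPose)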
diff --git a/tests/test_omega_conf.py b/tests/test_omega_conf.py
new file mode 100644
index 00000000..8beb6fb6
--- /dev/null
+++ b/tests/test_omega_conf.py
@@ -0,0 +1,28 @@
+import unittest
+
+from omegaconf import OmegaConf
+
+from happypose.pose_estimators.megapose.evaluation.eval_config import (
+ BOPEvalConfig,
+ EvalConfig,
+ FullEvalConfig,
+ HardwareConfig,
+)
+from happypose.pose_estimators.megapose.inference.types import InferenceConfig
+
+
+class TestOmegaConf(unittest.TestCase):
+ """
+ Check if megapose config dataclasses are valid.
+ """
+
+ def test_valid_dataclasses(self):
+ OmegaConf.structured(BOPEvalConfig)
+ OmegaConf.structured(HardwareConfig)
+ OmegaConf.structured(InferenceConfig)
+ OmegaConf.structured(EvalConfig)
+ OmegaConf.structured(FullEvalConfig)
+
+
+if __name__ == "__main__":
+ unittest.main()
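`OmegaConf.structured` builds a typed config from a dataclass and rejects invalid annotations, so merely instantiating each config class is already a meaningful check. A sketch of the mechanism with a stand-in dataclass (`DemoConfig` is illustrative, not a happypose class):

    import dataclasses

    from omegaconf import OmegaConf

    @dataclasses.dataclass
    class DemoConfig:
        n_workers: int = 4
        device: str = "cpu"

    cfg = OmegaConf.structured(DemoConfig)
    cfg.n_workers = 8  # assignments are validated against the annotation
    print(OmegaConf.to_yaml(cfg))  # n_workers: 8 / device: cpu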
diff --git a/tests/test_renderer_panda3d.py b/tests/test_renderer_panda3d.py
index 82a1ba09..7199150b 100644
--- a/tests/test_renderer_panda3d.py
+++ b/tests/test_renderer_panda3d.py
@@ -1,16 +1,17 @@
"""Set of unit tests for Panda3D renderer."""
import unittest
from pathlib import Path
+
import numpy as np
from numpy.testing import assert_equal

-from happypose.toolbox.datasets.object_dataset import RigidObjectDataset, RigidObject
+from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset
from happypose.toolbox.lib3d.transform import Transform
from happypose.toolbox.renderer.panda3d_scene_renderer import Panda3dSceneRenderer
from happypose.toolbox.renderer.types import (
- Panda3dObjectData,
Panda3dCameraData,
Panda3dLightData,
+ Panda3dObjectData,
)
@@ -26,15 +27,17 @@ def test_simple_render(self):
label="obj",
mesh_path=Path(__file__).parent.joinpath("data/obj_000001.ply"),
mesh_units="mm",
- )
- ]
- )
+ ),
+ ],
+ ),
)
object_datas = [
Panda3dObjectData(
- label="obj", TWO=Transform((0, 0, 0, 1), (0, 0, 1)), color=(1, 0, 0, 1)
- )
+ label="obj",
+ TWO=Transform((0, 0, 0, 1), (0, 0, 1)),
+ color=(1, 0, 0, 1),
+ ),
]
camera_datas = [
Panda3dCameraData(
@@ -43,10 +46,10 @@ def test_simple_render(self):
[600.0, 0.0, 160.0],
[0.0, 600.0, 160.0],
[0.0, 0.0, 1.0],
- ]
+ ],
),
resolution=(320, 320),
- )
+ ),
]

light_datas = [