Merge pull request #118 from agimus-project/py38
fix type hints for python 3.8 compatibility
nim65s authored Jan 12, 2024
2 parents 5a07e70 + dddd0a9 commit f5f1faf
Showing 55 changed files with 258 additions and 245 deletions.
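
Every one of the 55 files follows the same pattern: PEP 585 subscripted built-in generics (list[int], dict[str, Any], tuple[...]) are only valid from Python 3.9 onward, so the annotations are switched to the equivalent typing aliases (List, Dict, Tuple), which also work on Python 3.8. A minimal sketch of the before/after pattern; the ChunkConfig dataclass and make_env_exports helper below are illustrative names only, not code from this repository.

# Python 3.8 compatibility sketch (illustrative names, not code from this repository).
# On 3.8, evaluating an annotation such as list[int] raises
# "TypeError: 'type' object is not subscriptable"; the typing aliases below
# express the same types and remain valid on 3.8.
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple


@dataclass
class ChunkConfig:
    dataset_id: str
    chunk_ids: Optional[List[int]] = None    # was: Optional[list[int]]
    env: Optional[Dict[str, str]] = None     # was: Optional[dict[str, str]]


def make_env_exports(cfg: ChunkConfig) -> Tuple[List[str], Dict[str, str]]:
    # was: -> tuple[list[str], dict[str, str]]
    env = cfg.env or {}
    return [f"export {k}={v}" for k, v in env.items()], env

Deferring evaluation with "from __future__ import annotations" would not have been enough here, since several of these annotations sit on dataclasses whose type hints are resolved at runtime (Hydra/OmegaConf structured configs), which is presumably why the explicit typing aliases were chosen.
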
3 changes: 2 additions & 1 deletion experiments/generate_dataset.py
@@ -2,6 +2,7 @@
 import time
 import typing as tp
 from dataclasses import dataclass
+from typing import List

 import hydra
 import numpy as np
@@ -15,7 +16,7 @@
 @dataclass
 class DatasetGenerationConfig:
     dataset_id: str
-    chunk_ids: tp.Optional[list[int]]
+    chunk_ids: tp.Optional[List[int]]
    debug: bool = False
    verbose: bool = True
    overwrite: bool = False
7 changes: 4 additions & 3 deletions experiments/job-runner/job_runner/configs.py
@@ -1,5 +1,6 @@
 import typing as tp
 from dataclasses import dataclass
+from typing import Dict, List

 from hydra.core.config_store import ConfigStore

@@ -31,21 +32,21 @@ class SlurmJobConfig(JobConfig):
     account: str
     qos: str
     time: str
-    additional_parameters: tp.Optional[dict[str, tp.Any]]
+    additional_parameters: tp.Optional[Dict[str, tp.Any]]


 @dataclass
 class CodeSnapshotConfig:
     snapshot_dir: tp.Optional[str]
     exclude_path: tp.Optional[str]
-    python_packages_dir: tp.Optional[list[str]] = None
+    python_packages_dir: tp.Optional[List[str]] = None


 @dataclass
 class JobEnvironmentConfig:
     conda_env: str
     code_snapshot: tp.Optional[CodeSnapshotConfig] = None
-    env: tp.Optional[dict[str, str]] = None
+    env: tp.Optional[Dict[str, str]] = None


 @dataclass
5 changes: 3 additions & 2 deletions experiments/job-runner/job_runner/utils.py
@@ -1,11 +1,12 @@
 import pathlib
 import typing as tp
+from typing import List

 import submitit
 from job_runner.configs import JobEnvironmentConfig, RunnerConfig


-def make_setup(cfg: JobEnvironmentConfig) -> list[str]:
+def make_setup(cfg: JobEnvironmentConfig) -> List[str]:
     setup = []
     if cfg.env:
         for k, v in cfg.env.items():
@@ -14,7 +15,7 @@ def make_setup(cfg: JobEnvironmentConfig) -> list[str]:


 def make_snapshots(
-    code_directories: list[pathlib.Path],
+    code_directories: List[pathlib.Path],
     output_dir: pathlib.Path,
     exclude: tp.Sequence[str] = (),
 ):
9 changes: 5 additions & 4 deletions experiments/make_shapenet_ids.py
@@ -4,14 +4,15 @@
 import typing as tp
 from collections import deque
 from dataclasses import dataclass
+from typing import Dict, List


 @dataclass
 class ShapeNetSynset:
     id: str
     name: str
-    parents: list[str]
-    children: list[str]
+    parents: List[str]
+    children: List[str]


 @dataclass
@@ -26,7 +27,7 @@ def read_models(shapenet_dir):
     # TODO: This probably has issues / is poorly implemented and very slow
     taxonomy = json.load(open(shapenet_dir / "taxonomy.json"))

-    id_to_synset: dict[int, ShapeNetSynset] = {}
+    id_to_synset: Dict[int, ShapeNetSynset] = {}

     for synset in taxonomy:
         synset_id = synset["synsetId"]
@@ -55,7 +56,7 @@ def get_names(synset_id, id_to_synset):
         return names

     models_path = shapenet_dir.glob("**/**/models/model_normalized.obj")
-    models: list[dict[str, tp.Union[int, str]]] = []
+    models: List[Dict[str, tp.Union[int, str]]] = []
     for n, model_path in enumerate(models_path):
         source_id = model_path.parent.parent.name
         synset_id = model_path.parent.parent.parent.name
(file path not shown)
@@ -1,6 +1,6 @@
 # Standard Library
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any, Dict, Optional

 # Third Party
 import torch
@@ -157,7 +157,7 @@ def get_save_dir(cfg: EvalConfig) -> Path:
 def run_eval(
     cfg: EvalConfig,
     save_dir: Optional[Path] = None,
-) -> dict[str, Any]:
+) -> Dict[str, Any]:
     """Run eval for a single setting on a single dataset.

     A single setting is a (detection_type, coarse_estimation_type) such
(file path not shown)
@@ -17,7 +17,7 @@
 # Standard Library
 import time
 from collections import defaultdict
-from typing import Optional
+from typing import Dict, Optional

 # Third Party
 import numpy as np
@@ -84,7 +84,7 @@ def run_inference_pipeline(
         obs_tensor: ObservationTensor,
         gt_detections: DetectionsType,
         initial_estimates: Optional[PoseEstimatesType] = None,
-    ) -> dict[str, PoseEstimatesType]:
+    ) -> Dict[str, PoseEstimatesType]:
         """Runs inference pipeline, extracts the results.

         Returns: A dict with keys
@@ -160,7 +160,7 @@ def run_inference_pipeline(
     def get_predictions(
         self,
         pose_estimator: PoseEstimator,
-    ) -> dict[str, PoseEstimatesType]:
+    ) -> Dict[str, PoseEstimatesType]:
         """Runs predictions.

         Returns: A dict with keys
(file path not shown)
@@ -1,6 +1,6 @@
 import time
 from collections import defaultdict
-from typing import Any, Optional
+from typing import Any, Optional, Tuple

 import numpy as np
 import torch
@@ -146,7 +146,7 @@ def run_inference_pipeline(
         coarse_estimates: Optional[PoseEstimatesType] = None,
         detection_th: float = 0.7,
         mask_th: float = 0.8,
-    ) -> tuple[PoseEstimatesType, dict]:
+    ) -> Tuple[PoseEstimatesType, dict]:
         timing_str = ""
         timer = SimpleTimer()
         timer.start()
@@ -248,7 +248,7 @@ def forward_coarse_model(
         n_iterations: int = 5,
         keep_all_outputs: bool = False,
         cuda_timer: bool = False,
-    ) -> tuple[dict, dict]:
+    ) -> Tuple[dict, dict]:
         """Runs the refiner model for the specified number of iterations.

         Will actually use the batched_model_predictions to stay within
@@ -357,7 +357,7 @@ def forward_refiner(
         n_iterations: int = 5,
         keep_all_outputs: bool = False,
         cuda_timer: bool = False,
-    ) -> tuple[dict, dict]:
+    ) -> Tuple[dict, dict]:
         """Runs the refiner model for the specified number of iterations.

         Will actually use the batched_model_predictions to stay within
(file path not shown)
@@ -1,4 +1,5 @@
 from collections import defaultdict
+from typing import Tuple

 import torch
 from torch.utils.data import DataLoader, TensorDataset
@@ -133,7 +134,7 @@ def forward_coarse_model(
         K,
         data_TCO_init,
         n_coarse_iterations,
-    ) -> tuple[PoseEstimatesType, dict]:
+    ) -> Tuple[PoseEstimatesType, dict]:
         return self.batched_model_predictions(
             self.coarse_model,
             images,
@@ -148,7 +149,7 @@ def forward_refiner(
         K,
         data_TCO,
         n_refiner_iterations,
-    ) -> tuple[dict, dict]:
+    ) -> Tuple[dict, dict]:
         return self.batched_model_predictions(
             self.refiner_model,
             images,
(file path not shown)
@@ -2,6 +2,7 @@
 import copy
 import os
 from pathlib import Path
+from typing import Dict, Tuple

 # Third Party
 from omegaconf import OmegaConf
@@ -68,7 +69,7 @@ def create_eval_cfg(
     detection_type: str,
     coarse_estimation_type: str,
     ds_name: str,
-) -> tuple[str, EvalConfig]:
+) -> Tuple[str, EvalConfig]:
     cfg = copy.deepcopy(cfg)

     cfg.inference.detection_type = detection_type
@@ -107,7 +108,7 @@ def run_full_eval(cfg: FullEvalConfig) -> None:
     # Iterate over each dataset
     for ds_name in cfg.ds_names:
         # create the EvalConfig objects that we will call `run_eval` on
-        eval_configs: dict[str, EvalConfig] = {}
+        eval_configs: Dict[str, EvalConfig] = {}
         for detection_type, coarse_estimation_type in cfg.detection_coarse_types:
             name, cfg_ = create_eval_cfg(
                 cfg,
(file path not shown)
@@ -5,7 +5,7 @@
 ########################
 # Add cosypose to my path -> dirty
 from pathlib import Path
-from typing import Union
+from typing import Tuple, Union

 # Third Party
 import numpy as np
@@ -47,7 +47,7 @@
 def load_observation(
     example_dir: Path,
     load_depth: bool = False,
-) -> tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
+) -> Tuple[np.ndarray, Union[None, np.ndarray], CameraData]:
     camera_data = CameraData.from_json((example_dir / "camera_data.json").read_text())

     rgb = np.array(Image.open(example_dir / "image_rgb.png"), dtype=np.uint8)
4 changes: 2 additions & 2 deletions happypose/pose_estimators/megapose/evaluation/data_utils.py
@@ -15,7 +15,7 @@


 # Standard Library
-from typing import Optional
+from typing import List, Optional

 # Third Party
 import numpy as np
@@ -30,7 +30,7 @@

 def parse_obs_data(
     obs: SceneObservation,
-    object_labels: Optional[list[str]] = None,
+    object_labels: Optional[List[str]] = None,
 ) -> PandasTensorCollection:
     """Parses object data into PandasTensorCollection.
6 changes: 3 additions & 3 deletions happypose/pose_estimators/megapose/evaluation/eval_config.py
@@ -16,7 +16,7 @@

 # Standard Library
 from dataclasses import dataclass
-from typing import Optional
+from typing import List, Optional

 # MegaPose
 from happypose.pose_estimators.megapose.inference.types import InferenceConfig
@@ -85,8 +85,8 @@ class EvalConfig:
 @dataclass
 class FullEvalConfig(EvalConfig):
     # Full eval
-    detection_coarse_types: Optional[list] = None
-    ds_names: Optional[list[str]] = None
+    detection_coarse_types: Optional[List] = None
+    ds_names: Optional[List[str]] = None
     run_bop_eval: bool = True
     eval_coarse_also: bool = False
     convert_only: bool = False
4 changes: 2 additions & 2 deletions happypose/pose_estimators/megapose/evaluation/evaluation.py
@@ -16,7 +16,7 @@

 # Standard Library
 from pathlib import Path
-from typing import Any, Optional
+from typing import Any, Dict, Optional

 # Third Party
 import torch
@@ -80,7 +80,7 @@ def get_save_dir(cfg: EvalConfig) -> Path:
 def run_eval(
     cfg: EvalConfig,
     save_dir: Optional[Path] = None,
-) -> dict[str, Any]:
+) -> Dict[str, Any]:
     """Run eval for a single setting on a single dataset.

     A single setting is a (detection_type, coarse_estimation_type) such
4 changes: 2 additions & 2 deletions happypose/pose_estimators/megapose/inference/depth_refiner.py
@@ -16,7 +16,7 @@

 # Standard Library
 from abc import ABC, abstractmethod
-from typing import Optional
+from typing import Optional, Tuple

 # Third Party
 import torch
@@ -33,7 +33,7 @@ def refine_poses(
         masks: Optional[torch.tensor] = None,
         depth: Optional[torch.tensor] = None,
         K: Optional[torch.tensor] = None,
-    ) -> tuple[PoseEstimatesType, dict]:
+    ) -> Tuple[PoseEstimatesType, dict]:
         """Run the depth refinement.

         Args:
4 changes: 2 additions & 2 deletions happypose/pose_estimators/megapose/inference/icp_refiner.py
@@ -15,7 +15,7 @@


 # Standard Library
-from typing import Optional
+from typing import Dict, Optional, Tuple

 # Third Party
 import cv2
@@ -236,7 +236,7 @@ def refine_poses(
         masks: Optional[torch.tensor] = None,
         depth: Optional[torch.tensor] = None,
         K: Optional[torch.tensor] = None,
-    ) -> tuple[PoseEstimatesType, dict]:
+    ) -> Tuple[PoseEstimatesType, Dict]:
         """Runs icp refinement. See superclass DepthRefiner for full documentation."""
         assert depth is not None
         assert K is not None