From 8ddfae8d7ad7cf679f90979339b9aaa42d93035b Mon Sep 17 00:00:00 2001 From: Guilhem Saurel Date: Tue, 17 Oct 2023 16:07:09 +0200 Subject: [PATCH] manual ruff fixes --- .../cosypose/evaluation/evaluation.py | 19 ++- .../evaluation/pred_runner/bop_predictions.py | 3 +- .../cosypose/evaluation/prediction_runner.py | 9 +- .../cosypose/cosypose/integrated/detector.py | 2 - .../cosypose/integrated/icp_refiner.py | 8 +- .../cosypose/integrated/pose_estimator.py | 3 +- .../cosypose/integrated/pose_predictor.py | 3 +- .../cosypose/lib3d/rigid_mesh_database.py | 6 +- .../cosypose/cosypose/models/efficientnet.py | 17 ++- .../cosypose/models/efficientnet_utils.py | 20 ++- .../cosypose/cosypose/models/wide_resnet.py | 3 +- .../cosypose/multiview/bundle_adjustment.py | 3 +- .../scripts/make_ycbv_compat_models.py | 3 +- .../cosypose/scripts/run_bop_inference.py | 6 +- .../scripts/run_colmap_reconstruction.py | 2 +- .../cosypose/scripts/run_cosypose_eval.py | 81 +++++++++--- .../cosypose/scripts/run_custom_scenario.py | 9 +- .../cosypose/scripts/run_dataset_recording.py | 2 +- .../cosypose/scripts/run_detection_eval.py | 8 +- .../scripts/run_inference_on_example.py | 13 +- .../cosypose/scripts/test_render_objects.py | 2 +- .../cosypose/cosypose/simulator/camera.py | 3 +- .../cosypose/cosypose/training/train_pose.py | 12 +- .../cosypose/utils/colmap_read_write_model.py | 11 +- .../cosypose/utils/cosypose_wrapper.py | 15 ++- .../cosypose/visualization/multiview.py | 2 +- .../cosypose/visualization/singleview.py | 2 +- .../megapose/evaluation/bop.py | 15 +-- .../megapose/evaluation/eval_config.py | 3 +- .../megapose/evaluation/evaluation.py | 9 +- .../megapose/evaluation/prediction_runner.py | 9 +- .../megapose/inference/pose_estimator.py | 10 +- .../megapose/inference/teaserpp_refiner.py | 4 +- .../megapose/models/pose_rigid.py | 7 +- .../pose_estimators/megapose/models/resnet.py | 29 +++-- .../megapose/models/torchvision_resnet.py | 30 +++-- .../megapose/models/wide_resnet.py | 3 +- .../megapose/scripts/generate_shapenet_pbr.py | 118 ++++++++++-------- .../scripts/make_shapenet_statistics.py | 20 +-- .../scripts/run_inference_on_datasettemp.py | 4 +- .../megapose/scripts/run_megapose_training.py | 3 +- .../training/megapose_forward_loss.py | 5 +- .../megapose/training/train_megapose.py | 3 +- .../megapose/training/training_config.py | 3 +- .../megapose/training/utils.py | 14 ++- happypose/toolbox/datasets/datasets_cfg.py | 2 +- happypose/toolbox/datasets/deepim_modelnet.py | 25 ++-- happypose/toolbox/datasets/object_dataset.py | 38 +++--- happypose/toolbox/datasets/pose_dataset.py | 3 +- happypose/toolbox/datasets/scene_dataset.py | 21 ++-- .../datasets/shapenet_object_dataset.py | 2 +- happypose/toolbox/lib3d/cosypose_ops.py | 6 +- .../toolbox/lib3d/rigid_mesh_database.py | 8 +- happypose/toolbox/renderer/geometry.py | 3 - happypose/toolbox/renderer/utils.py | 5 +- happypose/toolbox/utils/download.py | 4 +- happypose/toolbox/utils/logs_bokeh.py | 3 +- happypose/toolbox/utils/webdataset.py | 3 +- .../toolbox/visualization/meshcat_utils.py | 8 +- pyproject.toml | 2 +- 60 files changed, 428 insertions(+), 261 deletions(-) diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py index 8fe38acd..f5dab43e 100644 --- a/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py +++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/evaluation.py @@ -44,10 +44,12 @@ ) from 
happypose.pose_estimators.megapose.evaluation.runner_utils import format_results
 from happypose.pose_estimators.megapose.inference.icp_refiner import ICPRefiner
-from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
 
 # Pose estimator
-# from happypose.pose_estimators.megapose.inference.teaserpp_refiner import TeaserppRefiner
+from happypose.pose_estimators.megapose.inference.teaserpp_refiner import (
+    TeaserppRefiner,
+)
+from happypose.toolbox.datasets.datasets_cfg import make_object_dataset
 from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
 from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer
 from happypose.toolbox.utils.distributed import get_rank, get_tmp_dir
@@ -86,7 +88,9 @@ def load_pose_models(coarse_run_id, refiner_run_id, n_workers):
     # object_ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_cad')
     # object_ds = make_object_dataset(cfg.object_ds_name)
     # mesh_db = MeshDataBase.from_object_ds(object_ds)
-    # renderer = BulletBatchRenderer(object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer)
+    # renderer = BulletBatchRenderer(
+    #     object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer
+    # )
 
     # object_dataset = make_object_dataset("ycbv")
@@ -210,9 +214,14 @@ def run_eval(
     # See https://stackoverflow.com/a/53287330
     assert cfg.coarse_run_id is not None
     assert cfg.refiner_run_id is not None
-    # TODO (emaitre): This fuction seems to take the wrong parameters. Trying to fix this
+    # TODO (emaitre): This function seems to take the wrong parameters. Trying to fix
+    # this.
     """
-    coarse_model, refiner_model, mesh_db = happypose.toolbox.inference.utils.load_pose_models(
+    (
+        coarse_model,
+        refiner_model,
+        mesh_db,
+    ) = happypose.toolbox.inference.utils.load_pose_models(
         coarse_run_id=cfg.coarse_run_id,
         refiner_run_id=cfg.refiner_run_id,
         n_workers=cfg.n_rendering_workers,
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
index 70995d02..475f4cc0 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/pred_runner/bop_predictions.py
@@ -152,7 +152,8 @@ def get_preds():
             n_dets = len(this_batch_detections)
 
             logger.debug(
-                f"Full predictions: {n_dets} detections + pose estimation in {duration:.3f} s",
+                f"Full predictions: {n_dets} detections + pose estimation "
+                f"in {duration:.3f} s",
             )
             logger.debug(f"{'-'*80}")
             return this_batch_detections, all_preds, duration
diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
index 95da88d4..e1178393 100644
--- a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
+++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py
@@ -89,7 +89,8 @@ def run_inference_pipeline(
         Returns:
             A dict with keys
             - 'final': final preds
-            - 'refiner/final': preds at final refiner iteration (before depth refinement)
+            - 'refiner/final': preds at final refiner iteration (before depth
+              refinement)
             - 'depth_refinement': preds after depth refinement.
@@ -130,14 +131,16 @@ def run_inference_pipeline( # - 'refiner/iteration=5` # - `depth_refiner` # Note: Since we support multi-hypotheses we need to potentially - # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5` things for the ones that were actually the highest scoring at the end. + # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5` + # things for the ones that were actually the highest scoring at the end. all_preds = {} data_TCO_refiner = extra_data["refiner"]["preds"] + k_0 = f"refiner/iteration={self.inference_cfg.n_refiner_iterations}" all_preds = { "final": preds, - f"refiner/iteration={self.inference_cfg.n_refiner_iterations}": data_TCO_refiner, + k_0: data_TCO_refiner, "refiner/final": data_TCO_refiner, "coarse": extra_data["coarse"]["preds"], } diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py b/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py index 5040e822..4b9a36ac 100644 --- a/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py +++ b/happypose/pose_estimators/cosypose/cosypose/integrated/detector.py @@ -4,8 +4,6 @@ import pandas as pd import torch -import happypose.pose_estimators.cosypose.cosypose.utils.tensor_collection as tc - # MegaPose import happypose.toolbox.utils.tensor_collection as tc from happypose.toolbox.inference.detector import DetectorModule diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py b/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py index ccd2cf20..d2180d27 100644 --- a/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py +++ b/happypose/pose_estimators/cosypose/cosypose/integrated/icp_refiner.py @@ -173,8 +173,12 @@ def icp_refinement( # import trimesh # print(points_src.shape, points_tgt.shape) - # trimesh.Trimesh(vertices=points_src[:, :3], normals=points_src[:, 3:]).export(DEBUG_DATA_DIR / 'src.ply') - # trimesh.Trimesh(vertices=points_tgt[:, :3], normals=points_tgt[:, 3:]).export(DEBUG_DATA_DIR / 'tgt.ply') + # trimesh.Trimesh(vertices=points_src[:, :3], normals=points_src[:, 3:]).export( + # DEBUG_DATA_DIR / "src.ply" + # ) + # trimesh.Trimesh(vertices=points_tgt[:, :3], normals=points_tgt[:, 3:]).export( + # DEBUG_DATA_DIR / "tgt.ply" + # ) # raise ValueError tolerence = 0.05 diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py index be9df6a3..f2cfca6d 100644 --- a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py +++ b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_estimator.py @@ -113,7 +113,8 @@ def batched_model_predictions(self, model, images, K, obj_data, n_iterations=1): preds[f"iteration={n}"].append(batch_preds) logger.debug( - f"Pose prediction on {len(obj_data)} detections (n_iterations={n_iterations}): {timer.stop()}", + f"Pose prediction on {len(obj_data)} detections " + f"(n_iterations={n_iterations}): {timer.stop()}", ) preds = dict(preds) for k, v in preds.items(): diff --git a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py index c13789e3..3dc06210 100644 --- a/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py +++ b/happypose/pose_estimators/cosypose/cosypose/integrated/pose_predictor.py @@ -66,7 +66,8 @@ def batched_model_predictions(self, model, images, K, obj_data, n_iterations=1): 
preds[f"iteration={n}"].append(batch_preds) logger.debug( - f"Pose prediction on {len(obj_data)} detections (n_iterations={n_iterations}): {timer.stop()}", + f"Pose prediction on {len(obj_data)} detections " + f"(n_iterations={n_iterations}): {timer.stop()}", ) preds = dict(preds) for k, v in preds.items(): diff --git a/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py b/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py index 0fc02427..db8ac1b3 100644 --- a/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py +++ b/happypose/pose_estimators/cosypose/cosypose/lib3d/rigid_mesh_database.py @@ -16,7 +16,7 @@ class MeshDataBase: def __init__(self, obj_list): self.infos = {obj["label"]: obj for obj in obj_list} self.meshes = { - l: trimesh.load(obj["mesh_path"]) for l, obj in self.infos.items() + m: trimesh.load(obj["mesh_path"]) for m, obj in self.infos.items() } @staticmethod @@ -92,9 +92,9 @@ def n_sym_mapping(self): return {label: obj["n_sym"] for label, obj in self.infos.items()} def select(self, labels): - ids = [self.label_to_id[l] for l in labels] + ids = [self.label_to_id[label] for label in labels] return Meshes( - infos=[self.infos[l] for l in labels], + infos=[self.infos[label] for label in labels], labels=self.labels[ids], points=self.points[ids], symmetries=self.symmetries[ids], diff --git a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py index 202585d6..6c2a3688 100644 --- a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py +++ b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet.py @@ -144,12 +144,16 @@ def forward(self, inputs, drop_connect_rate=None): return x def set_swish(self, memory_efficient=True): - """Sets swish function as memory efficient (for training) or standard (for export).""" + """ + Sets swish function as memory efficient (for training) or standard (for export). + """ self._swish = MemoryEfficientSwish() if memory_efficient else Swish() class EfficientNet(nn.Module): - """An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods. + """ + An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained + methods. Args: ---- @@ -238,7 +242,9 @@ def __init__(self, blocks_args=None, global_params=None, in_channels=3): self._swish = MemoryEfficientSwish() def set_swish(self, memory_efficient=True): - """Sets swish function as memory efficient (for training) or standard (for export).""" + """ + Sets swish function as memory efficient (for training) or standard (for export). + """ self._swish = MemoryEfficientSwish() if memory_efficient else Swish() for block in self._blocks: block.set_swish(memory_efficient) @@ -261,7 +267,10 @@ def extract_features(self, inputs): return x def forward(self, inputs): - """Calls extract_features to extract features, applies final linear layer, and returns logits.""" + """ + Calls extract_features to extract features, applies final linear layer, and + returns logits. 
+ """ inputs.size(0) # Convolution layers x = self.extract_features(inputs) diff --git a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py index 92c7fc75..bf7db135 100644 --- a/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py +++ b/happypose/pose_estimators/cosypose/cosypose/models/efficientnet_utils.py @@ -1,6 +1,8 @@ """Copied from https://github.com/lukemelas/EfficientNet-PyTorch -This file contains helper functions for building the model and for loading model parameters. -These helper functions are built to mirror those in the official TensorFlow implementation. +This file contains helper functions for building the model and for loading model +parameters. +These helper functions are built to mirror those in the official TensorFlow +implementation. """ import collections @@ -120,7 +122,8 @@ def drop_connect(inputs, p, training): def get_same_padding_conv2d(image_size=None): - """Chooses static padding if you have specified an image size, and dynamic padding otherwise. + """Chooses static padding if you have specified an image size, and dynamic padding + otherwise. Static padding is necessary for ONNX exporting of models. """ if image_size is None: @@ -193,7 +196,9 @@ def __init__( # Calculate padding based on image size and save it assert image_size is not None - ih, iw = image_size if type(image_size) == list else [image_size, image_size] + ih, iw = ( + image_size if isinstance(image_size, list) else [image_size, image_size] + ) kh, kw = self.weight.size()[-2:] sh, sw = self.stride oh, ow = math.ceil(ih / sh), math.ceil(iw / sw) @@ -252,7 +257,9 @@ def efficientnet_params(model_name): class BlockDecoder: - """Block Decoder for readability, straight from the official TensorFlow repository.""" + """ + Block Decoder for readability, straight from the official TensorFlow repository. + """ @staticmethod def _decode_block_string(block_string): @@ -377,7 +384,8 @@ def get_model_params(model_name, override_params): else: raise NotImplementedError("model name is not pre-defined: %s" % model_name) if override_params: - # ValueError will be raised here if override_params has fields not included in global_params. + # ValueError will be raised here if override_params has fields not included in + # global_params. global_params = global_params._replace(**override_params) return blocks_args, global_params diff --git a/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py b/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py index b41a2792..4ef42a6e 100644 --- a/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py +++ b/happypose/pose_estimators/cosypose/cosypose/models/wide_resnet.py @@ -17,7 +17,8 @@ def conv3x3(in_planes, out_planes, stride=1): class BasicBlockV2(nn.Module): r"""BasicBlock V2 from - `"Identity Mappings in Deep Residual Networks"`_ paper. + `"Identity Mappings in Deep Residual Networks" + `_ paper. This is used for ResNet V2 for 18, 34 layers. 
Args: diff --git a/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py b/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py index 0fc3258a..6d2a0156 100644 --- a/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py +++ b/happypose/pose_estimators/cosypose/cosypose/multiview/bundle_adjustment.py @@ -221,7 +221,8 @@ def align_TCO_cand(self, TWO_9d, TCW_9d): def forward_jacobian(self, TWO_9d, TCW_9d, residuals_threshold): _, TCO_cand_aligned = self.align_TCO_cand(TWO_9d, TCW_9d) - # NOTE: This could be *much* faster by computing gradients manually, reducing number of operations. + # NOTE: This could be *much* faster by computing gradients manually, reducing + # number of operations. cand_ids, view_ids, obj_ids, point_ids, xy_ids = ( self.residuals_ids[k] for k in ("cand_id", "view_id", "obj_id", "point_id", "xy_id") diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py b/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py index 9d6678f7..70e0cbd6 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/make_ycbv_compat_models.py @@ -13,7 +13,8 @@ orig_names = (ds_dir / "ycbv_friendly_names.txt").read_text() orig_names = { - str(int(l.split(" ")[0])): l.split(" ")[1] for l in orig_names.split("\n")[:-1] + str(int(line.split(" ")[0])): line.split(" ")[1] + for line in orig_names.split("\n")[:-1] } infos = json.loads((models_dir / "models_info.json").read_text()) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py index c6dee52c..997c955e 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_bop_inference.py @@ -22,10 +22,10 @@ make_object_dataset, make_scene_dataset, ) -from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( +from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501 MultiViewWrapper, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.bop_predictions import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.bop_predictions import ( # noqa: E501 BopPredictionRunner, ) from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import ( @@ -46,7 +46,7 @@ from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import ( MeshDataBase, ) -from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( +from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( # noqa: E501 BulletBatchRenderer, ) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py index 7ef2d504..26d87463 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_colmap_reconstruction.py @@ -9,7 +9,7 @@ from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import ( make_scene_dataset, ) -from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( +from 
happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501 MultiViewWrapper, ) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py index 81611b23..45451794 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py @@ -1,3 +1,4 @@ +# ruff: noqa: E402 from happypose.pose_estimators.cosypose.cosypose.utils.tqdm import patch_tqdm patch_tqdm() @@ -27,16 +28,16 @@ make_scene_dataset, ) from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import ListSampler -from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( +from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( # noqa: E501 MultiViewWrapper, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.pose_eval import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.pose_eval import ( # noqa: E501 PoseEvaluation, ) from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.pose_meters import ( PoseErrorMeter, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.multiview_predictions import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.multiview_predictions import ( # noqa: E501 MultiviewPredictionRunner, ) from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import ( @@ -53,7 +54,7 @@ from happypose.pose_estimators.cosypose.cosypose.lib3d.rigid_mesh_database import ( MeshDataBase, ) -from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( +from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_batch_renderer import ( # noqa: E501 BulletBatchRenderer, ) from happypose.pose_estimators.cosypose.cosypose.training.pose_models_cfg import ( @@ -208,7 +209,8 @@ def get_pose_meters(scene_ds, ds_name): if "tless" in ds_name: object_ds_name = "tless.eval" elif "ycbv" in ds_name: - object_ds_name = "ycbv.bop-compat.eval" # This is important for definition of symmetric objects + # This is important for definition of symmetric objects + object_ds_name = "ycbv.bop-compat.eval" else: raise ValueError @@ -262,12 +264,14 @@ def get_pose_meters(scene_ds, ds_name): if "tless" in ds_name: meters.update( { - f"{error_type}_ntop=BOP_matching=BOP": PoseErrorMeter( # For ADD-S<0.1d + # For ADD-S<0.1d + f"{error_type}_ntop=BOP_matching=BOP": PoseErrorMeter( error_type=error_type, match_threshold=0.1, **base_kwargs, ), - f"{error_type}_ntop=ALL_matching=BOP": PoseErrorMeter( # For mAP + # For mAP + f"{error_type}_ntop=ALL_matching=BOP": PoseErrorMeter( error_type=error_type, match_threshold=0.1, consider_all_predictions=True, @@ -530,33 +534,70 @@ def main(): metrics_to_print = {} if "ycbv" in ds_name: + k_0 = "posecnn/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean" + k_1 = ( + f"{det_key}/refiner/iteration={n_refiner_iterations}/" + f"ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean" + ) + k_2 = ( + f"{det_key}/refiner/iteration={n_refiner_iterations}/" + f"ADD-S_ntop=1_matching=CLASS/AUC/objects/mean" + ) + k_3 = ( + f"{det_key}/ba_output+all_cand/ADD(-S)_ntop=1_matching=" + f"CLASS/AUC/objects/mean" + ) + k_4 = ( + f"{det_key}/ba_output+all_cand/ADD-S_ntop=1_matching=" + f"CLASS/AUC/objects/mean" + ) metrics_to_print.update( { - 
"posecnn/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean": "PoseCNN/AUC of ADD(-S)", - f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean": "Singleview/AUC of ADD(-S)", - f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean": "Singleview/AUC of ADD-S", - f"{det_key}/ba_output+all_cand/ADD(-S)_ntop=1_matching=CLASS/AUC/objects/mean": f"Multiview (n={args.n_views})/AUC of ADD(-S)", - f"{det_key}/ba_output+all_cand/ADD-S_ntop=1_matching=CLASS/AUC/objects/mean": f"Multiview (n={args.n_views})/AUC of ADD-S", + k_0: "PoseCNN/AUC of ADD(-S)", + k_1: "Singleview/AUC of ADD(-S)", + k_2: "Singleview/AUC of ADD-S", + k_3: f"Multiview (n={args.n_views})/AUC of ADD(-S)", + k_4: f"Multiview (n={args.n_views})/AUC of ADD-S", }, ) elif "tless" in ds_name: + k_0 = ( + f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=" + f"BOP_matching=OVERLAP/AUC/objects/mean" + ) + k_1 = ( + f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=" + f"BOP_matching=BOP/0.1d" + ) + k_2 = ( + f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=" + f"ALL_matching=BOP/mAP" + ) + k_3 = ( + f"{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=" + f"OVERLAP/AUC/objects/mean" + ) + k_4 = f"{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=BOP/0.1d" + k_5 = f"{det_key}/ba_output+all_cand/ADD-S_ntop=ALL_matching=BOP/mAP" metrics_to_print.update( { - f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean": "Singleview/AUC of ADD-S", - # f'{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=BOP_matching=BOP/0.1d': f'Singleview/ADD-S<0.1d', - f"{det_key}/refiner/iteration={n_refiner_iterations}/ADD-S_ntop=ALL_matching=BOP/mAP": "Singleview/mAP@ADD-S<0.1d", - f"{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=OVERLAP/AUC/objects/mean": f"Multiview (n={args.n_views})/AUC of ADD-S", - # f'{det_key}/ba_output+all_cand/ADD-S_ntop=BOP_matching=BOP/0.1d': f'Multiview (n={args.n_views})/ADD-S<0.1d', - f"{det_key}/ba_output+all_cand/ADD-S_ntop=ALL_matching=BOP/mAP": f"Multiview (n={args.n_views}/mAP@ADD-S<0.1d)", + k_0: "Singleview/AUC of ADD-S", + # k_1: f'Singleview/ADD-S<0.1d', + k_2: "Singleview/mAP@ADD-S<0.1d", + k_3: f"Multiview (n={args.n_views})/AUC of ADD-S", + # k_4: f'Multiview (n={args.n_views})/ADD-S<0.1d', + k_5: f"Multiview (n={args.n_views}/mAP@ADD-S<0.1d)", }, ) else: raise ValueError + k_0 = f"{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm" + k_1 = f"{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm" metrics_to_print.update( { - f"{det_key}/ba_input/ADD-S_ntop=BOP_matching=OVERLAP/norm": "Multiview before BA/ADD-S (m)", - f"{det_key}/ba_output/ADD-S_ntop=BOP_matching=OVERLAP/norm": "Multiview after BA/ADD-S (m)", + k_0: "Multiview before BA/ADD-S (m)", + k_1: "Multiview after BA/ADD-S (m)", }, ) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py index 06f0c237..e4d96469 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_custom_scenario.py @@ -142,7 +142,8 @@ def main(): "--ransac_dist_threshold", default=0.02, type=float, - help="Threshold (in meters) on symmetric distance to consider a tentative match an inlier", + help="Threshold (in meters) on symmetric distance to consider " + "a tentative match an inlier", 
) parser.add_argument( "--ba_n_iter", @@ -217,7 +218,8 @@ def main(): view_group_dir.mkdir(exist_ok=True, parents=True) logger.info( - f"Subscene {view_group} has {len(objects_)} objects and {len(cameras_)} cameras.", + f"Subscene {view_group} has {len(objects_)} objects and " + f"{len(cameras_)} cameras.", ) predicted_scene_path = view_group_dir / "predicted_scene.json" @@ -227,7 +229,8 @@ def main(): logger.info(f"Wrote predicted scene (objects+cameras): {predicted_scene_path}") logger.info( - f"Wrote predicted objects with pose expressed in camera frame: {scene_reprojected_path}", + f"Wrote predicted objects with pose expressed in camera frame: " + f"{scene_reprojected_path}", ) # if args.no_visualization: diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py index 901ee44b..5d37da5a 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_dataset_recording.py @@ -111,7 +111,7 @@ def main(): print(f"RESUMING {Fore.RED} {cfg.ds_name} {Style.RESET_ALL} \n ") else: print( - f"STARTING DATASET RECORDING {Fore.GREEN} {cfg.ds_name} {Style.RESET_ALL} \n ", + f"STARTING DATASET RECORDING {Fore.GREEN} {cfg.ds_name} {Style.RESET_ALL}" ) record_dataset(cfg) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py index 69524cdc..8fbf89af 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_detection_eval.py @@ -9,18 +9,18 @@ import torch.multiprocessing import yaml -from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR +from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR, RESULTS_DIR from happypose.pose_estimators.cosypose.cosypose.datasets.bop import remap_bop_targets from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import ( make_scene_dataset, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.detection_eval import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.eval_runner.detection_eval import ( # noqa: E501 DetectionEvaluation, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.detection_meters import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.meters.detection_meters import ( # noqa: E501 DetectionMeter, ) -from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.detections import ( +from happypose.pose_estimators.cosypose.cosypose.evaluation.pred_runner.detections import ( # noqa: E501 DetectionRunner, ) from happypose.pose_estimators.cosypose.cosypose.evaluation.runner_utils import ( diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py index b42de581..8e798371 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py @@ -32,7 +32,8 @@ from happypose.toolbox.visualization.bokeh_plotter import BokehPlotter from happypose.toolbox.visualization.utils import make_contour_overlay -# from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import BulletSceneRenderer +# from 
happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer
+# import BulletSceneRenderer
 
 
 ########################
@@ -131,8 +132,10 @@ def rendering(predictions, example_dir):
 def save_predictions(example_dir, renderings):
     rgb_render = renderings.rgb
     rgb, _, _ = load_observation(example_dir, load_depth=False)
-    # render_prediction_wrt_camera calls BulletSceneRenderer.render_scene using only one camera at pose Identity and return only rgb values
-    # BulletSceneRenderer.render_scene: gets a "object list" (prediction like object), a list of camera infos (with Km pose, res) and renders
+    # render_prediction_wrt_camera calls BulletSceneRenderer.render_scene using
+    # only one camera at pose Identity and returns only rgb values
+    # BulletSceneRenderer.render_scene: gets an "object list" (prediction-like
+    # object), a list of camera infos (with Km pose, res) and renders
     # a "camera observation" for each camera/viewpoint
     # Actually, renders: rgb, mask, depth, near, far
     # rgb_render = render_prediction_wrt_camera(renderer, preds, cam)
@@ -184,7 +187,9 @@ def run_inference(
     set_logging_level("info")
     parser = argparse.ArgumentParser()
     parser.add_argument("example_name")
-    # parser.add_argument("--model", type=str, default="megapose-1.0-RGB-multi-hypothesis")
+    # parser.add_argument(
+    #     "--model", type=str, default="megapose-1.0-RGB-multi-hypothesis"
+    # )
     parser.add_argument("--dataset", type=str, default="ycbv")
     # parser.add_argument("--vis-detections", action="store_true")
     parser.add_argument("--run-inference", action="store_true", default=True)
diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py b/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
index e91c6913..03ac3534 100644
--- a/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
+++ b/happypose/pose_estimators/cosypose/cosypose/scripts/test_render_objects.py
@@ -2,7 +2,7 @@
 import torch
 from tqdm import tqdm
 
-from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import (
+from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import (  # noqa: E501
     BulletSceneRenderer,
 )
 
diff --git a/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py b/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
index 11685111..dbcd4c05 100644
--- a/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
+++ b/happypose/pose_estimators/cosypose/cosypose/simulator/camera.py
@@ -61,7 +61,8 @@ def __init__(self, resolution=(320, 240), near=0.01, far=10, client_id=0):
         self.mask_link_index(True)
         self.casts_shadow(True)
 
-        # Transform between standard camera coordinate (z forward) and OPENGL camera coordinate system
+        # Transform between standard camera coordinate (z forward) and OPENGL camera
+        # coordinate system
         wxyz = transforms3d.euler.euler2quat(np.pi / 2, 0, 0, axes="rxyz")
         xyzw = [*wxyz[1:], wxyz[0]]
         self.TCCGL = Transform(xyzw, (0, 0, 0))
diff --git a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
index a557358e..39163c1a 100644
--- a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
+++ b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py
@@ -140,8 +140,12 @@ def load_model(run_id):
     # scene_ds_pred = MultiViewWrapper(scene_ds, n_views=1)
 
     # Predictions
-    # pred_runner = MultiviewPredictionRunner(scene_ds_pred, batch_size=1,
-    #
n_workers=args.n_dataloader_workers, cache_data=False) + # pred_runner = MultiviewPredictionRunner( + # scene_ds_pred, + # batch_size=1, + # n_workers=args.n_dataloader_workers, + # cache_data=False, + # ) inference = { "detection_type": "gt", @@ -217,7 +221,9 @@ def load_model(run_id): meters = {k.split("_")[0]: v for k, v in meters.items()} list(iter(pred_runner.sampler)) print(scene_ds.frame_index) - # scene_ds_ids = np.concatenate(scene_ds.frame_index.loc[mv_group_ids, 'scene_ds_ids'].values) + # scene_ds_ids = np.concatenate( + # scene_ds.frame_index.loc[mv_group_ids, "scene_ds_ids"].values + # ) # sampler = ListSampler(scene_ds_ids) eval_runner = EvaluationRunner( scene_ds, diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py b/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py index b2870d45..a604b7aa 100644 --- a/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py +++ b/happypose/pose_estimators/cosypose/cosypose/utils/colmap_read_write_model.py @@ -297,10 +297,13 @@ def write_images_text(images, path): mean_observations = sum( (len(img.point3D_ids) for _, img in images.items()), ) / len(images) - HEADER = "# Image list with two lines of data per image:\n" - "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n" - "# POINTS2D[] as (X, Y, POINT3D_ID)\n" - f"# Number of images: {len(images)}, mean observations per image: {mean_observations}\n" + HEADER = ( + "# Image list with two lines of data per image:\n" + "# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\n" + "# POINTS2D[] as (X, Y, POINT3D_ID)\n" + f"# Number of images: {len(images)}, mean observations per image: " + f"{mean_observations}\n" + ) with open(path, "w") as fid: fid.write(HEADER) diff --git a/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py b/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py index 349ea0a2..6507ca41 100644 --- a/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py +++ b/happypose/pose_estimators/cosypose/cosypose/utils/cosypose_wrapper.py @@ -39,7 +39,8 @@ create_model_refiner, ) -# from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg import make_scene_dataset, make_object_dataset +# from happypose.pose_estimators.cosypose.cosypose.datasets.datasets_cfg +# import make_scene_dataset, make_object_dataset from happypose.toolbox.datasets.datasets_cfg import make_object_dataset # Pose estimator @@ -64,7 +65,9 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset: assert not mesh_path, f"there multiple meshes in the {label} directory" mesh_path = fn assert mesh_path, f"couldnt find a obj or ply mesh for {label}" - rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units)) + rigid_objects.append( + RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units) + ) # TODO: fix mesh units rigid_object_dataset = RigidObjectDataset(rigid_objects) return rigid_object_dataset @@ -107,7 +110,9 @@ def load_pose_models(coarse_run_id, refiner_run_id, n_workers): # object_ds = BOPObjectDataset(BOP_DS_DIR / 'tless/models_cad') # object_ds = make_object_dataset(cfg.object_ds_name) # mesh_db = MeshDataBase.from_object_ds(object_ds) - # renderer = BulletBatchRenderer(object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer) + # renderer = BulletBatchRenderer( + # object_set=cfg.urdf_ds_name, n_workers=n_workers, gpu_renderer=gpu_renderer + # ) # object_dataset = 
make_object_dataset("ycbv") @@ -122,7 +127,9 @@ def load_pose_models(coarse_run_id, refiner_run_id, n_workers): def load_model(run_id): run_dir = EXP_DIR / run_id - # cfg = yaml.load((run_dir / 'config.yaml').read_text(), Loader=yaml.FullLoader) + # cfg = yaml.load( + # (run_dir / "config.yaml").read_text(), Loader=yaml.FullLoader + # ) cfg = yaml.load( (run_dir / "config.yaml").read_text(), Loader=yaml.UnsafeLoader, diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py b/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py index c9c3b4af..404ff313 100644 --- a/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py +++ b/happypose/pose_estimators/cosypose/cosypose/visualization/multiview.py @@ -10,7 +10,7 @@ from happypose.pose_estimators.cosypose.cosypose.lib3d.rotations import euler2quat from happypose.pose_estimators.cosypose.cosypose.lib3d.transform import Transform from happypose.pose_estimators.cosypose.cosypose.lib3d.transform_ops import invert_T -from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import ( +from happypose.pose_estimators.cosypose.cosypose.rendering.bullet_scene_renderer import ( # noqa: E501 BulletSceneRenderer, ) diff --git a/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py b/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py index 263dd323..e651489b 100644 --- a/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py +++ b/happypose/pose_estimators/cosypose/cosypose/visualization/singleview.py @@ -3,7 +3,7 @@ from happypose.pose_estimators.cosypose.cosypose.datasets.augmentations import ( CropResizeToAspectAugmentation, ) -from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.augmentation_wrapper import ( +from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.augmentation_wrapper import ( # noqa: E501 AugmentationWrapper, ) diff --git a/happypose/pose_estimators/megapose/evaluation/bop.py b/happypose/pose_estimators/megapose/evaluation/bop.py index 870c68f6..57bf0dc9 100644 --- a/happypose/pose_estimators/megapose/evaluation/bop.py +++ b/happypose/pose_estimators/megapose/evaluation/bop.py @@ -50,7 +50,6 @@ ################################## ################################## -import os # Official Task 4 detections (CNOS fastSAM) EXTERNAL_DETECTIONS_FILES = { @@ -285,8 +284,10 @@ def load_sam_predictions(ds_dir_name, scene_ds_dir): """ # dets_lst: list of dictionary, each element = detection of one object in an image $ df_all_dets[0].keys() - > ['scene_id', 'image_id', 'category_id', 'bbox', 'score', 'time', 'segmentation'] - - For the evaluation of Megapose, we only need the 'scene_id', 'image_id', 'category_id', 'score', 'time' and 'bbox' + > ['scene_id', 'image_id', 'category_id', 'bbox', 'score', 'time', + 'segmentation'] + - For the evaluation of Megapose, we only need the 'scene_id', 'image_id', + 'category_id', 'score', 'time' and 'bbox' - We also need need to change the format of bounding boxes as explained below """ dets_lst = [] @@ -332,8 +333,8 @@ def get_sam_detections(data, df_all_dets, df_targets, dt_det): ################# # Filter detections based on 2 criteria - # - 1) Localization 6D task: we can assume that we know which object category and how many instances - # are present in the image + # - 1) Localization 6D task: we can assume that we know which object category and + # how many instances are present in the image obj_ids = df_targets_scene_img.obj_id.to_list() 
df_dets_scene_img_obj_filt = df_dets_scene_img[
         df_dets_scene_img["category_id"].isin(obj_ids)
     ]
@@ -347,8 +348,8 @@ def get_sam_detections(data, df_all_dets, df_targets, dt_det):
 
     # TODO: retain only corresponding inst_count number for each detection category_id
 
-    # - 2) Retain detections with best cnos scores (kind of redundant with finalized 1) )
-    # based on expected number of objects in the scene (from groundtruth)
+    # - 2) Retain detections with best cnos scores (kind of redundant with finalized 1)
+    # ) based on expected number of objects in the scene (from groundtruth)
     nb_gt_dets = df_targets_scene_img.inst_count.sum()
 
     # TODO: put that as a parameter somewhere?
diff --git a/happypose/pose_estimators/megapose/evaluation/eval_config.py b/happypose/pose_estimators/megapose/evaluation/eval_config.py
index 663854ac..1eccf2d8 100644
--- a/happypose/pose_estimators/megapose/evaluation/eval_config.py
+++ b/happypose/pose_estimators/megapose/evaluation/eval_config.py
@@ -47,7 +47,8 @@ class EvalConfig:
     2. If `run_id` is None, then use `config_id`, `run_comment`and `run_postfix`
        to create a `run_id`
 
-    In 2., the parameters of the config are set-up using the function `update_cfg_with_config_id`.
+    In 2., the parameters of the config are set up using the function
+    `update_cfg_with_config_id`.
     """
 
     # Network
diff --git a/happypose/pose_estimators/megapose/evaluation/evaluation.py b/happypose/pose_estimators/megapose/evaluation/evaluation.py
index f783a416..8581da70 100644
--- a/happypose/pose_estimators/megapose/evaluation/evaluation.py
+++ b/happypose/pose_estimators/megapose/evaluation/evaluation.py
@@ -141,9 +141,14 @@ def run_eval(
     # See https://stackoverflow.com/a/53287330
    assert cfg.coarse_run_id is not None
     assert cfg.refiner_run_id is not None
-    # TODO (emaitre): This fuction seems to take the wrong parameters. Trying to fix this
+    # TODO (emaitre): This function seems to take the wrong parameters.
+    # Trying to fix this
     """
-    coarse_model, refiner_model, mesh_db = happypose.toolbox.inference.utils.load_pose_models(
+    (
+        coarse_model,
+        refiner_model,
+        mesh_db,
+    ) = happypose.toolbox.inference.utils.load_pose_models(
         coarse_run_id=cfg.coarse_run_id,
         refiner_run_id=cfg.refiner_run_id,
         n_workers=cfg.n_rendering_workers,
diff --git a/happypose/pose_estimators/megapose/evaluation/prediction_runner.py b/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
index 903a4c40..f722f2d6 100644
--- a/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
+++ b/happypose/pose_estimators/megapose/evaluation/prediction_runner.py
@@ -95,7 +95,8 @@ def run_inference_pipeline(
         Returns:
             A dict with keys
             - 'final': final preds
-            - 'refiner/final': preds at final refiner iteration (before depth refinement)
+            - 'refiner/final': preds at final refiner iteration (before depth
+              refinement)
             - 'depth_refinement': preds after depth refinement.
 
 
@@ -146,14 +147,16 @@ def run_inference_pipeline(
         # - 'refiner/iteration=5`
         # - `depth_refiner`
         # Note: Since we support multi-hypotheses we need to potentially
-        # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5` things for the ones that were actually the highest scoring at the end.
+        # go back and extract out the 'refiner/iteration=1`, `refiner/iteration=5`
+        # things for the ones that were actually the highest scoring at the end.
all_preds = {} data_TCO_refiner = extra_data["refiner"]["preds"] + ref_str = f"refiner/iteration={self.inference_cfg.n_refiner_iterations}" all_preds = { "final": preds, - f"refiner/iteration={self.inference_cfg.n_refiner_iterations}": data_TCO_refiner, + ref_str: data_TCO_refiner, "refiner/final": data_TCO_refiner, "coarse": extra_data["coarse"]["preds"], } diff --git a/happypose/pose_estimators/megapose/inference/pose_estimator.py b/happypose/pose_estimators/megapose/inference/pose_estimator.py index 44e3a679..19238834 100644 --- a/happypose/pose_estimators/megapose/inference/pose_estimator.py +++ b/happypose/pose_estimators/megapose/inference/pose_estimator.py @@ -302,7 +302,10 @@ def forward_scoring_model( elapsed = time.time() - start_time - timing_str = f"time: {elapsed:.2f}, model_time: {model_time:.2f}, render_time: {render_time:.2f}" + timing_str = ( + f"time: {elapsed:.2f}, model_time: {model_time:.2f}, " + f"render_time: {render_time:.2f}" + ) extra_data = { "render_time": render_time, @@ -458,7 +461,10 @@ def forward_coarse_model( elapsed = time.time() - start_time - timing_str = f"time: {elapsed:.2f}, model_time: {model_time:.2f}, render_time: {render_time:.2f}" + timing_str = ( + f"time: {elapsed:.2f}, model_time: {model_time:.2f}, " + f"render_time: {render_time:.2f}" + ) extra_data = { "render_time": render_time, diff --git a/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py b/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py index 2b61d7d2..c2ecab7f 100644 --- a/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py +++ b/happypose/pose_estimators/megapose/inference/teaserpp_refiner.py @@ -205,8 +205,8 @@ def refine_poses( To generate correspondences for Teaser++ we use the following approach. 1. Render depth image depth_rendered at the estimated pose from predictions. 2. Generate 3D --> 3D correspondences across rendered and observed depth images. - by assuming that pose is correctly aligned in rgb space. So depth_rendered[u,v] - corresponds to depth_observed[u,v]. + by assuming that pose is correctly aligned in rgb space. + So depth_rendered[u,v] corresponds to depth_observed[u,v]. 3. Estimate a mask to filter out some outliers in our generated correspondences. Args: diff --git a/happypose/pose_estimators/megapose/models/pose_rigid.py b/happypose/pose_estimators/megapose/models/pose_rigid.py index fee8ddd3..3646ba16 100644 --- a/happypose/pose_estimators/megapose/models/pose_rigid.py +++ b/happypose/pose_estimators/megapose/models/pose_rigid.py @@ -206,7 +206,8 @@ def crop_inputs( ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """Crop input images. - The image is cropped using the reprojection of the object points in the input pose (TCO). + The image is cropped using the reprojection of the object points in the input + pose (TCO). The reference point reprojects to the center of the cropped image. Please note that the unlike DeepIm, we do not explicitly use the input bounding box for cropping. @@ -223,8 +224,8 @@ def crop_inputs( ------- images_cropped: Images cropped and resized to self.render_size K_crop: Intrinsics of the fictive cropped camera. - boxes_rend: smallest bounding box defined by the reprojection of object points in - pose TCO. + boxes_rend: smallest bounding box defined by the reprojection of object + points in pose TCO. boxes_crop: bounding box used to crop the input image. 
""" bsz = images.shape[0] diff --git a/happypose/pose_estimators/megapose/models/resnet.py b/happypose/pose_estimators/megapose/models/resnet.py index aa8c41db..e3a2e8b6 100644 --- a/happypose/pose_estimators/megapose/models/resnet.py +++ b/happypose/pose_estimators/megapose/models/resnet.py @@ -89,7 +89,8 @@ def __init__( if dilation > 1: msg = "Dilation > 1 not supported in BasicBlock" raise NotImplementedError(msg) - # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + # Both self.conv1 and self.downsample layers downsample the input + # when stride != 1 self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) @@ -118,11 +119,14 @@ def forward(self, x: Tensor) -> Tensor: class Bottleneck(nn.Module): - # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) - # while original implementation places the stride at the first 1x1 convolution(self.conv1) - # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. - # This variant is also known as ResNet V1.5 and improves accuracy according to - # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. + """ + Bottleneck in torchvision places the stride for downsampling at 3x3 + convolution(self.conv2) while original implementation places the stride at the first + 1x1 convolution(self.conv1) according to "Deep residual learning for image + recognition"https://arxiv.org/abs/1512.03385. This variant is also known as ResNet + V1.5 and improves accuracy according to + https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch + """ expansion: int = 4 @@ -141,7 +145,8 @@ def __init__( if norm_layer is None: norm_layer = nn.BatchNorm2d width = int(planes * (base_width / 64.0)) * groups - # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + # Both self.conv2 and self.downsample layers downsample the input when + # stride != 1 self.conv1 = conv1x1(inplanes, width) self.bn1 = norm_layer(width) self.conv2 = conv3x3(width, width, stride, groups, dilation) @@ -200,10 +205,11 @@ def __init__( # the 2x2 stride with a dilated convolution instead replace_stride_with_dilation = [False, False, False] if len(replace_stride_with_dilation) != 3: - msg = f"replace_stride_with_dilation should be None or a 3-element tuple, got {replace_stride_with_dilation}" - raise ValueError( - msg, + msg = ( + f"replace_stride_with_dilation should be None or a 3-element tuple, " + f"got {replace_stride_with_dilation}" ) + raise ValueError(msg) self.groups = groups self.base_width = width_per_group self.conv1 = nn.Conv2d( @@ -248,7 +254,8 @@ def __init__( nn.init.constant_(m.bias, 0) # Zero-initialize the last BN in each residual branch, - # so that the residual branch starts with zeros, and each residual block behaves like an identity. + # so that the residual branch starts with zeros, and each residual block behaves + # like an identity. 
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
         if zero_init_residual:
             for m in self.modules():
diff --git a/happypose/pose_estimators/megapose/models/torchvision_resnet.py b/happypose/pose_estimators/megapose/models/torchvision_resnet.py
index d8ba3faf..92c88486 100644
--- a/happypose/pose_estimators/megapose/models/torchvision_resnet.py
+++ b/happypose/pose_estimators/megapose/models/torchvision_resnet.py
@@ -21,6 +21,7 @@
 import torch
 import torch.nn as nn
 from torch import Tensor
+from torch.hub import load_state_dict_from_url
 
 __all__ = [
     "ResNet",
@@ -97,7 +98,8 @@ def __init__(
         if dilation > 1:
             msg = "Dilation > 1 not supported in BasicBlock"
             raise NotImplementedError(msg)
-        # Both self.conv1 and self.downsample layers downsample the input when stride != 1
+        # Both self.conv1 and self.downsample layers downsample the input
+        # when stride != 1
         self.conv1 = conv3x3(inplanes, planes, stride)
         self.bn1 = norm_layer(planes)
         self.relu = nn.ReLU(inplace=True)
@@ -126,11 +128,14 @@ def forward(self, x: Tensor) -> Tensor:
 
 
 class Bottleneck(nn.Module):
-    # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
-    # while original implementation places the stride at the first 1x1 convolution(self.conv1)
-    # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
-    # This variant is also known as ResNet V1.5 and improves accuracy according to
-    # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+    """
+    Bottleneck in torchvision places the stride for downsampling at the 3x3
+    convolution (self.conv2), while the original implementation places the stride at
+    the first 1x1 convolution (self.conv1), according to "Deep residual learning for
+    image recognition" (https://arxiv.org/abs/1512.03385). This variant is also known
+    as ResNet V1.5 and improves accuracy according to
+    https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
+    """
 
     expansion: int = 4
 
@@ -149,7 +154,8 @@ def __init__(
         if norm_layer is None:
             norm_layer = nn.BatchNorm2d
         width = int(planes * (base_width / 64.0)) * groups
-        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
+        # Both self.conv2 and self.downsample layers downsample the input
+        # when stride != 1
         self.conv1 = conv1x1(inplanes, width)
         self.bn1 = norm_layer(width)
         self.conv2 = conv3x3(width, width, stride, groups, dilation)
@@ -208,10 +214,11 @@ def __init__(
             # the 2x2 stride with a dilated convolution instead
             replace_stride_with_dilation = [False, False, False]
         if len(replace_stride_with_dilation) != 3:
-            msg = f"replace_stride_with_dilation should be None or a 3-element tuple, got {replace_stride_with_dilation}"
-            raise ValueError(
-                msg,
+            msg = (
+                f"replace_stride_with_dilation should be None or a 3-element tuple, "
+                f"got {replace_stride_with_dilation}"
             )
+            raise ValueError(msg)
         self.groups = groups
         self.base_width = width_per_group
         self.conv1 = nn.Conv2d(
@@ -258,7 +265,8 @@ def __init__(
                 nn.init.constant_(m.bias, 0)
 
         # Zero-initialize the last BN in each residual branch,
-        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
+        # so that the residual branch starts with zeros, and each residual block behaves
+        # like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 if zero_init_residual: for m in self.modules(): diff --git a/happypose/pose_estimators/megapose/models/wide_resnet.py b/happypose/pose_estimators/megapose/models/wide_resnet.py index c30ce54f..8b73b588 100644 --- a/happypose/pose_estimators/megapose/models/wide_resnet.py +++ b/happypose/pose_estimators/megapose/models/wide_resnet.py @@ -34,7 +34,8 @@ def conv3x3(in_planes, out_planes, stride=1): class BasicBlockV2(nn.Module): r"""BasicBlock V2 from - `"Identity Mappings in Deep Residual Networks"`_ paper. + `"Identity Mappings in Deep Residual Networks" + `_ paper. This is used for ResNet V2 for 18, 34 layers. Args: diff --git a/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py b/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py index b1b2d78c..fc2a9df4 100644 --- a/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py +++ b/happypose/pose_estimators/megapose/scripts/generate_shapenet_pbr.py @@ -37,7 +37,6 @@ BLENDERPROC_DIR, GSO_DIR, GSO_NORMALIZED_DIR, - GSO_ORIG_DIR, LOCAL_DATA_DIR, MEMORY, PROJECT_DIR, @@ -65,7 +64,6 @@ } SHAPENET_ORIG_DIR = SHAPENET_DIR / "models_orig" SHAPENET_SCALED_DIR = SHAPENET_DIR / "models_bop-renderer_scale=0.1" -GSO_ORIG_DIR = GSO_DIR / "models_orig" GSO_SCALED_DIR = GSO_DIR / "models_bop-renderer_scale=0.1" @@ -230,7 +228,8 @@ def make_shapenet_loader(synset_id, category_id, source_id=None, scale=None): { "module": "manipulators.EntityManipulator", "config": { - # get all shape net objects, as we have only loaded one, this returns only one entity + # get all shape net objects, as we have only loaded one, this returns + # only one entity "selector": { "provider": "getter.Entity", "conditions": { @@ -269,7 +268,8 @@ def make_gso_loader(obj_id, category_id, scale=None): { "module": "manipulators.EntityManipulator", "config": { - # get all shape net objects, as we have only loaded one, this returns only one entity + # get all shape net objects, as we have only loaded one, this returns + # only one entity "selector": { "provider": "getter.Entity", "conditions": { @@ -341,8 +341,10 @@ def make_light_sampler(radius_min=1, radius_max=1.5, energy=100): "location": { "provider": "sampler.Shell", "center": [0, 0, 0], - "radius_min": radius_min, # now depends on the bottom area of the box - "radius_max": radius_max, # this one too + # now depends on the bottom area of the box + "radius_min": radius_min, + # this one too + "radius_max": radius_max, "elevation_min": 5, "elevation_max": 89, "uniform_elevation": True, @@ -759,54 +761,62 @@ def record_chunk(cfg, ds_dir, chunk_id): shutil.rmtree(output_dir) return - # # HDF5 dataset generation - # if cfg.save_hdf5: - # shutil.copy( - # ds_dir / "shapenet_labels.json", output_dir / "bop_data" / "shapenet_labels.json" - # ) - # scene_ds = BOPDataset( - # output_dir / "bop_data", - # split="train_pbr", - # load_depth=True, - # allow_cache=False, - # per_view_annotations=False, - # ) - # write_scene_ds_as_hdf5( - # scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/data.hdf5", n_reading_workers=4 - # ) - - # if cfg.save_webdataset: - # shutil.copy( - # ds_dir / "shapenet_labels.json", output_dir / "bop_data" / "shapenet_labels.json" - # ) - # scene_ds = BOPDataset( - # output_dir / "bop_data", - # split="train_pbr", - # load_depth=True, - # allow_cache=False, - # per_view_annotations=False, - # ) - # write_scene_ds_as_wds( - # scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/", 
-    #     )
-
-    # # Move everything to base directory
-    # chunk_scene_dir = output_dir / f"bop_data/train_pbr/{0:06d}"
-    # train_pbr_dir = ds_dir / "train_pbr"
-    # target_dir = train_pbr_dir / f"{chunk_id:06d}"
-    # if target_dir.exists():
-    #     shutil.rmtree(target_dir)
-    # if cfg.save_files and success:
-    #     shutil.copytree(chunk_scene_dir, target_dir)
-    # if cfg.save_hdf5 and success:
-    #     target_dir.mkdir(exist_ok=True)
-    #     shutil.copy(chunk_scene_dir / "data.hdf5", target_dir / "data.hdf5")
-    # if cfg.save_webdataset and success:
-    #     target_dir.mkdir(exist_ok=True)
-    #     shutil.copy(chunk_scene_dir / "shard-00000000.tar", target_dir / "shard-00000000.tar")
-    #     shutil.copy(chunk_scene_dir / "ds_infos.json", target_dir / "ds_infos.json")
-    # shutil.rmtree(output_dir)
-    # return
+
+"""
+    # HDF5 dataset generation
+    if cfg.save_hdf5:
+        shutil.copy(
+            ds_dir / "shapenet_labels.json",
+            output_dir / "bop_data" / "shapenet_labels.json",
+        )
+        scene_ds = BOPDataset(
+            output_dir / "bop_data",
+            split="train_pbr",
+            load_depth=True,
+            allow_cache=False,
+            per_view_annotations=False,
+        )
+        write_scene_ds_as_hdf5(
+            scene_ds,
+            output_dir / f"bop_data/train_pbr/{0:06d}/data.hdf5",
+            n_reading_workers=4,
+        )
+    if cfg.save_webdataset:
+        shutil.copy(
+            ds_dir / "shapenet_labels.json",
+            output_dir / "bop_data" / "shapenet_labels.json",
+        )
+        scene_ds = BOPDataset(
+            output_dir / "bop_data",
+            split="train_pbr",
+            load_depth=True,
+            allow_cache=False,
+            per_view_annotations=False,
+        )
+        write_scene_ds_as_wds(
+            scene_ds, output_dir / f"bop_data/train_pbr/{0:06d}/", n_reading_workers=4
+        )
+
+    # Move everything to base directory
+    chunk_scene_dir = output_dir / f"bop_data/train_pbr/{0:06d}"
+    train_pbr_dir = ds_dir / "train_pbr"
+    target_dir = train_pbr_dir / f"{chunk_id:06d}"
+    if target_dir.exists():
+        shutil.rmtree(target_dir)
+    if cfg.save_files and success:
+        shutil.copytree(chunk_scene_dir, target_dir)
+    if cfg.save_hdf5 and success:
+        target_dir.mkdir(exist_ok=True)
+        shutil.copy(chunk_scene_dir / "data.hdf5", target_dir / "data.hdf5")
+    if cfg.save_webdataset and success:
+        target_dir.mkdir(exist_ok=True)
+        shutil.copy(
+            chunk_scene_dir / "shard-00000000.tar", target_dir / "shard-00000000.tar"
+        )
+        shutil.copy(chunk_scene_dir / "ds_infos.json", target_dir / "ds_infos.json")
+    shutil.rmtree(output_dir)
+    return
+"""


 def find_chunks_to_record(cfg, chunk_ids):
diff --git a/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py b/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
index f145bde0..93920745 100644
--- a/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
+++ b/happypose/pose_estimators/megapose/scripts/make_shapenet_statistics.py
@@ -42,20 +42,20 @@ def measure_memory(gltf_path):
     s = f.getvalue()
     s = s.splitlines()
     mems = []
-    for l in s:
-        if "GeomVertexData arrays occupy" in l:
-            print(l)
-            l_ = l.split(" ")
+    for line in s:
+        if "GeomVertexData arrays occupy" in line:
+            print(line)
+            l_ = line.split(" ")
             idx = [n for n, w in enumerate(l_) if w == "occupy"][0]
             mems.append(float(l_[idx + 1]))
-        elif "GeomPrimitive arrays occupy" in l:
-            print(l)
-            l_ = l.split(" ")
+        elif "GeomPrimitive arrays occupy" in line:
+            print(line)
+            l_ = line.split(" ")
             idx = [n for n, w in enumerate(l_) if w == "occupy"][0]
             mems.append(float(l_[idx + 1]))
-        elif "texture memory required" in l:
-            print(l)
-            l_ = l.split(" ")
+        elif "texture memory required" in line:
+            print(line)
+            l_ = line.split(" ")
             idx = [n for n, w in enumerate(l_) if w == "minimum"][0]
             mems.append(float(l_[idx + 1]))
     tot_mem_kb = sum(mems)
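The three branches in measure_memory above differ only in the substring they match and the anchor word they search for ("occupy" vs. "minimum"). A hedged refactoring sketch; parse_mem_value is a hypothetical helper, not part of this patch:

def parse_mem_value(line: str, anchor: str) -> float:
    # Returns the number immediately following `anchor`, e.g.
    # parse_mem_value("GeomVertexData arrays occupy 1024 kB", "occupy") -> 1024.0
    words = line.split(" ")
    return float(words[words.index(anchor) + 1])

With such a helper, each branch reduces to one mems.append(parse_mem_value(...)) call, which would also remove the duplicated print/split/index boilerplate the rename above had to touch three times.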
== "minimum"][0] mems.append(float(l_[idx + 1])) tot_mem_kb = sum(mems) diff --git a/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py b/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py index 52a4dbaa..317b4f29 100644 --- a/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py +++ b/happypose/pose_estimators/megapose/scripts/run_inference_on_datasettemp.py @@ -127,7 +127,9 @@ def make_object_dataset(example_dir: Path) -> RigidObjectDataset: assert not mesh_path, f"there multiple meshes in the {label} directory" mesh_path = fn assert mesh_path, f"couldnt find a obj or ply mesh for {label}" - rigid_objects.append(RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units)) + rigid_objects.append( + RigidObject(label=label, mesh_path=mesh_path, mesh_units=mesh_units) + ) # TODO: fix mesh units rigid_object_dataset = RigidObjectDataset(rigid_objects) return rigid_object_dataset diff --git a/happypose/pose_estimators/megapose/scripts/run_megapose_training.py b/happypose/pose_estimators/megapose/scripts/run_megapose_training.py index 8d29986f..2a4ca03f 100644 --- a/happypose/pose_estimators/megapose/scripts/run_megapose_training.py +++ b/happypose/pose_estimators/megapose/scripts/run_megapose_training.py @@ -14,7 +14,8 @@ """ -# SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-FileCopyrightText: Copyright (c) NVIDIA CORPORATION & AFFILIATES. +# All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a diff --git a/happypose/pose_estimators/megapose/training/megapose_forward_loss.py b/happypose/pose_estimators/megapose/training/megapose_forward_loss.py index e7086b2e..2b3548ff 100644 --- a/happypose/pose_estimators/megapose/training/megapose_forward_loss.py +++ b/happypose/pose_estimators/megapose/training/megapose_forward_loss.py @@ -344,7 +344,10 @@ def add_mask_to_image( if cfg.predict_rendered_views_logits: assert is_hypothesis_positive is not None is_positive = is_hypothesis_positive[batch_idx, init_idx] - f.title.text = f"idx={batch_idx},view={view_idx},init={init_idx},target={is_positive}" + f.title.text = ( + f"idx={batch_idx},view={view_idx},init={init_idx}," + f"target={is_positive}" + ) row.append(f) if n_channels == 6: diff --git a/happypose/pose_estimators/megapose/training/train_megapose.py b/happypose/pose_estimators/megapose/training/train_megapose.py index 8e4e48cf..72fe673c 100644 --- a/happypose/pose_estimators/megapose/training/train_megapose.py +++ b/happypose/pose_estimators/megapose/training/train_megapose.py @@ -257,7 +257,8 @@ def make_iterable_scene_dataset( ckpt = torch.load(ckpt_path) except EOFError: print( - "Unable to load checkpoint.pth.tar. Falling back to checkpoint_epoch=last.pth.tar", + "Unable to load checkpoint.pth.tar. " + "Falling back to checkpoint_epoch=last.pth.tar", ) ckpt_path = resume_run_dir / "checkpoint_epoch=last.pth.tar" ckpt = torch.load(ckpt_path) diff --git a/happypose/pose_estimators/megapose/training/training_config.py b/happypose/pose_estimators/megapose/training/training_config.py index c4401f82..ddc7d9bf 100644 --- a/happypose/pose_estimators/megapose/training/training_config.py +++ b/happypose/pose_estimators/megapose/training/training_config.py @@ -48,7 +48,8 @@ class TrainingConfig(omegaconf.dictconfig.DictConfig): 2. If `run_id` is None, then use `config_id`, `run_comment`and `run_postfix` to create a `run_id`. 
diff --git a/happypose/pose_estimators/megapose/training/utils.py b/happypose/pose_estimators/megapose/training/utils.py
index 0e152e99..68753d56 100644
--- a/happypose/pose_estimators/megapose/training/utils.py
+++ b/happypose/pose_estimators/megapose/training/utils.py
@@ -93,15 +93,19 @@ def cast_raw_numpy_images_to_tensor(images):
     """
     B, H, W, C = images.shape

-    assert C in [
-        3,
-        4,
-    ], f"images must have shape [B,H,W,C] with C=3 (rgb) or C=4 (rgbd), encountered C={C}"
+    msg = (
+        f"images must have shape [B,H,W,C] with C=3 (rgb) or C=4 (rgbd), "
+        f"encountered C={C}"
+    )
+    assert C in [3, 4], msg

     images = torch.as_tensor(images)

     max_rgb = torch.max(images[:, RGB_DIMS])
     if max_rgb < 1.5:
-        msg = "You are about to divide by 255 but the max rgb pixel value is less than 1.5"
+        msg = (
+            "You are about to divide by 255 "
+            "but the max rgb pixel value is less than 1.5"
+        )
         raise Warning(msg)

     # [B,C,H,W]
diff --git a/happypose/toolbox/datasets/datasets_cfg.py b/happypose/toolbox/datasets/datasets_cfg.py
index 920f4c1a..452323aa 100644
--- a/happypose/toolbox/datasets/datasets_cfg.py
+++ b/happypose/toolbox/datasets/datasets_cfg.py
@@ -224,7 +224,7 @@ def make_scene_dataset(

     # Synthetic datasets
     elif "synthetic." in ds_name:
-        from happypose.pose_estimators.cosypose.cosypose.datasets.synthetic_dataset import (
+        from happypose.pose_estimators.cosypose.cosypose.datasets.synthetic_dataset import (  # noqa: E501
             SyntheticSceneDataset,
         )
diff --git a/happypose/toolbox/datasets/deepim_modelnet.py b/happypose/toolbox/datasets/deepim_modelnet.py
index b78a9e18..2a9c87ca 100644
--- a/happypose/toolbox/datasets/deepim_modelnet.py
+++ b/happypose/toolbox/datasets/deepim_modelnet.py
@@ -56,38 +56,31 @@ def __init__(
         n_images_per_object: int = 50,
         load_depth: bool = False,
     ):
+        data_dir = modelnet_dir / "modelnet_render_v1" / "data"
         self.test_template_im = (
-            modelnet_dir
-            / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"
+            data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"
         )
         self.test_template_depth = (
-            modelnet_dir
-            / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-depth.png"
+            data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-depth.png"
         )
         self.test_template_label = (
-            modelnet_dir
-            / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-label.png"
+            data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-label.png"
         )
         self.test_template_pose = (
-            modelnet_dir
-            / "modelnet_render_v1/data/real/{category}/{split}/{obj_id}_{im_id:04d}-pose.txt"
+            data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-pose.txt"
         )
         self.init_template_im = (
-            modelnet_dir
-            / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-color.png"
+            data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-color.png"
         )
         self.init_template_depth = (
-            modelnet_dir
-            / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-depth.png"
+            data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-depth.png"
        )
         self.init_template_label = (
-            modelnet_dir
-            / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-label.png"
+            data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-label.png"
         )
         self.init_template_pose = (
-            modelnet_dir
-            / "modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt"
+            data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt"
         )

         object_ids = (
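The deepim_modelnet.py hunk above factors the common modelnet_render_v1/data prefix into data_dir but keeps str.format placeholders inside the Path objects. Presumably they are expanded downstream roughly as follows; this is illustrative only, the actual call site is not part of this hunk and the root directory here is hypothetical:

from pathlib import Path

data_dir = Path("local_data") / "modelnet_render_v1" / "data"  # hypothetical root
template = data_dir / "real/{category}/{split}/{obj_id}_{im_id:04d}-color.png"

# The braces survive inside the Path and are resolved later with str.format:
im_path = Path(str(template).format(category="chair", split="test", obj_id="0001", im_id=7))
assert im_path.name == "0001_0007-color.png"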
"modelnet_render_v1/data/rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt" + data_dir / "rendered/{category}/{split}/{obj_id}_{im_id:04d}_0-pose.txt" ) object_ids = ( diff --git a/happypose/toolbox/datasets/object_dataset.py b/happypose/toolbox/datasets/object_dataset.py index 048568c1..ba591430 100644 --- a/happypose/toolbox/datasets/object_dataset.py +++ b/happypose/toolbox/datasets/object_dataset.py @@ -48,27 +48,32 @@ def __init__( ---- label (str): A unique label to identify an object. mesh_path (Path): Path to a mesh. Multiple object types are supported. - Please refer to downstream usage of this class for the supported formats. - For example, when a `RigidObjectDataset`is passed to a `Panda3dSceneRenderer`, - the user must ensure that the mesh can be loaded correctly. + Please refer to downstream usage of this class for the supported + formats. + For example, when a `RigidObjectDataset`is passed to a + `Panda3dSceneRenderer`, the user must ensure that the mesh can be loaded + correctly. category (Optional[str], optional): Can be used to identify the object - as one of a known category, e.g. mug or shoes. In the general case, an - object does not need to belong to a category. The notion of category can also - ambiguous. In this codebase, this is only used to parse the categories of the - ShapeNet dataset in order to remove the instances that overlap with the test - categories of the ModelNet dataset. + as one of a known category, e.g. mug or shoes. In the general case, an + object does not need to belong to a category. The notion of category can + also ambiguous. In this codebase, this is only used to parse the + categories of the ShapeNet dataset in order to remove the instances that + overlap with the test categories of the ModelNet dataset. mesh_diameter (Optional[float], optional): Diameter of the object, expressed the in unit of the meshes. - This is useful for computing error some metrics like ADD<0.1d or ADD-S<0.1d. - mesh_units (str, optional): Units in which the vertex positions are expressed. - Can be `m`or `mm`, defaults to `m`. In the operations of this codebase, - all mesh coordinates and poses must be expressed in meters. - When an object is loaded, a scaling will be applied to the mesh + This is useful for computing error some metrics like ADD<0.1d or + ADD-S<0.1d. + mesh_units (str, optional): Units in which the vertex positions are + expressed. Can be `m`or `mm`, defaults to `m`. In the operations of + this codebase, all mesh coordinates and poses must be expressed in + meters. When an object is loaded, a scaling will be applied to the mesh to ensure its coordinates are in meters when in memory. symmetries_discrete (List[ContinuousSymmetry], optional): - See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py + See https://github.com/thodan/bop_toolkit/blob/master/ + bop_toolkit_lib/misc.py symmetries_continuous (List[DiscreteSymmetry], optional): - See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py + See https://github.com/thodan/bop_toolkit/blob/master/ + bop_toolkit_lib/misc.py ypr_offset_deg (np.ndarray, optional): A rotation offset applied to the mesh **only when loaded in Panda3D**. This can be useful to correct some mesh conventions where axes are flipped. @@ -80,7 +85,8 @@ def __init__( For example, if you have a mesh with coordinates expressed in `mm` which you want to resize to 10% of its size, you should pass `mesh_units=mm`and `scaling_factor=0.1`. 
diff --git a/happypose/toolbox/datasets/pose_dataset.py b/happypose/toolbox/datasets/pose_dataset.py
index eb49e94a..1b330565 100644
--- a/happypose/toolbox/datasets/pose_dataset.py
+++ b/happypose/toolbox/datasets/pose_dataset.py
@@ -248,7 +248,8 @@ def make_data_from_obs(self, obs: SceneObservation) -> Union[PoseData, None]:
         The object satisfies the constraints:
             1. The visible 2D area is superior or equal to min_area
             2. if `keep_objects_set` isn't None, the object must belong to this set
-        If there are no objects that satisfy this condition in the observation, returns None.
+        If there are no objects that satisfy these constraints in the observation,
+        returns None.
         """
         obs = remove_invisible_objects(obs)
diff --git a/happypose/toolbox/datasets/scene_dataset.py b/happypose/toolbox/datasets/scene_dataset.py
index 6b541b2f..0fa9fb96 100644
--- a/happypose/toolbox/datasets/scene_dataset.py
+++ b/happypose/toolbox/datasets/scene_dataset.py
@@ -52,7 +52,8 @@
     tensors:
         K: [B,3,3] camera intrinsics
         poses: [B,4,4] object to camera transform
-        poses_init [Optional]: [B,4,4] object to camera transform. Used if the dataset has initial estimates (ModelNet)
+        poses_init [Optional]: [B,4,4] object to camera transform. Used if the dataset
+        has initial estimates (ModelNet)
         TCO: same as poses
         bboxes: [B,4] bounding boxes for objects
         masks: (optional)
@@ -70,14 +71,16 @@ def transform_to_list(T: Transform) -> ListPose:

 @dataclass
 class ObjectData:
-    # NOTE (Yann): bbox_amodal, bbox_modal, visib_fract should be moved to SceneObservation
+    # NOTE (Yann): bbox_amodal, bbox_modal, visib_fract should be moved to
+    # SceneObservation
     label: str
     TWO: Transform | None = None
     unique_id: int | None = None
     bbox_amodal: np.ndarray | None = None  # (4, ) array [xmin, ymin, xmax, ymax]
     bbox_modal: np.ndarray | None = None  # (4, ) array [xmin, ymin, xmax, ymax]
     visib_fract: float | None = None
-    TWO_init: Transform | None = None  # Some pose estimation datasets (ModelNet) provide an initial pose estimate
+    TWO_init: Transform | None = None
+    # Some pose estimation datasets (ModelNet) provide an initial pose estimate
     # NOTE: This should be loaded externally

     def to_json(self) -> dict[str, SingleDataJsonType]:
@@ -124,7 +127,8 @@ class CameraData:
     resolution: Resolution | None = None
     TWC: Transform | None = None
     camera_id: str | None = None
-    TWC_init: Transform | None = None  # Some pose estimation datasets (ModelNet) provide an initial pose estimate
+    TWC_init: Transform | None = None
+    # Some pose estimation datasets (ModelNet) provide an initial pose estimate
     # NOTE: This should be loaded externally

     def to_json(self) -> str:
@@ -191,7 +195,8 @@ class SceneObservation:
     rgb: np.ndarray | None = None  # (h,w,3) uint8 numpy array
     depth: np.ndarray | None = None  # (h, w), np.float32
     segmentation: np.ndarray | None = None  # (h, w), np.uint32 (important);
-    # contains objects unique ids. int64 are not handled and can be dangerous when used with PIL
+    # contains objects unique ids. int64 are not handled and can be dangerous when used
+    # with PIL
     infos: ObservationInfos | None = None
     object_datas: list[ObjectData] | None = None
     camera_data: CameraData | None = None
@@ -400,8 +405,10 @@ def __init__(

         Args:
         ----
-            frame_index (pd.DataFrame): Must contain the following columns: scene_id, view_id
-            load_depth (bool, optional): Whether to load depth images. Defaults to False.
+            frame_index (pd.DataFrame): Must contain the following columns:
+                scene_id, view_id
+            load_depth (bool, optional): Whether to load depth images.
+                Defaults to False.
             load_segmentation (bool, optional): Whether to load image segmentation.
                 Defaults to True.
                 Defaults to f'{label}'.
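The make_data_from_obs docstring above lists two keep/drop constraints over the ObjectData fields defined in scene_dataset.py. A sketch of that filter, illustrative only and with a hypothetical helper name; the real implementation lives in pose_dataset.py:

import numpy as np

def keep_object(
    bbox_modal: np.ndarray, label: str, min_area: float, keep_objects_set=None
) -> bool:
    # Constraint 1: visible 2D area (here from the modal bbox) >= min_area.
    xmin, ymin, xmax, ymax = bbox_modal
    if (xmax - xmin) * (ymax - ymin) < min_area:
        return False
    # Constraint 2: optional label whitelist.
    return keep_objects_set is None or label in keep_objects_set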
diff --git a/happypose/toolbox/datasets/shapenet_object_dataset.py b/happypose/toolbox/datasets/shapenet_object_dataset.py
index 52d0df9d..2e626a9c 100644
--- a/happypose/toolbox/datasets/shapenet_object_dataset.py
+++ b/happypose/toolbox/datasets/shapenet_object_dataset.py
@@ -94,7 +94,7 @@ def get_descendants(synset):
         if len(synset.children) == 0:
             return synset.models
         else:
-            return sum([get_descendants(child) for child in children])
+            return sum([get_descendants(child) for child in synset.children])

     for synset in synset_id_to_synset.values():
         synset.models_descendants = get_descendants(synset)
diff --git a/happypose/toolbox/lib3d/cosypose_ops.py b/happypose/toolbox/lib3d/cosypose_ops.py
index c0d6731c..9eb1c82b 100644
--- a/happypose/toolbox/lib3d/cosypose_ops.py
+++ b/happypose/toolbox/lib3d/cosypose_ops.py
@@ -18,6 +18,7 @@
 import torch

 # Local Folder
+from .camera_geometry import project_points
 from .rotations import compute_rotation_matrix_from_ortho6d
 from .transform_ops import invert_transform_matrices, transform_pts

@@ -284,6 +285,7 @@ def TCO_init_from_boxes_zup_autodepth(boxes_2d, model_points_3d, K):

 def TCO_init_from_boxes_v3(layer, boxes, K):
     # TODO: Clean these 2 functions
+    # TODO: F821 Undefined name `_TCO_init_from_boxes_v2`
     # MegaPose
     from happypose.pose_estimators.megapose.math_utils.meshes import get_T_offset

@@ -301,7 +303,7 @@ def TCO_init_from_boxes_v3(layer, boxes, K):
         .to(boxes.device)
         .to(boxes.dtype)
     )
-    TCO = _TCO_init_from_boxes_v2(z, boxes, K)
+    TCO = _TCO_init_from_boxes_v2(z, boxes, K)  # noqa: F821
     pts2d = project_points(pts, K, TCO)
     deltax = pts2d[..., 0].max() - pts2d[..., 0].min()
     deltay = pts2d[..., 1].max() - pts2d[..., 1].min()
@@ -313,7 +315,7 @@ def TCO_init_from_boxes_v3(layer, boxes, K):
     ratio_y = deltay / bb_deltay
     z2 = z * (ratio_y.unsqueeze(1) + ratio_x.unsqueeze(1)) / 2

-    TCO = _TCO_init_from_boxes_v2(z2, boxes, K)
+    TCO = _TCO_init_from_boxes_v2(z2, boxes, K)  # noqa: F821
     return TCO
diff --git a/happypose/toolbox/lib3d/rigid_mesh_database.py b/happypose/toolbox/lib3d/rigid_mesh_database.py
index 1ea4b355..ed26cc3b 100644
--- a/happypose/toolbox/lib3d/rigid_mesh_database.py
+++ b/happypose/toolbox/lib3d/rigid_mesh_database.py
@@ -55,7 +55,7 @@ def __init__(self, obj_list: list[RigidObject]):
         self.obj_list = obj_list
         self.infos = {obj.label: {} for obj in obj_list}
         self.meshes = {
-            l: as_mesh(
+            label: as_mesh(
                 trimesh.load(
                     obj.mesh_path,
                     group_material=False,
@@ -64,7 +64,7 @@ def __init__(self, obj_list: list[RigidObject]):
                     maintain_order=True,
                 ),
             )
-            for l, obj in self.obj_dict.items()
+            for label, obj in self.obj_dict.items()
         }

         for label, obj in self.obj_dict.items():
@@ -145,9 +145,9 @@ def n_sym_mapping(self):
         return {label: obj["n_sym"] for label, obj in self.infos.items()}

     def select(self, labels):
-        ids = [self.label_to_id[l] for l in labels]
+        ids = [self.label_to_id[label] for label in labels]
         return Meshes(
-            infos=[self.infos[l] for l in labels],
+            infos=[self.infos[label] for label in labels],
             labels=self.labels[ids],
             points=self.points[ids],
             symmetries=self.symmetries[ids],
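The shapenet_object_dataset.py hunk above is a real bug fix rather than a style change: the comprehension iterated over an undefined name `children` (ruff F821) instead of `synset.children`. A self-contained check of the corrected recursion; note the explicit [] start for sum, an assumption added here so that list concatenation works in isolation:

from dataclasses import dataclass, field

@dataclass
class Synset:
    models: list = field(default_factory=list)
    children: list = field(default_factory=list)

def get_descendants(synset: Synset) -> list:
    if len(synset.children) == 0:
        return synset.models
    return sum([get_descendants(child) for child in synset.children], [])

root = Synset(children=[Synset(models=["m1"]), Synset(models=["m2", "m3"])])
assert get_descendants(root) == ["m1", "m2", "m3"]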
diff --git a/happypose/toolbox/renderer/geometry.py b/happypose/toolbox/renderer/geometry.py
index 08a65106..7135c8e2 100644
--- a/happypose/toolbox/renderer/geometry.py
+++ b/happypose/toolbox/renderer/geometry.py
@@ -11,10 +11,7 @@
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
-"""
-
-"""
 https://github.com/ikalevatykh/panda3d_viewer/blob/master/panda3d_viewer/geometry.py
 """
diff --git a/happypose/toolbox/renderer/utils.py b/happypose/toolbox/renderer/utils.py
index 378e6923..99cc846f 100644
--- a/happypose/toolbox/renderer/utils.py
+++ b/happypose/toolbox/renderer/utils.py
@@ -20,6 +20,7 @@
 import numpy as np
 import numpy.typing as npt
 import panda3d as p3d
+from panda3d.core import AntialiasAttrib, GeomNode, Material, NodePath

 # MegaPose
 from happypose.toolbox.lib3d.transform import Transform
@@ -90,7 +91,7 @@ def make_cube_node(scale, color=(1, 0, 0, 1)):
     node.set_color(color)
     node.set_render_mode_thickness(4)
     node.set_antialias(AntialiasAttrib.MLine)
-    node.set_material(Material(), 1)
+    node.setMaterial(Material(), 1)
     return node


@@ -129,7 +130,7 @@ def show_node_center(node, radius=None):
     else:
         radius = bounds.get_radius()
     sphere_node.set_scale(radius * 0.1)
-    set_material(sphere_node, (1, 1, 1, 1))
+    sphere_node.set_color((1, 1, 1, 1))
     sphere_node.reparentTo(node)
     sphere_node.setPos(0, 0, 0)
diff --git a/happypose/toolbox/utils/download.py b/happypose/toolbox/utils/download.py
index f57605f1..4b08193d 100755
--- a/happypose/toolbox/utils/download.py
+++ b/happypose/toolbox/utils/download.py
@@ -346,7 +346,7 @@ async def download_dir(self, download_path, local_path, flags):
             logger.error(f"Failed {download_path} with timeout")
             return
         if r.status_code != 200:
-            logger.error(f"Failed {download_path} with code {res.status_code}")
+            logger.error(f"Failed {download_path} with code {r.status_code}")
             return
         Path(local_path).mkdir(parents=True, exist_ok=True)
         soup = BeautifulSoup(r.content, "html.parser")
@@ -372,7 +372,7 @@ async def download_file(self, download_path, local_path):
             logger.error(f"Failed {download_path} with timeout")
             return
         if r.status_code != 200:
-            logger.error(f"Failed {download_path} with code {res.status_code}")
+            logger.error(f"Failed {download_path} with code {r.status_code}")
             return
         logger.info(f"Copying {download_path} to {local_path}")
         Path(local_path.parent).mkdir(parents=True, exist_ok=True)
diff --git a/happypose/toolbox/utils/logs_bokeh.py b/happypose/toolbox/utils/logs_bokeh.py
index b9e951f5..03d1facf 100644
--- a/happypose/toolbox/utils/logs_bokeh.py
+++ b/happypose/toolbox/utils/logs_bokeh.py
@@ -25,6 +25,7 @@
 import numpy as np
 import pandas as pd
 import seaborn as sns
+import yaml
 from bokeh.io import output_notebook, show
 from bokeh.layouts import gridplot
 from bokeh.models import HoverTool
@@ -67,7 +68,7 @@ def load_logs(self, run_ids):
             cfg_path = run_dir / "config.yaml"
             try:
                 config = OmegaConf.load(cfg_path)
-            except:
+            except Exception:
                 config = yaml.load(cfg_path.read_text(), Loader=yaml.UnsafeLoader)
             config = vars(config)
             configs[run_id] = self.fill_config_fn(config)
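The load_logs fix above replaces a bare except (ruff E722) with except Exception, which no longer swallows KeyboardInterrupt or SystemExit. The fallback pattern, in isolation; load_run_config is a hypothetical standalone version of what load_logs does inline:

import yaml
from omegaconf import OmegaConf

def load_run_config(cfg_path):
    try:
        return OmegaConf.load(cfg_path)
    except Exception:
        # Some older configs are not valid OmegaConf; fall back to PyYAML.
        return yaml.load(cfg_path.read_text(), Loader=yaml.UnsafeLoader)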
908dd00d..3bccc167 100644
--- a/happypose/toolbox/utils/webdataset.py
+++ b/happypose/toolbox/utils/webdataset.py
@@ -53,7 +53,8 @@ def group_by_keys(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=N
             current_sample = {"__key__": prefix, "__url__": filesample["__url__"]}
         if suffix in current_sample:
             print(
-                f"{fname}: duplicate file name in tar file {suffix} {current_sample.keys()}",
+                f"{fname}: duplicate file name in tar file {suffix} "
+                f"{current_sample.keys()}",
             )
             current_sample["__bad__"] = True
         if suffixes is None or suffix in suffixes:
diff --git a/happypose/toolbox/visualization/meshcat_utils.py b/happypose/toolbox/visualization/meshcat_utils.py
index f11d6a9e..428f394f 100644
--- a/happypose/toolbox/visualization/meshcat_utils.py
+++ b/happypose/toolbox/visualization/meshcat_utils.py
@@ -30,9 +30,9 @@
 def isRotationMatrix(M, tol=1e-4):
     """Checks if something is a valid rotation matrix."""
     tag = False
-    I = np.identity(M.shape[0])
+    identity = np.identity(M.shape[0])

-    if (np.linalg.norm(np.matmul(M, M.T) - I) < tol) and (
+    if (np.linalg.norm(np.matmul(M, M.T) - identity) < tol) and (
         np.abs(np.linalg.det(M) - 1) < tol
     ):
         tag = True
@@ -134,8 +134,8 @@ def visualize_scene(vis, object_dict, randomize_color=True):

 def create_visualizer(clear=True, zmq_url="tcp://127.0.0.1:6000"):
     print(
-        "Waiting for meshcat server... have you started a server? Run `meshcat-server` to start a"
-        f" server. Communicating on zmq_url={zmq_url}",
+        "Waiting for meshcat server... have you started a server? Run `meshcat-server`"
+        f" to start a server. Communicating on zmq_url={zmq_url}",
     )
     vis = meshcat.Visualizer(zmq_url=zmq_url)
     if clear:
diff --git a/pyproject.toml b/pyproject.toml
index 522345a6..3fd1cf85 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -103,7 +103,7 @@ changelog = "https://github.com/agimus-project/happypose/blob/main/CHANGELOG.md"

 [tool.ruff]
 extend-ignore = ["D203", "D213"]
-extend-select = ["A", "B", "C", "COM", "D", "EM", "EXE", "G", "N", "PTH", "RET", "RUF", "UP", "W", "YTT"]
+# extend-select = ["A", "B", "C", "COM", "D", "EM", "EXE", "G", "N", "PTH", "RET", "RUF", "UP", "W", "YTT"]
 target-version = "py39"

 [tool.tomlsort]
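The meshcat_utils.py hunk above renames the ambiguous single-letter variable I (ruff E741) to identity, which also describes the value. The test itself (orthogonality plus unit determinant) in standalone form:

import numpy as np

def is_rotation_matrix(M: np.ndarray, tol: float = 1e-4) -> bool:
    identity = np.identity(M.shape[0])
    orthogonal = np.linalg.norm(M @ M.T - identity) < tol
    return bool(orthogonal and abs(np.linalg.det(M) - 1) < tol)

assert is_rotation_matrix(np.identity(3))
assert not is_rotation_matrix(np.diag([1.0, 1.0, -1.0]))  # reflection: det == -1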