From 400d14de96c7f5ba385bed0dc756ef61c9f3c220 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Mar 2024 10:04:50 +0000 Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- .../cosypose/datasets/detection_dataset.py | 51 ++++++-------- .../cosypose/datasets/synthetic_dataset.py | 31 ++++----- .../cosypose/evaluation/prediction_runner.py | 8 +-- .../cosypose/evaluation/runner_utils.py | 2 +- .../cosypose/cosypose/models/pose.py | 2 +- .../cosypose/scripts/run_cosypose_eval.py | 8 +-- .../scripts/run_inference_on_example.py | 4 +- .../cosypose/scripts/run_pose_training.py | 2 + .../cosypose/training/pose_forward_loss.py | 1 - .../cosypose/training/train_detector.py | 35 ++++------ .../cosypose/cosypose/training/train_pose.py | 45 +++++------- happypose/toolbox/datasets/augmentations.py | 3 +- .../toolbox/datasets/bop_scene_dataset.py | 1 - happypose/toolbox/datasets/scene_dataset.py | 9 ++- .../datasets/scene_dataset_wrappers.py | 1 - pyproject.toml | 2 +- tests/config/test_config.py | 4 +- tests/test_batch_renderer_panda3d.py | 69 +++++++++++-------- tests/test_cosypose_inference.py | 36 +++++----- tests/test_megapose_inference.py | 19 +++-- tests/test_renderer_bullet.py | 47 ++++++++----- tests/test_scene_renderer_panda3d.py | 38 +++++----- 22 files changed, 215 insertions(+), 203 deletions(-) diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py index f1071033..63c0b29c 100644 --- a/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py +++ b/happypose/pose_estimators/cosypose/cosypose/datasets/detection_dataset.py @@ -1,6 +1,5 @@ import random from dataclasses import dataclass -from typing import List, Optional import numpy as np import torch @@ -29,20 +28,18 @@ PillowSharpness, VOCBackgroundAugmentation, ) +from happypose.toolbox.datasets.augmentations import ( + SceneObservationAugmentation as SceneObsAug, +) # HappyPose from happypose.toolbox.datasets.scene_dataset import ( IterableSceneDataset, - ObjectData, SceneDataset, SceneObservation, ) - -from happypose.toolbox.datasets.augmentations import ( - SceneObservationAugmentation as SceneObsAug, -) - from happypose.toolbox.datasets.scene_dataset_wrappers import remove_invisible_objects + # from happypose.toolbox.datasets.scene_dataset_wrappers import VisibilityWrapper """ from .augmentations import ( @@ -58,6 +55,7 @@ from .wrappers.visibility_wrapper import VisibilityWrapper """ + def collate_fn(batch): print("inside collate fn") rgbs, targets = zip(*batch) @@ -92,7 +90,8 @@ def collate_fn(batch): # Return the batch data as a dictionary return {"rgbs": rgbs, "targets": target} -#TODO : Double check on types and add format documentation + +# TODO : Double check on types and add format documentation @dataclass class DetectionData: """rgb: (h, w, 3) uint8 @@ -135,6 +134,7 @@ def pin_memory(self) -> "BatchDetectionData": self.depths = self.depths.pin_memory() return self + class DetectionDataset(torch.utils.data.IterableDataset): def __init__( self, @@ -168,13 +168,14 @@ def __init__( SceneObsAug(PillowSharpness(factor_interval=(0.0, 50.0)), p=0.3), SceneObsAug(PillowContrast(factor_interval=(0.2, 50.0)), p=0.3), SceneObsAug(PillowBrightness(factor_interval=(0.1, 6.0)), p=0.5), - SceneObsAug(PillowColor(factor_interval=(0.0, 20.0)), p=0.3),] + 
SceneObsAug(PillowColor(factor_interval=(0.0, 20.0)), p=0.3), + ] ) - ] self.label_to_category_id = label_to_category_id self.min_area = min_area + """ def collate_fn(self, list_data: List[DetectionData]) -> BatchDetectionData: batch_data = BatchDetectionData( @@ -194,19 +195,19 @@ def collate_fn(self, list_data: List[DetectionData]) -> BatchDetectionData: return batch_data """ + def make_data_from_obs(self, obs: SceneObservation, idx): - obs = remove_invisible_objects(obs) obs = self.resize_augmentation(obs) for aug in self.background_augmentations: obs = aug(obs) - + if self.rgb_augmentations and random.random() < 0.8: for aug in self.rgb_augmentations: obs = aug(obs) - + assert obs.object_datas is not None assert obs.rgb is not None assert obs.camera_data is not None @@ -226,13 +227,13 @@ def make_data_from_obs(self, obs: SceneObservation, idx): if obs.binary_masks is not None: binary_mask = torch.tensor(obs.binary_masks[obj_data.unique_id]).float() masks.append(binary_mask) - + if obs.segmentation is not None: binary_mask = np.zeros_like(obs.segmentation, dtype=np.bool_) binary_mask[obs.segmentation == obj_data.unique_id] = 1 binary_mask = torch.as_tensor(binary_mask).float() masks.append(binary_mask) - + masks = np.array(masks) masks = masks == obj_ids[:, None, None] @@ -258,17 +259,15 @@ def make_data_from_obs(self, obs: SceneObservation, idx): target["image_id"] = image_id target["area"] = area target["iscrowd"] = iscrowd - + return rgb, target - - def __getitem__(self, index: int): assert isinstance(self.scene_ds, SceneDataset) obs = self.scene_ds[index] return self.make_data_from_obs(obs, index) - - # def find_valid_data(self, iterator: Iterator[SceneObservation]) -> PoseData: + + # def find_valid_data(self, iterator: Iterator[SceneObservation]) -> PoseData: def find_valid_data(self, iterator): n_attempts = 0 for idx, obs in enumerate(iterator): @@ -279,17 +278,13 @@ def find_valid_data(self, iterator): if n_attempts > 200: msg = "Cannot find valid image in the dataset" raise ValueError(msg) - + def __iter__(self): assert isinstance(self.scene_ds, IterableSceneDataset) iterator = iter(self.scene_ds) while True: yield self.find_valid_data(iterator) - - - - - + def __init___old( self, scene_ds, @@ -322,10 +317,7 @@ def __init___old( self.label_to_category_id = label_to_category_id self.min_area = min_area - - def get_data_old(self, idx): - print("I am in get_data") rgb, mask, state = self.scene_ds[idx] @@ -393,4 +385,3 @@ def __getitem__old(self, index): try_index = random.randint(0, len(self.scene_ds) - 1) n_attempts += 1 return im, target - diff --git a/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py b/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py index 6c22de59..02db7a30 100644 --- a/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py +++ b/happypose/pose_estimators/cosypose/cosypose/datasets/synthetic_dataset.py @@ -14,7 +14,6 @@ CameraData, ObjectData, ObservationInfos, - SceneDataset, SceneObservation, ) from happypose.toolbox.lib3d.transform import Transform @@ -82,33 +81,33 @@ def __getitem__(self, idx): if obj["id_in_segm"] in mask_uniqs: obj["bbox"] = dets_gt[obj["id_in_segm"]].numpy() obj_data = ObjectData( - label= obj['name'], - TWO=Transform(obj['TWO']), - unique_id=obj['body_id'], - bbox_modal=obj['bbox'] + label=obj["name"], + TWO=Transform(obj["TWO"]), + unique_id=obj["body_id"], + bbox_modal=obj["bbox"], ) else: obj_data = ObjectData( - label= obj['name'], - TWO=Transform(obj['TWO']), - 
unique_id=obj['body_id'] + label=obj["name"], + TWO=Transform(obj["TWO"]), + unique_id=obj["body_id"], ) object_datas.append(obj_data) - + state = { "camera": cam, "objects": objects, "frame_info": self.frame_index.iloc[idx].to_dict(), } image_infos = ObservationInfos( - scene_id=self.frame_index.iloc[idx].to_dict()['scene_id'], - view_id=self.frame_index.iloc[idx].to_dict()['view_id']) - + scene_id=self.frame_index.iloc[idx].to_dict()["scene_id"], + view_id=self.frame_index.iloc[idx].to_dict()["view_id"], + ) + cam = CameraData( - K=cam["K"], - resolution=cam["resolution"], - TWC=Transform(cam["TWC"])) - + K=cam["K"], resolution=cam["resolution"], TWC=Transform(cam["TWC"]) + ) + observation = SceneObservation( rgb=rgb.numpy().astype(np.uint8), segmentation=mask.numpy().astype(np.uint32), diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py index 1efbbec9..084f0483 100644 --- a/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py +++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/prediction_runner.py @@ -94,10 +94,10 @@ def run_inference_pipeline( """ - if self.inference_cfg['detection_type'] == "gt": + if self.inference_cfg["detection_type"] == "gt": detections = gt_detections run_detector = False - elif self.inference_cfg['detection_type'] == "detector": + elif self.inference_cfg["detection_type"] == "detector": detections = None run_detector = True else: @@ -105,7 +105,7 @@ def run_inference_pipeline( raise ValueError(msg) coarse_estimates = None - if self.inference_cfg['coarse_estimation_type'] == "external": + if self.inference_cfg["coarse_estimation_type"] == "external": # TODO (ylabbe): This is hacky, clean this for modelnet eval. 
coarse_estimates = initial_estimates coarse_estimates = happypose.toolbox.inference.utils.add_instance_id( @@ -144,7 +144,7 @@ def run_inference_pipeline( "coarse": extra_data["coarse"]["preds"], } - if self.inference_cfg['run_depth_refiner']: + if self.inference_cfg["run_depth_refiner"]: all_preds["depth_refiner"] = extra_data["depth_refiner"]["preds"] # Remove any mask tensors diff --git a/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py b/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py index b360431c..4dedbfa8 100644 --- a/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py +++ b/happypose/pose_estimators/cosypose/cosypose/evaluation/runner_utils.py @@ -14,7 +14,7 @@ def run_pred_eval(pred_runner, pred_kwargs, eval_runner, eval_preds=None): all_predictions = {} for pred_prefix, pred_kwargs_n in pred_kwargs.items(): - pose_predictor = pred_kwargs_n['pose_predictor'] + pose_predictor = pred_kwargs_n["pose_predictor"] preds = pred_runner.get_predictions(pose_predictor) for preds_name, preds_n in preds.items(): all_predictions[f"{pred_prefix}/{preds_name}"] = preds_n diff --git a/happypose/pose_estimators/cosypose/cosypose/models/pose.py b/happypose/pose_estimators/cosypose/cosypose/models/pose.py index 028b68ad..54d8f557 100644 --- a/happypose/pose_estimators/cosypose/cosypose/models/pose.py +++ b/happypose/pose_estimators/cosypose/cosypose/models/pose.py @@ -180,7 +180,7 @@ def forward(self, images, K, labels, TCO, n_iterations=1): K_crop=K_crop, boxes_rend=boxes_rend, boxes_crop=boxes_crop, - model_outputs=model_outputs + model_outputs=model_outputs, ) TCO_input = TCO_output diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py index 243084e0..3817d28d 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_cosypose_eval.py @@ -22,10 +22,6 @@ RESULTS_DIR, ) from happypose.pose_estimators.cosypose.cosypose.datasets.bop import remap_bop_targets -from happypose.toolbox.datasets.datasets_cfg import ( - make_object_dataset, - make_scene_dataset, -) from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import ListSampler from happypose.pose_estimators.cosypose.cosypose.datasets.wrappers.multiview_wrapper import ( MultiViewWrapper, @@ -61,6 +57,10 @@ init_distributed_mode, ) from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger +from happypose.toolbox.datasets.datasets_cfg import ( + make_object_dataset, + make_scene_dataset, +) from happypose.toolbox.lib3d.transform import Transform from happypose.toolbox.renderer.bullet_batch_renderer import BulletBatchRenderer diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py index 7558bae7..c79f4b79 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_inference_on_example.py @@ -85,7 +85,9 @@ def run_inference( rgb, depth, camera_data = load_observation_example(example_dir, load_depth=True) # TODO: cosypose forward does not work if depth is loaded detection # contrary to megapose - observation = ObservationTensor.from_numpy(rgb, depth=None, K=camera_data.K).to(device) + observation = ObservationTensor.from_numpy(rgb, depth=None, 
K=camera_data.K).to( + device + ) # Load models pose_estimator = setup_pose_estimator(args.dataset, object_dataset) diff --git a/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py b/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py index 937ca2e7..bcbea419 100644 --- a/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py +++ b/happypose/pose_estimators/cosypose/cosypose/scripts/run_pose_training.py @@ -11,8 +11,10 @@ import warnings + warnings.filterwarnings("ignore") + def make_cfg(args): cfg = argparse.ArgumentParser("").parse_args([]) if args.config: diff --git a/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py b/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py index c0f49042..c2b460c9 100644 --- a/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py +++ b/happypose/pose_estimators/cosypose/cosypose/training/pose_forward_loss.py @@ -68,7 +68,6 @@ def h_pose(model, mesh_db, data, meters, cfg, n_iterations=1, input_generator="f TCO_pred = iter_outputs.TCO_output model_outputs = iter_outputs.model_outputs - if cfg.loss_disentangled: if cfg.n_pose_dims == 9: loss_fn = loss_refiner_CO_disentangled diff --git a/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py b/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py index ed754add..1858a8f7 100644 --- a/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py +++ b/happypose/pose_estimators/cosypose/cosypose/training/train_detector.py @@ -10,7 +10,7 @@ import yaml from torch.backends import cudnn from torch.hub import load_state_dict_from_url -from torch.utils.data import ConcatDataset, DataLoader +from torch.utils.data import DataLoader from torchnet.meter import AverageValueMeter from torchvision.models.detection.mask_rcnn import model_urls from tqdm import tqdm @@ -18,9 +18,8 @@ from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR from happypose.pose_estimators.cosypose.cosypose.datasets.detection_dataset import ( DetectionDataset, - collate_fn + collate_fn, ) -from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler from happypose.pose_estimators.cosypose.cosypose.integrated.detector import Detector # Evaluation @@ -35,25 +34,17 @@ sync_model, ) from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger -from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import ( - MultiEpochDataLoader, -) from happypose.toolbox.datasets.datasets_cfg import make_scene_dataset - from happypose.toolbox.datasets.scene_dataset import ( IterableMultiSceneDataset, - IterableSceneDataset, RandomIterableSceneDataset, - SceneDataset, ) - from happypose.toolbox.utils.resources import ( get_cuda_memory, get_gpu_memory, get_total_memory, ) - from .detector_models_cfg import check_update_config, create_model_detector from .maskrcnn_forward_loss import h_maskrcnn @@ -154,7 +145,6 @@ def train_detector(args): args.n_gpus = world_size args.global_batch_size = world_size * args.batch_size logger.info(f"Connection established with {world_size} gpus.") - # Make train/val datasets def make_datasets(dataset_names): @@ -168,9 +158,11 @@ def make_datasets(dataset_names): logger.info(f"Loaded {ds_name} with {len(ds)} images.") pre_label = ds_name.split(".")[0] for idx, label in enumerate(ds.all_labels): - ds.all_labels[idx] = "{pre_label}-{label}".format(pre_label=pre_label, label=label) + 
ds.all_labels[idx] = "{pre_label}-{label}".format( + pre_label=pre_label, label=label + ) all_labels = all_labels.union(set(ds.all_labels)) - + for _ in range(n_repeat): datasets.append(iterator) return IterableMultiSceneDataset(datasets), all_labels @@ -193,13 +185,13 @@ def make_datasets(dataset_names): "gray_augmentation": args.gray_augmentation, "label_to_category_id": label_to_category_id, } - + print("make datasets") ds_train = DetectionDataset(scene_ds_train, **ds_kwargs) ds_val = DetectionDataset(scene_ds_val, **ds_kwargs) print("make dataloaders") - #train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size) + # train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size) ds_iter_train = DataLoader( ds_train, batch_size=args.batch_size, @@ -210,7 +202,7 @@ def make_datasets(dataset_names): ) ds_iter_train = iter(ds_iter_train) - #val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size)) + # val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size)) ds_iter_val = DataLoader( ds_val, batch_size=args.batch_size, @@ -334,7 +326,9 @@ def train_epoch(): max_norm=np.inf, norm_type=2, ) - meters_train["grad_norm"].add(torch.as_tensor(total_grad_norm).item()) + meters_train["grad_norm"].add( + torch.as_tensor(total_grad_norm).item() + ) optimizer.step() meters_time["backward"].add(time.time() - t) @@ -364,8 +358,8 @@ def validation(): if epoch % args.val_epoch_interval == 0: validation() - #test_dict = None - #if epoch % args.test_epoch_interval == 0: + # test_dict = None + # if epoch % args.test_epoch_interval == 0: # model.eval() # test_dict = run_eval(args, model, epoch) @@ -387,7 +381,6 @@ def validation(): }, ) - for string, meters in zip(("train", "val"), (meters_train, meters_val)): for k in dict(meters).keys(): log_dict[f"{string}_{k}"] = meters[k].mean diff --git a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py index e4d1b1de..6b48dd4d 100644 --- a/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py +++ b/happypose/pose_estimators/cosypose/cosypose/training/train_pose.py @@ -8,14 +8,11 @@ import torch.distributed as dist import yaml from torch.backends import cudnn -from torch.utils.data import ConcatDataset, DataLoader +from torch.utils.data import DataLoader from torchnet.meter import AverageValueMeter from tqdm import tqdm from happypose.pose_estimators.cosypose.cosypose.config import EXP_DIR -from happypose.toolbox.datasets.pose_dataset import PoseDataset - -from happypose.pose_estimators.cosypose.cosypose.datasets.samplers import PartialSampler from happypose.pose_estimators.cosypose.cosypose.evaluation.prediction_runner import ( PredictionRunner, ) @@ -27,12 +24,7 @@ from happypose.pose_estimators.cosypose.cosypose.integrated.pose_estimator import ( PoseEstimator, ) -from happypose.pose_estimators.megapose.evaluation.meters.modelnet_meters import ( - ModelNetErrorMeter, -) - from happypose.pose_estimators.cosypose.cosypose.scripts.run_cosypose_eval import ( - get_pose_meters, load_pix2pose_results, load_posecnn_results, ) @@ -47,32 +39,31 @@ sync_model, ) from happypose.pose_estimators.cosypose.cosypose.utils.logging import get_logger -from happypose.pose_estimators.cosypose.cosypose.utils.multiepoch_dataloader import ( - MultiEpochDataLoader, -) from happypose.pose_estimators.megapose.evaluation.evaluation_runner import ( EvaluationRunner, ) +from happypose.pose_estimators.megapose.evaluation.meters.modelnet_meters 
import ( + ModelNetErrorMeter, +) from happypose.toolbox.datasets.datasets_cfg import ( make_object_dataset, make_scene_dataset, ) -from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase -from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer - -from .pose_forward_loss import h_pose -from .pose_models_cfg import check_update_config, create_pose_model_cosypose - +from happypose.toolbox.datasets.pose_dataset import PoseDataset from happypose.toolbox.datasets.scene_dataset import ( IterableMultiSceneDataset, RandomIterableSceneDataset, ) +from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase +from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer from happypose.toolbox.utils.resources import ( get_cuda_memory, get_gpu_memory, get_total_memory, ) +from .pose_forward_loss import h_pose +from .pose_models_cfg import check_update_config, create_pose_model_cosypose cudnn.benchmark = True logger = get_logger(__name__) @@ -305,18 +296,18 @@ def make_datasets(dataset_names): scene_ds_train = make_datasets(args.train_ds_names) scene_ds_val = make_datasets(args.val_ds_names) - + ds_kwargs = { "resize": args.input_resize, "apply_rgb_augmentation": args.rgb_augmentation, "apply_background_augmentation": args.background_augmentation, "min_area": args.min_area, - "apply_depth_augmentation":False + "apply_depth_augmentation": False, } ds_train = PoseDataset(scene_ds_train, **ds_kwargs) ds_val = PoseDataset(scene_ds_val, **ds_kwargs) - #train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size) + # train_sampler = PartialSampler(ds_train, epoch_size=args.epoch_size) ds_iter_train = DataLoader( ds_train, batch_size=args.batch_size, @@ -327,7 +318,7 @@ def make_datasets(dataset_names): ) ds_iter_train = iter(ds_iter_train) - #val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size)) + # val_sampler = PartialSampler(ds_val, epoch_size=int(0.1 * args.epoch_size)) ds_iter_val = DataLoader( ds_val, batch_size=args.batch_size, @@ -449,7 +440,9 @@ def train_epoch(): max_norm=args.clip_grad_norm, norm_type=2, ) - meters_train["grad_norm"].add(torch.as_tensor(total_grad_norm).item()) + meters_train["grad_norm"].add( + torch.as_tensor(total_grad_norm).item() + ) optimizer.step() meters_time["backward"].add(time.time() - t) @@ -485,8 +478,8 @@ def test(): if epoch % args.val_epoch_interval == 0: validation() - #test_dict = None - #if epoch % args.test_epoch_interval == 0: + # test_dict = None + # if epoch % args.test_epoch_interval == 0: # test_dict = test() print("updating dic") @@ -508,8 +501,6 @@ def test(): }, ) - - print("meters") for string, meters in zip(("train", "val"), (meters_train, meters_val)): for k in dict(meters).keys(): diff --git a/happypose/toolbox/datasets/augmentations.py b/happypose/toolbox/datasets/augmentations.py index a91c1530..6a9c38fd 100644 --- a/happypose/toolbox/datasets/augmentations.py +++ b/happypose/toolbox/datasets/augmentations.py @@ -26,8 +26,7 @@ import PIL import torch from PIL import ImageEnhance, ImageFilter -from torchvision.datasets import ImageFolder, VOCSegmentation - +from torchvision.datasets import VOCSegmentation # HappyPose from happypose.toolbox.datasets.scene_dataset import Resolution, SceneObservation diff --git a/happypose/toolbox/datasets/bop_scene_dataset.py b/happypose/toolbox/datasets/bop_scene_dataset.py index b3c44127..1236ff84 100644 --- a/happypose/toolbox/datasets/bop_scene_dataset.py +++ b/happypose/toolbox/datasets/bop_scene_dataset.py @@ 
-238,7 +238,6 @@ def __init__( ) models_infos = json.loads((ds_dir / "models" / "models_info.json").read_text()) self.all_labels = [f"obj_{int(obj_id):06d}" for obj_id in models_infos.keys()] - def _load_scene_observation( self, diff --git a/happypose/toolbox/datasets/scene_dataset.py b/happypose/toolbox/datasets/scene_dataset.py index 16d87414..d27d3249 100644 --- a/happypose/toolbox/datasets/scene_dataset.py +++ b/happypose/toolbox/datasets/scene_dataset.py @@ -205,27 +205,26 @@ class SceneObservation: def __iter__(self): masks = [] obs = self - + rgb = obs.rgb for _n, obj_data in enumerate(obs.object_datas): if obs.binary_masks is not None: binary_mask = torch.tensor(obs.binary_masks[obj_data.unique_id]).float() masks.append(binary_mask) - + if obs.segmentation is not None: binary_mask = np.zeros_like(obs.segmentation, dtype=np.bool_) binary_mask[obs.segmentation == obj_data.unique_id] = 1 binary_mask = torch.as_tensor(binary_mask).float() masks.append(binary_mask) - + obs = { "objects": obs.object_datas, "camera": obs.camera_data, - "frame_info": obs.infos + "frame_info": obs.infos, } return iter((rgb, masks, obs)) - @staticmethod def collate_fn( batch: List[SceneObservation], diff --git a/happypose/toolbox/datasets/scene_dataset_wrappers.py b/happypose/toolbox/datasets/scene_dataset_wrappers.py index 9d8bcd62..391a4d50 100644 --- a/happypose/toolbox/datasets/scene_dataset_wrappers.py +++ b/happypose/toolbox/datasets/scene_dataset_wrappers.py @@ -18,7 +18,6 @@ # Third Party import numpy as np -import pandas as pd # Local Folder from happypose.toolbox.datasets.scene_dataset import SceneDataset, SceneObservation diff --git a/pyproject.toml b/pyproject.toml index a957a438..dd632199 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ plyfile = "^1.0.3" pybind11 = "^2.10.4" pybullet = "^3.2.5" pypng = "^0.20220715.0" +pytest = "^8.1.1" python = ">=3.9,<3.11" pyyaml = "^6.0.1" roma = "^1.4.0" @@ -62,7 +63,6 @@ tqdm = "^4.66.1" transforms3d = "^0.4.1" trimesh = "^3.23.5" webdataset = "^0.2.57" -pytest = "^8.1.1" [tool.poetry.extras] cpu = ["torch", "torchvision"] diff --git a/tests/config/test_config.py b/tests/config/test_config.py index f7201c35..37b41a86 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -1,8 +1,8 @@ import torch if torch.cuda.is_available(): - DEVICE = ['cuda'] + DEVICE = ["cuda"] # Should be later changed to DEVICE = ['cpu', 'cuda'] # See Issue #146 else: - DEVICE = ['cpu'] + DEVICE = ["cpu"] diff --git a/tests/test_batch_renderer_panda3d.py b/tests/test_batch_renderer_panda3d.py index cdac921b..a9f53c9a 100644 --- a/tests/test_batch_renderer_panda3d.py +++ b/tests/test_batch_renderer_panda3d.py @@ -1,8 +1,7 @@ """Set of unit tests for Panda3D renderer.""" -import unittest +import os from pathlib import Path -from typing import List import numpy as np import pytest @@ -11,26 +10,23 @@ from numpy.testing import assert_equal as np_assert_equal from torch.testing import assert_allclose as tr_assert_allclose -from .config.test_config import DEVICE - from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset from happypose.toolbox.lib3d.transform import Transform from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer -from happypose.toolbox.renderer.panda3d_scene_renderer import Panda3dSceneRenderer from happypose.toolbox.renderer.types import ( - CameraRenderingData, Panda3dCameraData, Panda3dLightData, Panda3dObjectData, ) -import os +from .config.test_config import DEVICE + 
+os.environ["CUDA_VISIBLE_DEVICES"] = "0" -os.environ["CUDA_VISIBLE_DEVICES"] = "0" -class TestPanda3DBatchRenderer(): +class TestPanda3DBatchRenderer: """Unit tests for Panda3D renderer.""" - + @pytest.fixture(autouse=True) def setUp(self) -> None: self.obj_label = "my_favorite_object_label" @@ -71,12 +67,13 @@ def setUp(self) -> None: self.light_datas = Nb_lights * [ Panda3dLightData(light_type="ambient", color=(1.0, 1.0, 1.0, 1.0)) ] + """ @pytest.mark.order(1) @pytest.mark.parametrize('device', DEVICE) def test_scene_renderer(self, device): """ - #Scene render an example object and check that output image match expectation. + # Scene render an example object and check that output image match expectation. """ SAVEFIG = False @@ -134,7 +131,7 @@ def test_scene_renderer(self, device): # ================================ assert np_assert_equal(rgb[0, 0], (0, 0, 0)) is None assert np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) is None - + assert depth[0, 0] == 0 assert depth[self.height // 2, self.width // 2] < self.z_obj assert np_assert_equal(normals[0, 0], (0, 0, 0)) is None @@ -207,14 +204,15 @@ def test_scene_renderer(self, device): render_binary_mask=True, ) """ + @pytest.mark.order(2) - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) def test_batch_renderer(self, device): """ Batch render an example object and check that output image match expectation. """ SAVEFIG = False - + renderer = Panda3dBatchRenderer( asset_dataset=self.asset_dataset, n_workers=4, @@ -228,7 +226,7 @@ def test_batch_renderer(self, device): K = K.unsqueeze(0) TCO = TCO.repeat(self.Nc, 1, 1) K = K.repeat(self.Nc, 1, 1) - + # labels and light_datas need to have same size as TCO/K batch size renderings = renderer.render( labels=self.Nc * [self.obj_label], @@ -240,8 +238,7 @@ def test_batch_renderer(self, device): render_depth=True, render_binary_mask=True, ) - - + assert renderings.rgbs.shape == (self.Nc, 3, self.height, self.width) assert renderings.depths.shape == (self.Nc, 1, self.height, self.width) assert renderings.normals.shape == (self.Nc, 3, self.height, self.width) @@ -256,24 +253,35 @@ def test_batch_renderer(self, device): assert tr_assert_allclose(renderings.rgbs[0], renderings.rgbs[1]) is None assert tr_assert_allclose(renderings.normals[0], renderings.normals[1]) is None assert tr_assert_allclose(renderings.depths[0], renderings.depths[1]) is None - assert tr_assert_allclose(renderings.binary_masks[0], renderings.binary_masks[1]) is None - + assert ( + tr_assert_allclose(renderings.binary_masks[0], renderings.binary_masks[1]) + is None + ) + if device == "cpu": rgb = renderings.rgbs[0].movedim(0, -1).numpy() # (Nc,3,h,w) -> (h,w,3) - normals = renderings.normals[0].movedim(0, -1).numpy() # (Nc,1,h,w) -> (h,w,1) + normals = ( + renderings.normals[0].movedim(0, -1).numpy() + ) # (Nc,1,h,w) -> (h,w,1) depth = renderings.depths[0].movedim(0, -1).numpy() # (Nc,3,h,w) -> (h,w,3) binary_mask = ( renderings.binary_masks[0].movedim(0, -1).numpy() ) # (Nc,1,h,w) -> (h,w,1) - + else: - rgb = renderings.rgbs[0].movedim(0, -1).cpu().numpy() # (Nc,3,h,w) -> (h,w,3) - normals = renderings.normals[0].movedim(0, -1).cpu().numpy() # (Nc,1,h,w) -> (h,w,1) - depth = renderings.depths[0].movedim(0, -1).cpu().numpy() # (Nc,3,h,w) -> (h,w,3) + rgb = ( + renderings.rgbs[0].movedim(0, -1).cpu().numpy() + ) # (Nc,3,h,w) -> (h,w,3) + normals = ( + renderings.normals[0].movedim(0, -1).cpu().numpy() + ) # (Nc,1,h,w) -> (h,w,1) + depth = ( + 
+                renderings.depths[0].movedim(0, -1).cpu().numpy()
+            )  # (Nc,1,h,w) -> (h,w,1)
             binary_mask = (
                 renderings.binary_masks[0].movedim(0, -1).cpu().numpy()
             )  # (Nc,1,h,w) -> (h,w,1)
-            
+
         if SAVEFIG:
             import matplotlib.pyplot as plt
 
@@ -293,15 +301,20 @@ def test_batch_renderer(self, device):
         # ================================
 
         assert np_assert_equal(rgb[0, 0], (0, 0, 0)) is None
-        assert np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) is None
+        assert (
+            np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2])
+            is None
+        )
 
         assert depth[0, 0] == 0
         assert depth[self.height // 2, self.width // 2] < self.z_obj
 
         assert np_assert_equal(normals[0, 0], (0, 0, 0)) is None
-        assert np_assert_array_less((0, 0, 0), normals[self.height // 2, self.width // 2]) is None
+        assert (
+            np_assert_array_less((0, 0, 0), normals[self.height // 2, self.width // 2])
+            is None
+        )
 
         assert binary_mask[0, 0] == 0
         assert binary_mask[self.height // 2, self.width // 2] == 1
 
-
         # ==================
         # Partial renderings
         # ==================
diff --git a/tests/test_cosypose_inference.py b/tests/test_cosypose_inference.py
index fd1a1f94..adddb767 100644
--- a/tests/test_cosypose_inference.py
+++ b/tests/test_cosypose_inference.py
@@ -19,9 +19,11 @@
 from happypose.toolbox.inference.types import ObservationTensor
 from happypose.toolbox.inference.utils import load_detector
 from happypose.toolbox.lib3d.rigid_mesh_database import MeshDataBase
+
 from .config.test_config import DEVICE
 
-class TestCosyPoseInference():
+
+class TestCosyPoseInference:
     """Unit tests for CosyPose inference example."""
 
     @pytest.fixture(autouse=True)
     def setUp(self) -> None:
@@ -37,11 +39,13 @@ def setUp(self) -> None:
         self.refiner_run_id = "refiner-bop-hope-pbr--955392"
         # TODO : this should be corrected and probably use pytest.parametrize as other tests
         # however, stacking decorators seems to not work as intended.
- self.device = 'cpu' + self.device = "cpu" rgb, depth, camera_data = load_observation_example(data_dir, load_depth=True) # TODO: cosypose forward does not work if depth is loaded detection contrary to megapose - self.observation = ObservationTensor.from_numpy(rgb, depth=None, K=camera_data.K).cpu() + self.observation = ObservationTensor.from_numpy( + rgb, depth=None, K=camera_data.K + ).cpu() self.detector = load_detector( run_id="detector-bop-hope-pbr--15246", device=self.device @@ -58,17 +62,17 @@ def setUp(self) -> None: ] ) mesh_db = MeshDataBase.from_object_ds(self.object_dataset) - self.mesh_db_batched = mesh_db.batched().to('cpu') + self.mesh_db_batched = mesh_db.batched().to("cpu") - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) @pytest.mark.order(3) def test_cosypose_pipeline_panda3d(self, device): from happypose.toolbox.renderer.panda3d_batch_renderer import ( Panda3dBatchRenderer, ) - + # This is a trick that should be replaced, see comment line 38 - if device == 'cpu': + if device == "cpu": self.device = device self.observation = self.observation.cpu() self.detector.to(device) @@ -111,24 +115,24 @@ def test_cosypose_pipeline_panda3d(self, device): assert len(preds) == 1 assert preds.infos.label[0] == self.expected_object_label - if device == "cpu": + if device == "cpu": pose = pin.SE3(preds.poses[0].numpy()) else: pose = pin.SE3(preds.poses[0].cpu().numpy()) - + exp_pose = pin.SE3( pin.exp3(np.array([1.4, 1.6, -1.11])), np.array([0.1, 0.07, 0.45]), ) diff = pose.inverse() * exp_pose assert np.linalg.norm(pin.log6(diff).vector) < 0.3 - - @pytest.mark.parametrize('device', DEVICE) + + @pytest.mark.parametrize("device", DEVICE) def test_cosypose_pipeline_bullet(self, device): from happypose.toolbox.renderer.bullet_batch_renderer import BulletBatchRenderer # This is a trick that should be replaced, see comment line 38 - - if device == 'cpu': + + if device == "cpu": self.device = device self.observation = self.observation.cpu() self.detector.to(device) @@ -175,14 +179,14 @@ def test_cosypose_pipeline_bullet(self, device): assert len(preds) == 1 assert preds.infos.label[0] == self.expected_object_label - if device == "cpu": + if device == "cpu": pose = pin.SE3(preds.poses[0].numpy()) else: pose = pin.SE3(preds.poses[0].cpu().numpy()) - + exp_pose = pin.SE3( pin.exp3(np.array([1.4, 1.6, -1.11])), np.array([0.1, 0.07, 0.45]), ) diff = pose.inverse() * exp_pose - assert np.linalg.norm(pin.log6(diff).vector) < 0.3 \ No newline at end of file + assert np.linalg.norm(pin.log6(diff).vector) < 0.3 diff --git a/tests/test_megapose_inference.py b/tests/test_megapose_inference.py index 93f80411..dae34e4e 100644 --- a/tests/test_megapose_inference.py +++ b/tests/test_megapose_inference.py @@ -13,14 +13,15 @@ from happypose.toolbox.inference.types import ObservationTensor from happypose.toolbox.inference.utils import load_detector from happypose.toolbox.utils.load_model import NAMED_MODELS, load_named_model + from .config.test_config import DEVICE class TestMegaPoseInference: """Unit tests for MegaPose inference example.""" - + @pytest.mark.order(1) - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) def test_megapose_pipeline(self, device): """Run detector from with coarse and refiner from MegaPose.""" @@ -33,10 +34,14 @@ def test_megapose_pipeline(self, device): rgb, depth, camera_data = load_observation_example(data_dir, load_depth=True) # TODO: cosypose forward does not work if depth is loaded detection contrary 
to megapose if device == "cpu": - observation = ObservationTensor.from_numpy(rgb, depth=None, K=camera_data.K).cpu() + observation = ObservationTensor.from_numpy( + rgb, depth=None, K=camera_data.K + ).cpu() else: - observation = ObservationTensor.from_numpy(rgb, depth=None, K=camera_data.K).cuda() - + observation = ObservationTensor.from_numpy( + rgb, depth=None, K=camera_data.K + ).cuda() + detector = load_detector(run_id="detector-bop-hope-pbr--15246", device=device) object_dataset = RigidObjectDataset( objects=[ @@ -67,11 +72,11 @@ def test_megapose_pipeline(self, device): assert len(preds) == 1 assert preds.infos.label[0] == expected_object_label - if device == "cpu": + if device == "cpu": pose = pin.SE3(preds.poses[0].numpy()) else: pose = pin.SE3(preds.poses[0].cpu().numpy()) - + exp_pose = pin.SE3( pin.exp3(np.array([1.4, 1.6, -1.11])), np.array([0.1, 0.07, 0.45]), diff --git a/tests/test_renderer_bullet.py b/tests/test_renderer_bullet.py index 2dc78e83..fc09b6db 100644 --- a/tests/test_renderer_bullet.py +++ b/tests/test_renderer_bullet.py @@ -1,24 +1,23 @@ """Set of unit tests for bullet renderer.""" -import unittest from pathlib import Path -import pytest import numpy as np +import pytest import torch from numpy.testing import assert_array_less as np_assert_array_less from numpy.testing import assert_equal as np_assert_equal from torch.testing import assert_allclose as tr_assert_allclose -from .config.test_config import DEVICE - from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset from happypose.toolbox.lib3d.transform import Transform from happypose.toolbox.renderer.bullet_batch_renderer import BulletBatchRenderer from happypose.toolbox.renderer.bullet_scene_renderer import BulletSceneRenderer +from .config.test_config import DEVICE + -class TestBulletRenderer(): +class TestBulletRenderer: """Unit tests for bullet renderer.""" @pytest.fixture(autouse=True) @@ -63,7 +62,7 @@ def setUp(self) -> None: } ] - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) def test_scene_renderer(self, device): """ Render an example object and check that output image match expectation. @@ -95,7 +94,7 @@ def test_scene_renderer(self, device): assert rgb.shape == (self.height, self.width, 3) assert depth.shape == (self.height, self.width, 1) assert binary_mask.shape == (self.height, self.width, 1) - + assert rgb.dtype == np.dtype(np.uint8) assert depth.dtype == np.dtype(np.float32) assert binary_mask.dtype == np.dtype(bool) @@ -117,7 +116,10 @@ def test_scene_renderer(self, device): # ================================ assert np_assert_equal(rgb[0, 0], (0, 0, 0)) is None - assert np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) is None + assert ( + np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) + is None + ) assert depth[0, 0] == 0 assert depth[self.height // 2, self.width // 2] < self.z_obj assert binary_mask[0, 0] == 0 @@ -156,7 +158,7 @@ def test_scene_renderer(self, device): assert renderings[0].depth is None assert renderings[0].binary_mask is not None - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) def test_batch_renderer(self, device): """ Render an example object and check that output image match expectation. 
@@ -194,7 +196,7 @@ def test_batch_renderer(self, device): ) assert renderings.normals is None # normals not supported - + assert renderings.rgbs.shape == (self.Nc, 3, self.height, self.width) assert renderings.depths.shape == (self.Nc, 1, self.height, self.width) assert renderings.binary_masks.shape == (self.Nc, 1, self.height, self.width) @@ -206,17 +208,24 @@ def test_batch_renderer(self, device): # Renders from 2 identical cams are equals assert tr_assert_allclose(renderings.rgbs[0], renderings.rgbs[1]) is None assert tr_assert_allclose(renderings.depths[0], renderings.depths[1]) is None - assert tr_assert_allclose(renderings.binary_masks[0], renderings.binary_masks[1]) is None - - if device=="cpu": + assert ( + tr_assert_allclose(renderings.binary_masks[0], renderings.binary_masks[1]) + is None + ) + + if device == "cpu": rgb = renderings.rgbs[0].movedim(0, -1).numpy() # (Nc,3,h,w) -> (h,w,3) depth = renderings.depths[0].movedim(0, -1).numpy() # (Nc,1,h,w) -> (h,w,1) binary_mask = ( renderings.binary_masks[0].movedim(0, -1).numpy() ) # (Nc,1,h,w) -> (h,w,1) else: - rgb = renderings.rgbs[0].movedim(0, -1).cpu().numpy() # (Nc,3,h,w) -> (h,w,3) - depth = renderings.depths[0].movedim(0, -1).cpu().numpy() # (Nc,1,h,w) -> (h,w,1) + rgb = ( + renderings.rgbs[0].movedim(0, -1).cpu().numpy() + ) # (Nc,3,h,w) -> (h,w,3) + depth = ( + renderings.depths[0].movedim(0, -1).cpu().numpy() + ) # (Nc,1,h,w) -> (h,w,1) binary_mask = ( renderings.binary_masks[0].movedim(0, -1).cpu().numpy() ) # (Nc,1,h,w) -> (h,w,1) @@ -238,13 +247,15 @@ def test_batch_renderer(self, device): # ================================ assert np_assert_equal(rgb[0, 0], (0, 0, 0)) is None - assert np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) is None + assert ( + np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) + is None + ) assert depth[0, 0] == 0 assert depth[self.height // 2, self.width // 2] < self.z_obj assert binary_mask[0, 0] == 0 assert binary_mask[self.height // 2, self.width // 2] == 1 - # ================== # Partial renderings # ================== @@ -282,4 +293,4 @@ def test_batch_renderer(self, device): ) assert renderings.rgbs is not None assert renderings.depths is None - assert renderings.binary_masks is not None \ No newline at end of file + assert renderings.binary_masks is not None diff --git a/tests/test_scene_renderer_panda3d.py b/tests/test_scene_renderer_panda3d.py index 410c5ec4..5233e77c 100644 --- a/tests/test_scene_renderer_panda3d.py +++ b/tests/test_scene_renderer_panda3d.py @@ -1,21 +1,17 @@ """Set of unit tests for Panda3D renderer.""" -import unittest +import os from pathlib import Path from typing import List import numpy as np import pytest -import torch from numpy.testing import assert_array_less as np_assert_array_less from numpy.testing import assert_equal as np_assert_equal from torch.testing import assert_allclose as tr_assert_allclose -from .config.test_config import DEVICE - from happypose.toolbox.datasets.object_dataset import RigidObject, RigidObjectDataset from happypose.toolbox.lib3d.transform import Transform -from happypose.toolbox.renderer.panda3d_batch_renderer import Panda3dBatchRenderer from happypose.toolbox.renderer.panda3d_scene_renderer import Panda3dSceneRenderer from happypose.toolbox.renderer.types import ( CameraRenderingData, @@ -24,13 +20,14 @@ Panda3dObjectData, ) -import os +from .config.test_config import DEVICE + +os.environ["CUDA_VISIBLE_DEVICES"] = "0" -os.environ["CUDA_VISIBLE_DEVICES"] = "0" -class 
TestPanda3DBatchRenderer(): +class TestPanda3DBatchRenderer: """Unit tests for Panda3D renderer.""" - + @pytest.fixture(autouse=True) def setUp(self) -> None: self.obj_label = "my_favorite_object_label" @@ -71,9 +68,9 @@ def setUp(self) -> None: self.light_datas = Nb_lights * [ Panda3dLightData(light_type="ambient", color=(1.0, 1.0, 1.0, 1.0)) ] - + @pytest.mark.order(4) - @pytest.mark.parametrize('device', DEVICE) + @pytest.mark.parametrize("device", DEVICE) def test_scene_renderer(self, device): """ Scene render an example object and check that output image match expectation. @@ -97,7 +94,10 @@ def test_scene_renderer(self, device): assert tr_assert_allclose(renderings[0].rgb, renderings[1].rgb) is None assert tr_assert_allclose(renderings[0].normals, renderings[1].normals) is None assert tr_assert_allclose(renderings[0].depth, renderings[1].depth) is None - assert tr_assert_allclose(renderings[0].binary_mask, renderings[1].binary_mask) is None + assert ( + tr_assert_allclose(renderings[0].binary_mask, renderings[1].binary_mask) + is None + ) rgb = renderings[0].rgb normals = renderings[0].normals @@ -133,12 +133,18 @@ def test_scene_renderer(self, device): # ================================ assert np_assert_equal(rgb[0, 0], (0, 0, 0)) is None - assert np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) is None - + assert ( + np_assert_array_less((0, 0, 0), rgb[self.height // 2, self.width // 2]) + is None + ) + assert depth[0, 0] == 0 assert depth[self.height // 2, self.width // 2] < self.z_obj assert np_assert_equal(normals[0, 0], (0, 0, 0)) is None - assert np_assert_array_less((0, 0, 0), normals[self.height // 2, self.width // 2]) is None + assert ( + np_assert_array_less((0, 0, 0), normals[self.height // 2, self.width // 2]) + is None + ) assert binary_mask[0, 0] == 0 assert binary_mask[self.height // 2, self.width // 2] == 1 @@ -205,4 +211,4 @@ def test_scene_renderer(self, device): render_depth=False, render_normals=False, render_binary_mask=True, - ) \ No newline at end of file + )
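
For readers tracing the largest non-mechanical change above: detection_dataset.py now routes its RGB augmentations through the toolbox-level SceneObservationAugmentation wrapper (aliased SceneObsAug). Below is a minimal sketch of that pipeline as it can be read off the hunks above. The factor intervals, probabilities, the SceneObsAug alias, and the 0.8 gate are copied from the diff; the assumption that the Pillow* classes live in the same happypose.toolbox.datasets.augmentations module follows the visible import fragment, and the standalone augment_rgb function is illustrative only, not code from the repository.

    # Sketch only: mirrors the augmentation setup visible in the
    # detection_dataset.py hunks; everything not shown in the diff
    # (module path for the Pillow* classes, the wrapper function)
    # is an assumption.
    import random

    from happypose.toolbox.datasets.augmentations import (
        PillowBrightness,
        PillowColor,
        PillowContrast,
        PillowSharpness,
    )
    from happypose.toolbox.datasets.augmentations import (
        SceneObservationAugmentation as SceneObsAug,
    )

    # Each wrapper applies its augmentation to a SceneObservation
    # with its own probability p, as in DetectionDataset.__init__.
    rgb_augmentations = [
        SceneObsAug(PillowSharpness(factor_interval=(0.0, 50.0)), p=0.3),
        SceneObsAug(PillowContrast(factor_interval=(0.2, 50.0)), p=0.3),
        SceneObsAug(PillowBrightness(factor_interval=(0.1, 6.0)), p=0.5),
        SceneObsAug(PillowColor(factor_interval=(0.0, 20.0)), p=0.3),
    ]


    def augment_rgb(obs):
        # Mirrors make_data_from_obs: the whole RGB chain is applied
        # with probability 0.8, then each wrapper fires with its own p.
        if rgb_augmentations and random.random() < 0.8:
            for aug in rgb_augmentations:
                obs = aug(obs)
        return obs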