From 4f3f351e935ecab415617e598d8ab022b8ac3296 Mon Sep 17 00:00:00 2001
From: Isabelle Bouchard
Date: Tue, 19 Mar 2024 10:35:53 -0400
Subject: [PATCH] clean training

---
 cameras.json                               |   1 -
 .../gaussian_renderer/__init__.py          |  13 +-
 gaussian_splatting/optimizer.py            |   9 +-
 gaussian_splatting/scene/gaussian_model.py | 126 +++++-
 gaussian_splatting/training.py             | 424 ++++++------------
 gaussian_splatting/utils/general.py        |  27 +-
 input.ply                                  | Bin 3201 -> 0 bytes
 scripts/render.py                          |   4 +-
 scripts/train.py                           |  10 -
 9 files changed, 268 insertions(+), 346 deletions(-)
 delete mode 100644 cameras.json
 delete mode 100644 input.ply

diff --git a/cameras.json b/cameras.json
deleted file mode 100644
index 7d438be36..000000000
--- a/cameras.json
+++ /dev/null
@@ -1 +0,0 @@
-[{"id": 0, "img_name": "91", "width": 973, "height": 1694, "position": [0.8108296526234662, 0.11267820146747262, 4.93253093820004], "rotation": [[0.98653576584077, -0.0434107256093174, -0.15767907793692232], [0.03562677079476447, 0.9980185458906418, -0.05186246485721734], [0.159618031311324, 0.045546580117516396, 0.9861275744648335]], "fy": 3450.9290725258384, "fx": 2525.538657941052}, {"id": 1, "img_name": "384", "width": 973, "height": 1694, "position": [-0.8108296794706251, -0.1126781973682736, -4.9325307789971635], "rotation": [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], "fy": 3450.9290725258384, "fx": 2525.538657941052}]
\ No newline at end of file
diff --git a/gaussian_splatting/gaussian_renderer/__init__.py b/gaussian_splatting/gaussian_renderer/__init__.py
index 2df6077b5..e6de931a0 100644
--- a/gaussian_splatting/gaussian_renderer/__init__.py
+++ b/gaussian_splatting/gaussian_renderer/__init__.py
@@ -29,7 +29,6 @@ def render(
     Background tensor (bg_color) must be on GPU!
     """
-
     if bg_color is None:
         bg_color = torch.tensor([1, 1, 1], dtype=torch.float32, device="cuda")
 
@@ -80,9 +79,9 @@ def render(
     # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
     # They will be excluded from value updates used in the splitting criteria.
-    return {
-        "render": rendered_image,
-        "viewspace_points": screenspace_points,
-        "visibility_filter": radii > 0,
-        "radii": radii,
-    }
+    return (
+        rendered_image,
+        screenspace_points,
+        radii > 0,
+        radii,
+    )
diff --git a/gaussian_splatting/optimizer.py b/gaussian_splatting/optimizer.py
index 5e65d6b60..b4f1379fb 100644
--- a/gaussian_splatting/optimizer.py
+++ b/gaussian_splatting/optimizer.py
@@ -1,4 +1,5 @@
 import torch
+from torch import nn
 
 from gaussian_splatting.utils.general import get_expon_lr_func
 
@@ -78,11 +79,11 @@ def state_dict(self):
     def load_state_dict(self, state_dict):
         self._optimizer.load_state_dict(state_dict)
 
-    def replace_tensor(self, tensor, name):
+    def replace_points(self, tensor, name):
         optimizable_tensors = {}
         for group in self._optimizer.param_groups:
             if group["name"] == name:
-                stored_state = self.optimizer.state.get(group["params"][0], None)
+                stored_state = self._optimizer.state.get(group["params"][0], None)
 
                 stored_state["exp_avg"] = torch.zeros_like(tensor)
                 stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
@@ -94,7 +95,7 @@ def replace_tensor(self, tensor, name):
 
         return optimizable_tensors
 
-    def prune(self, mask):
+    def prune_points(self, mask):
         optimizable_tensors = {}
         for group in self._optimizer.param_groups:
             stored_state = self._optimizer.state.get(group["params"][0], None)
@@ -117,7 +118,7 @@ def prune(self, mask):
 
         return optimizable_tensors
 
-    def cat_tensors(self, tensors_dict):
+    def concatenate_points(self, tensors_dict):
         optimizable_tensors = {}
         for group in self._optimizer.param_groups:
             assert len(group["params"]) == 1
diff --git a/gaussian_splatting/scene/gaussian_model.py b/gaussian_splatting/scene/gaussian_model.py
index 6b7736114..77e81b3c2 100644
--- a/gaussian_splatting/scene/gaussian_model.py
+++ b/gaussian_splatting/scene/gaussian_model.py
@@ -17,7 +17,8 @@
 from simple_knn._C import distCUDA2
 from torch import nn
 
-from gaussian_splatting.utils.general import (build_scaling_rotation,
+from gaussian_splatting.utils.general import (build_rotation,
+                                              build_scaling_rotation,
                                               inverse_sigmoid, strip_symmetric)
 from gaussian_splatting.utils.graphics import BasicPointCloud
 from gaussian_splatting.utils.sh import RGB2SH
@@ -34,7 +35,7 @@ def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
 
 class GaussianModel:
-    def __init__(self, sh_degree: int = 3, percent_dense=0.01):
+    def __init__(self, sh_degree: int = 3):
         self.active_sh_degree = 0
         self.max_sh_degree = sh_degree
 
@@ -49,8 +50,6 @@ def __init__(self, sh_degree: int = 3, percent_dense=0.01):
         self.xyz_gradient_accum = torch.empty(0)
         self.denom = torch.empty(0)
 
-        self.percent_dense = percent_dense
-
         self.scaling_activation = torch.exp
         self.scaling_inverse_activation = torch.log
         self.covariance_activation = build_covariance_from_scaling_rotation
@@ -111,8 +110,19 @@ def get_features(self):
     def get_opacity(self):
         return self.opacity_activation(self._opacity)
 
-    def set_opacity(self, opacity):
-        self._opacity = opacity
+    def set_optimizable_tensors(self, optimizable_tensors):
+        if "xyz" in optimizable_tensors:
+            self._xyz = optimizable_tensors["xyz"]
+        if "f_dc" in optimizable_tensors:
+            self._features_dc = optimizable_tensors["f_dc"]
+        if "f_rest" in optimizable_tensors:
+            self._features_rest = optimizable_tensors["f_rest"]
+        if "opacity" in optimizable_tensors:
+            self._opacity = optimizable_tensors["opacity"]
+        if "scaling" in optimizable_tensors:
+            self._scaling = optimizable_tensors["scaling"]
+        if "rotation" in optimizable_tensors:
+            self._rotation = optimizable_tensors["rotation"]
 
     def get_covariance(self, scaling_modifier=1):
         return self.covariance_activation(
@@ -167,8 +177,8 @@ def initialize(self, dataset):
         self._scaling = nn.Parameter(scales.requires_grad_(True))
         self._rotation = nn.Parameter(rots.requires_grad_(True))
         self._opacity = nn.Parameter(opacities.requires_grad_(True))
-        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
 
+        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
         self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
         self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
 
@@ -303,16 +313,100 @@ def load_ply(self, path):
 
         self.active_sh_degree = self.max_sh_degree
 
-    def set_optimizable_tensors(self, optimizable_tensors):
-        self.set_xyz = optimizable_tensors["xyz"]
-        self._features_dc = optimizable_tensors["f_dc"]
-        self._features_rest = optimizable_tensors["f_rest"]
-        self._opacity = optimizable_tensors["opacity"]
-        self._scaling = optimizable_tensors["scaling"]
-        self._rotation = optimizable_tensors["rotation"]
-
-    def add_densification_stats(self, viewspace_point_tensor, update_filter):
+    def reset_opacity(self):
+        new_opacity = inverse_sigmoid(
+            torch.min(self.get_opacity, torch.ones_like(self.get_opacity) * 0.01)
+        )
+
+        return new_opacity
+
+    def split_points(self, gradient_threshold, percent_dense):
+        gradients = self.xyz_gradient_accum / self.denom
+        gradients[gradients.isnan()] = 0.0
+
+        # Extract large Gaussians in over-reconstruction regions with large view-space gradients
+        split_mask = torch.logical_and(
+            torch.where(
+                gradients.detach().squeeze() >= gradient_threshold, True, False
+            ),
+            torch.max(self.get_scaling, dim=1).values
+            > percent_dense * self.camera_extent,
+        )
+
+        stds = self.get_scaling[split_mask].repeat(2, 1)
+        means = torch.zeros((stds.size(0), 3), device="cuda")
+        samples = torch.normal(mean=means, std=stds)
+        rots = build_rotation(self._rotation[split_mask]).repeat(2, 1, 1)
+
+        new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self._xyz[
+            split_mask
+        ].repeat(2, 1)
+        new_scaling = self.scaling_inverse_activation(
+            self.get_scaling[split_mask].repeat(2, 1) / (0.8 * 2)
+        )
+        new_rotation = self._rotation[split_mask].repeat(2, 1)
+        new_features_dc = self._features_dc[split_mask].repeat(2, 1, 1)
+        new_features_rest = self._features_rest[split_mask].repeat(2, 1, 1)
+        new_opacity = self._opacity[split_mask].repeat(2, 1)
+
+        new_points = {
+            "xyz": new_xyz,
+            "f_dc": new_features_dc,
+            "f_rest": new_features_rest,
+            "opacity": new_opacity,
+            "scaling": new_scaling,
+            "rotation": new_rotation,
+        }
+
+        return new_points, split_mask
+
+    def clone_points(self, gradient_threshold, percent_dense):
+        gradients = self.xyz_gradient_accum / self.denom
+        gradients[gradients.isnan()] = 0.0
+
+        # Extract small Gaussians in under-reconstruction regions with large view-space gradients
+        clone_mask = torch.logical_and(
+            torch.where(
+                torch.norm(gradients, dim=-1) >= gradient_threshold, True, False
+            ),
+            torch.max(self.get_scaling, dim=1).values
+            <= percent_dense * self.camera_extent,
+        )
+
+        new_xyz = self._xyz[clone_mask]
+        new_features_dc = self._features_dc[clone_mask]
+        new_features_rest = self._features_rest[clone_mask]
+        new_opacity = self._opacity[clone_mask]
+        new_scaling = self._scaling[clone_mask]
+        new_rotation = self._rotation[clone_mask]
+
+        new_points = {
+            "xyz": new_xyz,
+            "f_dc": new_features_dc,
+            "f_rest": new_features_rest,
+            "opacity": new_opacity,
+            "scaling": new_scaling,
+            "rotation": new_rotation,
+        }
+
+        return new_points, clone_mask
+
+    def update_stats(self, viewspace_point_tensor, update_filter, radii):
+        self.max_radii2D[update_filter] = torch.max(
+            self.max_radii2D[update_filter],
+            radii[update_filter],
+        )
         self.xyz_gradient_accum[update_filter] += torch.norm(
             viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=True
         )
         self.denom[update_filter] += 1
+
+    def mask_stats(self, mask):
+        self.max_radii2D = self.max_radii2D[mask]
+        self.xyz_gradient_accum = self.xyz_gradient_accum[mask]
+        self.denom = self.denom[mask]
+
+    def reset_stats(self):
+        self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
+        self.xyz_gradient_accum = torch.zeros((self._xyz.shape[0], 1), device="cuda")
+        self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
diff --git a/gaussian_splatting/training.py b/gaussian_splatting/training.py
index 37e1e0a79..3dfe2cec7 100644
--- a/gaussian_splatting/training.py
+++ b/gaussian_splatting/training.py
@@ -19,52 +19,47 @@ class Trainer:
     def __init__(
         self,
         source_path,
-        model_path="",
         keep_eval=False,
-        sh_degree=3,
         resolution=-1,
-        testing_iterations=None,
-        saving_iterations=None,
-        checkpoint_iterations=None,
+        sh_degree=3,
         checkpoint_path=None,
     ):
-        self._resolution = resolution
+        self._model_path = self._prepare_model_path()
 
-        if testing_iterations is None:
-            testing_iterations = [7000, 30000]
-        self._testing_iterations = testing_iterations
+        self.dataset = Dataset(source_path, keep_eval=keep_eval, resolution=resolution)
+        self.dataset.save_scene_info(self._model_path)
 
-        if saving_iterations is None:
-            saving_iterations = [7000, 30000]
-        self._saving_iterations = saving_iterations
+        self.gaussian_model = GaussianModel(sh_degree)
+        self.gaussian_model.initialize(self.dataset)
 
-        if checkpoint_iterations is None:
-            checkpoint_iterations = []
-        self._checkpoint_iterations = checkpoint_iterations
+        self.optimizer = Optimizer(self.gaussian_model)
 
         self._checkpoint_path = checkpoint_path
 
-        self.iterations = 30000
-        self.lambda_dssim = 0.2
-        self.densification_interval = 100
-        self.opacity_reset_interval = 3000
-        self.densify_from_iter = 500
-        self.densify_until_iter = 15000
-        self.densify_grad_threshold = 0.0002
+        self._debug = False
 
-        self.dataset = Dataset(source_path, keep_eval=keep_eval, resolution=resolution)
-        self.dataset.save_scene_info(model_path)
+        self._iterations = 30000
+        self._testing_iterations = [7000, 30000]
+        self._saving_iterations = [7000, 30000]
+        self._checkpoint_iterations = []
 
-        self.gaussian_model = GaussianModel(sh_degree)
-        self.gaussian_model.initialize(self.dataset)
+        # Loss function
+        self._lambda_dssim = 0.2
 
-        self.optimizer = Optimizer(self.gaussian_model)
+        # Densification and pruning
+        self._opacity_reset_interval = 3000
+        self._min_opacity = 0.005
+        self._max_screen_size = 20
+        self._percent_dense = 0.01
+        self._densification_interval = 100
+        self._densification_iteration_start = 500
+        self._densification_iteration_stop = 15000
+        self._densification_grad_threshold = 0.0002
 
         safe_state()
 
     def run(self):
         first_iter = 0
-
         if self._checkpoint_path:
             gaussian_model_state_dict, self.optimizer_state_dict, first_iter = (
                 torch.load(checkpoint_path)
@@ -72,46 +67,40 @@ def run(self):
             self.gaussian_model.load_state_dict(gaussian_model_state_dict)
             self.optimizer.load_state_dict(optmizer_state_dict)
 
-        iter_start = torch.cuda.Event(enable_timing=True)
-        iter_end = torch.cuda.Event(enable_timing=True)
-
-        viewpoint_stack = None
         ema_loss_for_log = 0.0
+        cameras = None
         progress_bar = tqdm(
-            range(first_iter, self.iterations), desc="Training progress"
+            range(first_iter, self._iterations), desc="Training progress"
         )
         first_iter += 1
 
-        for iteration in range(first_iter, self.iterations + 1):
-            iter_start.record()
-
+        for iteration in range(first_iter, self._iterations + 1):
             self.optimizer.update_learning_rate(iteration)
 
             # Every 1000 its we increase the levels of SH up to a maximum degree
             if iteration % 1000 == 0:
                 self.gaussian_model.oneupSHdegree()
 
-            # Pick a random Camera
-            if not viewpoint_stack:
-                viewpoint_stack = self.dataset.getTrainCameras().copy()
-            viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack) - 1))
-
-            render_pkg = render(viewpoint_cam, self.gaussian_model)
-            image, viewspace_point_tensor, visibility_filter, radii = (
-                render_pkg["render"],
-                render_pkg["viewspace_points"],
-                render_pkg["visibility_filter"],
-                render_pkg["radii"],
+            # Pick a random camera
+            if not cameras:
+                cameras = self.dataset.getTrainCameras().copy()
+            camera = cameras.pop(randint(0, len(cameras) - 1))
+
+            # Render image
+            rendered_image, viewspace_point_tensor, visibility_filter, radii = render(
+                camera, self.gaussian_model
             )
 
             # Loss
-            gt_image = viewpoint_cam.original_image.cuda()
-            Ll1 = l1_loss(image, gt_image)
-            loss = (1.0 - self.lambda_dssim) * Ll1 + self.lambda_dssim * (
-                1.0 - ssim(image, gt_image)
+            gt_image = camera.original_image.cuda()
+            Ll1 = l1_loss(rendered_image, gt_image)
+            loss = (1.0 - self._lambda_dssim) * Ll1 + self._lambda_dssim * (
+                1.0 - ssim(rendered_image, gt_image)
             )
 
-            loss.backward()
-            iter_end.record()
+            loss.backward()
 
             with torch.no_grad():
                 # Progress bar
                 if iteration % 10 == 0:
                     progress_bar.set_postfix({"Loss": f"{ema_loss_for_log:.{7}f}"})
                     progress_bar.update(10)
-                if iteration == self.iterations:
+                if iteration == self._iterations:
                     progress_bar.close()
 
                 # Log and save
-                training_report(
-                    iteration,
-                    Ll1,
-                    loss,
-                    l1_loss,
-                    iter_start.elapsed_time(iter_end),
-                    self._testing_iterations,
-                    self.dataset,
-                    self.gaussian_model,
-                    render,
-                )
+                if iteration in self._testing_iterations:
+                    self._report(iteration)
+
                 if iteration in self._saving_iterations:
                     print("\n[ITER {}] Saving Gaussians".format(iteration))
                     point_cloud_path = os.path.join(
@@ -144,43 +125,29 @@ def run(self):
                     )
 
                 # Densification
-                if iteration < self.densify_until_iter:
-                    # Keep track of max radii in image-space for pruning
-                    self.gaussian_model.max_radii2D[visibility_filter] = torch.max(
-                        self.gaussian_model.max_radii2D[visibility_filter],
-                        radii[visibility_filter],
-                    )
-                    self.gaussian_model.add_densification_stats(
-                        viewspace_point_tensor, visibility_filter
+                if iteration < self._densification_iteration_stop:
+                    self.gaussian_model.update_stats(
+                        viewspace_point_tensor, visibility_filter, radii
                     )
 
-                    # Densify
                     if (
-                        iteration > self.densify_from_iter
-                        and iteration % self.densification_interval == 0
+                        iteration >= self._densification_iteration_start
+                        and iteration % self._densification_interval == 0
                     ):
-                        size_threshold = (
-                            20 if iteration > self.opacity_reset_interval else None
-                        )
-                        self.gaussian_model.densify_and_prune(
-                            self.densify_grad_threshold,
-                            0.005,
-                            self.dataset.cameras_extent,
-                            size_threshold,
+                        self._densify_and_prune(
+                            iteration > self._opacity_reset_interval
                         )
 
-                    # Reset interval
-                    if (
-                        iteration % self.opacity_reset_interval == 0
-                        or iteration == self.densify_from_iter
-                    ):
-                        self.reset_opacity()
+                    # Reset opacity interval
+                    if iteration % self._opacity_reset_interval == 0:
+                        self._reset_opacity()
 
                 # Optimizer step
-                if iteration < self.iterations:
+                if iteration < self._iterations:
                     self.optimizer.step()
                     self.optimizer.zero_grad(set_to_none=True)
 
+                # Save checkpoint
                 if iteration in self._checkpoint_iterations:
                     print("\n[ITER {}] Saving Checkpoint".format(iteration))
                     torch.save(
@@ -192,214 +159,111 @@ def run(self):
-                        self.model_path + "/chkpnt" + str(iteration) + ".pth",
+                        self._model_path + "/chkpnt" + str(iteration) + ".pth",
                     )
 
-    def reset_opacity(self):
-        new_opacity = inverse_sigmoid(
-            torch.min(
-                self.gaussian_model.get_opacity,
-                torch.ones_like(self.gaussian_model.get_opacity) * 0.01,
-            )
-        )
-        optimizable_tensors = self.optimizer.replace_tensor(new_opacity, "opacity")
-        self.gaussian_model.set_opacity = optimizable_tensors["opacity"]
+    def _prepare_model_path(self):
+        unique_str = str(uuid.uuid4())
+        model_path = os.path.join("./output/", unique_str[0:10])
 
-    def prune_points(self, mask):
-        valid_points_mask = ~mask
-        optimizable_tensors = self.optimizer.prune_mask(valid_points_mask)
-        self.gaussian_model.set_optimizable_tensors(optimizable_tensors)
-        # TODO
-        self.gaussian_model.xyz_gradient_accum = self.gaussian_model.xyz_gradient_accum[
-            valid_points_mask
-        ]
-        self.gaussian_model.denom = self.gaussian_model.denom[valid_points_mask]
-        self.gaussian_model.max_radii2D = self.gaussian_model.max_radii2D[
-            valid_points_mask
-        ]
-
-    def densification_postfix(
-        self,
-        new_xyz,
-        new_features_dc,
-        new_features_rest,
-        new_opacities,
-        new_scaling,
-        new_rotation,
-    ):
-        d = {
-            "xyz": new_xyz,
-            "f_dc": new_features_dc,
-            "f_rest": new_features_rest,
-            "opacity": new_opacities,
-            "scaling": new_scaling,
-            "rotation": new_rotation,
-        }
+        # Set up output folder
+        print("Output folder: {}".format(model_path))
+        os.makedirs(model_path, exist_ok=True)
 
-        optimizable_tensors = self.optimizer.cat_tensors(d)
-        self.gaussian_model.set_optimizable_tensors(optimizable_tensors)
+        return model_path
 
-        # TODO
-        self.gaussian_model.xyz_gradient_accum = torch.zeros(
-            (self.gaussian_model.get_xyz.shape[0], 1), device="cuda"
-        )
-        self.gaussian_model.denom = torch.zeros(
-            (self.gaussian_model.get_xyz.shape[0], 1), device="cuda"
-        )
-        self.gaussian_model.max_radii2D = torch.zeros(
-            (self.gaussian_model.get_xyz.shape[0]), device="cuda"
-        )
+    def _report(self, iteration):
+        # Report test and samples of training set
+        torch.cuda.empty_cache()
+        validation_configs = {
+            "test": self.dataset.getTestCameras(),
+            "train": [
+                self.dataset.getTrainCameras()[
+                    idx % len(self.dataset.getTrainCameras())
+                ]
+                for idx in range(5, 30, 5)
+            ],
+        }
 
-    def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
-        n_init_points = self.get_xyz.shape[0]
-        # Extract points that satisfy the gradient condition
-        padded_grad = torch.zeros((n_init_points), device="cuda")
-        padded_grad[: grads.shape[0]] = grads.squeeze()
-        selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
-        selected_pts_mask = torch.logical_and(
-            selected_pts_mask,
-            torch.max(self.get_scaling, dim=1).values
-            > self.percent_dense * scene_extent,
-        )
+        for config_name, cameras in validation_configs.items():
+            if not cameras or len(cameras) == 0:
+                continue
 
-        stds = self.get_scaling[selected_pts_mask].repeat(N, 1)
-        means = torch.zeros((stds.size(0), 3), device="cuda")
-        samples = torch.normal(mean=means, std=stds)
-        rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N, 1, 1)
-        new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[
-            selected_pts_mask
-        ].repeat(N, 1)
-        new_scaling = self.scaling_inverse_activation(
-            self.get_scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)
-        )
-        new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)
-        new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1, 1)
-        new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1, 1)
-        new_opacity = self._opacity[selected_pts_mask].repeat(N, 1)
-
-        self.densification_postfix(
-            new_xyz,
-            new_features_dc,
-            new_features_rest,
-            new_opacity,
-            new_scaling,
-            new_rotation,
-        )
+            l1_test, psnr_test = 0.0, 0.0
+            for idx, camera in enumerate(cameras):
+                rendered_image, _, _, _ = render(camera, self.gaussian_model)
+                gt_image = camera.original_image.to("cuda")
 
-        prune_filter = torch.cat(
-            (
-                selected_pts_mask,
-                torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool),
-            )
-        )
-        self.prune_points(prune_filter)
+                rendered_image = torch.clamp(rendered_image, 0.0, 1.0)
+                gt_image = torch.clamp(gt_image, 0.0, 1.0)
 
-    def densify_and_clone(self, grads, grad_threshold, scene_extent):
-        # Extract points that satisfy the gradient condition
-        selected_pts_mask = torch.where(
-            torch.norm(grads, dim=-1) >= grad_threshold, True, False
-        )
-        selected_pts_mask = torch.logical_and(
-            selected_pts_mask,
-            torch.max(self.get_scaling, dim=1).values
-            <= self.percent_dense * scene_extent,
-        )
+                l1_test += l1_loss(rendered_image, gt_image).mean().double()
+                psnr_test += psnr(rendered_image, gt_image).mean().double()
 
-        new_xyz = self._xyz[selected_pts_mask]
-        new_features_dc = self._features_dc[selected_pts_mask]
-        new_features_rest = self._features_rest[selected_pts_mask]
-        new_opacities = self._opacity[selected_pts_mask]
-        new_scaling = self._scaling[selected_pts_mask]
-        new_rotation = self._rotation[selected_pts_mask]
-
-        self.densification_postfix(
-            new_xyz,
-            new_features_dc,
-            new_features_rest,
-            new_opacities,
-            new_scaling,
-            new_rotation,
-        )
+            psnr_test /= len(cameras)
+            l1_test /= len(cameras)
 
-    def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
-        grads = self.xyz_gradient_accum / self.denom
-        grads[grads.isnan()] = 0.0
+            print(
+                f"\n[ITER {iteration}] Evaluating {config_name}: L1 {l1_test} PSNR {psnr_test}"
+            )
 
-        self.densify_and_clone(grads, max_grad, extent)
-        self.densify_and_split(grads, max_grad, extent)
+        torch.cuda.empty_cache()
 
-        prune_mask = (self.get_opacity < min_opacity).squeeze()
-        if max_screen_size:
-            big_points_vs = self.max_radii2D > max_screen_size
-            big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
+    def _densify_and_prune(self, prune_big_points):
+        # Clone small Gaussians in under-reconstruction areas
+        self._clone_points()
+        # Split large Gaussians in over-reconstruction areas
+        self._split_points()
+
+        # Prune transparent and large Gaussians
+        prune_mask = (self.gaussian_model.get_opacity < self._min_opacity).squeeze()
+        if prune_big_points:
+            big_points_vs = self.gaussian_model.max_radii2D > self._max_screen_size
+            big_points_ws = (
+                self.gaussian_model.get_scaling.max(dim=1).values
+                > 0.1 * self.gaussian_model.camera_extent
+            )
             prune_mask = torch.logical_or(
                 torch.logical_or(prune_mask, big_points_vs), big_points_ws
             )
-        self.prune_points(prune_mask)
+        if self._debug:
+            print(f"Pruning: {prune_mask.sum().item()} points.")
+        self._prune_points(valid_mask=~prune_mask)
 
         torch.cuda.empty_cache()
 
+    def _split_points(self):
+        new_points, split_mask = self.gaussian_model.split_points(
+            self._densification_grad_threshold, self._percent_dense
+        )
+        self._concatenate_points(new_points)
-
-def prepare_output_and_logger(args):
-    if not args.model_path:
-        if os.getenv("OAR_JOB_ID"):
-            unique_str = os.getenv("OAR_JOB_ID")
-        else:
-            unique_str = str(uuid.uuid4())
-        args.model_path = os.path.join("./output/", unique_str[0:10])
-
-    # Set up output folder
-    print("Output folder: {}".format(args.model_path))
-    os.makedirs(args.model_path, exist_ok=True)
-    with open(os.path.join(args.model_path, "cfg_args"), "w") as cfg_log_f:
-        cfg_log_f.write(str(Namespace(**vars(args))))
-
-
-def training_report(
-    iteration,
-    Ll1,
-    loss,
-    l1_loss,
-    elapsed,
-    testing_iterations,
-    dataset: Dataset,
-    gaussian_model: GaussianModel,
-    renderFunc,
-):
-    # Report test and samples of training set
-    if iteration in testing_iterations:
-        torch.cuda.empty_cache()
-        validation_configs = (
-            {"name": "test", "cameras": dataset.getTestCameras()},
-            {
-                "name": "train",
-                "cameras": [
-                    dataset.getTrainCameras()[idx % len(dataset.getTrainCameras())]
-                    for idx in range(5, 30, 5)
-                ],
-            },
+        prune_mask = torch.cat(
+            (
+                split_mask,
+                torch.zeros(2 * split_mask.sum(), device="cuda", dtype=bool),
+            )
         )
+        if self._debug:
+            print(f"Densification: split {split_mask.sum().item()} points.")
+        self._prune_points(valid_mask=~prune_mask)
 
-        for config in validation_configs:
-            if config["cameras"] and len(config["cameras"]) > 0:
-                l1_test = 0.0
-                psnr_test = 0.0
-                for idx, viewpoint in enumerate(config["cameras"]):
-                    image = torch.clamp(
-                        renderFunc(viewpoint, gaussian_model)["render"],
-                        0.0,
-                        1.0,
-                    )
-                    gt_image = torch.clamp(
-                        viewpoint.original_image.to("cuda"), 0.0, 1.0
-                    )
-                    l1_test += l1_loss(image, gt_image).mean().double()
-                    psnr_test += psnr(image, gt_image).mean().double()
-                psnr_test /= len(config["cameras"])
-                l1_test /= len(config["cameras"])
-                print(
-                    "\n[ITER {}] Evaluating {}: L1 {} PSNR {}".format(
-                        iteration, config["name"], l1_test, psnr_test
-                    )
-                )
+    def _clone_points(self):
+        new_points, clone_mask = self.gaussian_model.clone_points(
+            self._densification_grad_threshold, self._percent_dense
+        )
+        if self._debug:
+            print(f"Densification: clone {clone_mask.sum().item()} points.")
+        self._concatenate_points(new_points)
 
-        torch.cuda.empty_cache()
+    def _reset_opacity(self):
+        new_opacity = self.gaussian_model.reset_opacity()
+        optimizable_tensors = self.optimizer.replace_points(new_opacity, "opacity")
+        self.gaussian_model.set_optimizable_tensors(optimizable_tensors)
 
-    print("\nTraining complete.")
+    def _prune_points(self, valid_mask):
+        optimizable_tensors = self.optimizer.prune_points(valid_mask)
+        self.gaussian_model.set_optimizable_tensors(optimizable_tensors)
+        self.gaussian_model.mask_stats(valid_mask)
+
+    def _concatenate_points(self, new_tensors):
+        optimizable_tensors = self.optimizer.concatenate_points(new_tensors)
+        self.gaussian_model.set_optimizable_tensors(optimizable_tensors)
+        self.gaussian_model.reset_stats()
diff --git a/gaussian_splatting/utils/general.py b/gaussian_splatting/utils/general.py
index 6ae7f15c4..f1181ba90 100644
--- a/gaussian_splatting/utils/general.py
+++ b/gaussian_splatting/utils/general.py
@@ -120,32 +120,7 @@ def build_scaling_rotation(s, r):
     return L
 
 
-def safe_state(silent=True, seed=0):
-    old_f = sys.stdout
-
-    class F:
-        def __init__(self, silent):
-            self.silent = silent
-
-        def write(self, x):
-            if not self.silent:
-                if x.endswith("\n"):
-                    old_f.write(
-                        x.replace(
-                            "\n",
-                            " [{}]\n".format(
-                                str(datetime.now().strftime("%d/%m %H:%M:%S"))
-                            ),
-                        )
-                    )
-                else:
-                    old_f.write(x)
-
-        def flush(self):
-            old_f.flush()
-
-    sys.stdout = F(silent)
-
+def safe_state(seed=0):
     random.seed(seed)
     np.random.seed(seed)
     torch.manual_seed(seed)
diff --git a/input.ply b/input.ply
deleted file mode 100644
index 49bbd4393f89c5a22a749f135f39dd6319d4924e..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 3201
zcmaJ>dr*^i7G=a8XYtYcXzggRp;8?RwJI$|TcD_56%?TxQ7IrX;UUQv2?->Cyn-fy
zNC=MvkR&_|1mzimNCdybS}Nmoi?uCvT5Y%Nx~+xvvDn&DZRW2$^+K{2zCHM79{iNpMf#5}>09ZF7+a3li$|Aqc2B}oJVW@eEm
zlnLA(j}V_MNZ<=3Zd2cXqPx-)YX*Xe&$TGXt{?HsQhI!yTflSBqpIuP_&of%Al=Km(R-=7n
zSHW!8;X^&0tEp78l1aemV<_iFKVg}Q@)_@PJQbwFJsB0OR%={ZJJ@y82JDQWke1>9
z&Akg!QIS6%&MBvr|0$r+XIb6*HYc~?o)@Rf}_**MnpY3fOc=nYB-@Q
z_o~O);z9U&P^dCLul?Ko=rq;g)y>ofFEscUg9YIuW(>yD&6i~=CZ??j(l*2O6csE;
zWnNJ#;k?v~6=Dh*vgDHRrEm#5hqo8gV4uvD*X()&LjFaJyg;Fx*aJHjKZh;uTKEOi
z%omA-R}&hbcz6MV4EiX>qN0$eP9dZp_^S_vF7@^=cnvwba
2dr0ANZnVBjRO=?iAAr3mLO-U3hoPNFJ@n~`)8T(Emgy*pzmd`E&s>AD%i<6$FMPy
zhOIPHEP5dy-`tGGKey0`Ub77E;;KDe^1oxL?lN3K^TZyC!+$X+hAlhk}yL6zSo8-C3V
zWPMVp;5D0Vl--NCrNg+FNz+@H$a7z-hiU#WGHPkq25n7C4i;ig8H?6%YNO|*@r++P
z4tDfoj)+1RPq)ucDa8M-yo>O)gJk4>waLDC2b#98MBPKm>eiIssOo}$WaowKo*CxL
z48xH|a4mf}=Sx|vtz*H~XK>@AF?R6@I+tQcK2N@aoWqkjkWgb@YR?BwezX@tmKsLWrwgSO3$8ILKAgq4PKh)c@@VQ#tdub7BvS@m*T^w$;}PtroQ^zP
zuAc>J^;r$qmnx9!NiP5swf#^;G2FYH;1<#kRvMQu3-sO{75Y4l@Hj&qrpHA8I;unZ
zWHUZ}oeHW%%q*15Ea9&#{!o<~Z9HoK-P4z*^r!c>{77KRLSMIH|
zjK1%Uw0UN9j!?m3AunaQ7N%}HZrr0`%T5$**xd-P%X?VEUdp=l`PlSZLfpMx3=M|{
zpv_p`Y}mu@>nugzL;5J9h>#9$D4vehK^II}k{Is7xkk*}Qi_eabccC|g|#nxVu;{_F>lqg{&ndANi-@;_ER852eSUjZvvT
f_6GNB86JF2Po<-
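Usage note (illustrative, not part of the patch): a minimal sketch of how the refactored interfaces above are meant to be driven, assuming only the signatures visible in this diff — the new `Trainer` constructor, the tuple now returned by `render`, and the `Dataset` camera accessors. The dataset path is a placeholder, and the import paths are inferred from the file layout shown above.

from gaussian_splatting.gaussian_renderer import render
from gaussian_splatting.training import Trainer

# Output directory, iteration schedule and densification hyper-parameters are
# now fixed inside Trainer instead of being passed as constructor arguments.
trainer = Trainer("/path/to/colmap/scene", keep_eval=True, resolution=-1, sh_degree=3)
trainer.run()

# render() now returns a plain tuple instead of a dict keyed by
# "render" / "viewspace_points" / "visibility_filter" / "radii".
camera = trainer.dataset.getTrainCameras()[0]
rendered_image, viewspace_points, visibility_filter, radii = render(
    camera, trainer.gaussian_model
)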