From 209f93ec0acabe03cc3868632873d1257d172a85 Mon Sep 17 00:00:00 2001
From: Isabelle Bouchard
Date: Mon, 18 Mar 2024 10:39:12 -0400
Subject: [PATCH] Clean PipelineParams

---
 gaussian_splatting/arguments/__init__.py |  6 -----
 .../gaussian_renderer/__init__.py        | 26 +++----------------
 gaussian_splatting/training.py           |  6 ++---
 scripts/render.py                        | 15 +++++------
 scripts/train.py                         |  4 +--
 scripts/train_modal.py                   | 12 +++------
 6 files changed, 17 insertions(+), 52 deletions(-)

diff --git a/gaussian_splatting/arguments/__init__.py b/gaussian_splatting/arguments/__init__.py
index 3f17a9d9c..958fd7d88 100644
--- a/gaussian_splatting/arguments/__init__.py
+++ b/gaussian_splatting/arguments/__init__.py
@@ -63,12 +63,6 @@ def extract(self, args):
         g.source_path = os.path.abspath(g.source_path)
         return g
 
-class PipelineParams(ParamGroup):
-    def __init__(self, parser=None):
-        self.convert_SHs_python = False
-        self.compute_cov3D_python = False
-        super().__init__(parser, "Pipeline Parameters")
-
 class OptimizationParams(ParamGroup):
     def __init__(self, parser=None):
         self.iterations = 30_000
diff --git a/gaussian_splatting/gaussian_renderer/__init__.py b/gaussian_splatting/gaussian_renderer/__init__.py
index f05ed3afc..c2019f3f8 100644
--- a/gaussian_splatting/gaussian_renderer/__init__.py
+++ b/gaussian_splatting/gaussian_renderer/__init__.py
@@ -15,7 +15,7 @@
 from gaussian_splatting.scene.gaussian_model import GaussianModel
 from gaussian_splatting.utils.sh_utils import eval_sh
 
-def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
+def render(viewpoint_camera, pc : GaussianModel, bg_color : torch.Tensor, scaling_modifier = 1.0):
     """
     Render the scene.
 
@@ -54,32 +54,14 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
     means2D = screenspace_points
     opacity = pc.get_opacity
 
-    # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
-    # scaling / rotation by the rasterizer.
-    scales = None
-    rotations = None
     cov3D_precomp = None
-    if pipe.compute_cov3D_python:
-        cov3D_precomp = pc.get_covariance(scaling_modifier)
-    else:
-        scales = pc.get_scaling
-        rotations = pc.get_rotation
+    scales = pc.get_scaling
+    rotations = pc.get_rotation
 
     # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
     # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
-    shs = None
     colors_precomp = None
-    if override_color is None:
-        if pipe.convert_SHs_python:
-            shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
-            dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
-            dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
-            sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
-            colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
-        else:
-            shs = pc.get_features
-    else:
-        colors_precomp = override_color
+    shs = pc.get_features
 
     # Rasterize visible Gaussians to image, obtain their radii (on screen).
     rendered_image, radii = rasterizer(
diff --git a/gaussian_splatting/training.py b/gaussian_splatting/training.py
index 32c252f4b..f9d3c012e 100644
--- a/gaussian_splatting/training.py
+++ b/gaussian_splatting/training.py
@@ -10,7 +10,6 @@
 from tqdm import tqdm
 from gaussian_splatting.utils.image_utils import psnr
 from argparse import Namespace
-from gaussian_splatting.arguments import ModelParams, PipelineParams, OptimizationParams
 
 
 class Trainer:
@@ -46,7 +45,6 @@ def run(
         self,
         dataset,
         opt,
-        pipe,
     ):
         first_iter = 0
         gaussians = GaussianModel(dataset.sh_degree)
@@ -83,7 +81,7 @@ def run(
 
             bg = torch.rand((3), device="cuda") if opt.random_background else background
 
-            render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
+            render_pkg = render(viewpoint_cam, gaussians, bg)
             image, viewspace_point_tensor, visibility_filter, radii = render_pkg["render"], render_pkg["viewspace_points"], render_pkg["visibility_filter"], render_pkg["radii"]
 
             # Loss
@@ -113,7 +111,7 @@ def run(
                     self._testing_iterations,
                     scene,
                     render,
-                    (pipe, background)
+                    (background)
                 )
                 if (iteration in self._saving_iterations):
                     print("\n[ITER {}] Saving Gaussians".format(iteration))
diff --git a/scripts/render.py b/scripts/render.py
index 8e7afc80a..0fbc0bab3 100644
--- a/scripts/render.py
+++ b/scripts/render.py
@@ -18,11 +18,11 @@
 from gaussian_splatting.gaussian_renderer import render
 from gaussian_splatting.utils.general_utils import safe_state
-from gaussian_splatting.arguments import ModelParams, PipelineParams, get_combined_args
+from gaussian_splatting.arguments import ModelParams, get_combined_args
 from gaussian_splatting.gaussian_renderer import GaussianModel
 from gaussian_splatting.scene import Scene
 
 
-def render_set(model_path, name, iteration, views, gaussians, pipeline, background):
+def render_set(model_path, name, iteration, views, gaussians, background):
     render_path = os.path.join(model_path, name, "ours_{}".format(iteration), "renders")
     gts_path = os.path.join(model_path, name, "ours_{}".format(iteration), "gt")
 
@@ -30,12 +30,12 @@ def render_set(model_path, name, iteration, views, gaussians, pipeline, backgrou
     makedirs(gts_path, exist_ok=True)
 
     for idx, view in enumerate(tqdm(views, desc="Rendering progress")):
-        rendering = render(view, gaussians, pipeline, background)["render"]
+        rendering = render(view, gaussians, background)["render"]
         gt = view.original_image[0:3, :, :]
         torchvision.utils.save_image(rendering, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))
         torchvision.utils.save_image(gt, os.path.join(gts_path, '{0:05d}'.format(idx) + ".png"))
 
-def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParams, skip_train : bool, skip_test : bool):
+def render_sets(dataset : ModelParams, iteration : int, skip_train : bool, skip_test : bool):
     with torch.no_grad():
         gaussians = GaussianModel(dataset.sh_degree)
         scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)
@@ -44,16 +44,15 @@ def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParam
         background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")
 
         if not skip_train:
-            render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, pipeline, background)
+            render_set(dataset.model_path, "train", scene.loaded_iter, scene.getTrainCameras(), gaussians, background)
 
         if not skip_test:
-            render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, pipeline, background)
+            render_set(dataset.model_path, "test", scene.loaded_iter, scene.getTestCameras(), gaussians, background)
 
 if __name__ == "__main__":
     # Set up command line argument parser
     parser = ArgumentParser(description="Testing script parameters")
     model = ModelParams(parser, sentinel=True)
-    pipeline = PipelineParams(parser)
     parser.add_argument("--iteration", default=-1, type=int)
     parser.add_argument("--skip_train", action="store_true")
     parser.add_argument("--skip_test", action="store_true")
@@ -64,4 +63,4 @@ def render_sets(dataset : ModelParams, iteration : int, pipeline : PipelineParam
     # Initialize system state (RNG)
     safe_state(args.quiet)
 
-    render_sets(model.extract(args), args.iteration, pipeline.extract(args), args.skip_train, args.skip_test)
+    render_sets(model.extract(args), args.iteration, args.skip_train, args.skip_test)
diff --git a/scripts/train.py b/scripts/train.py
index ba3796215..5fc22e5d8 100644
--- a/scripts/train.py
+++ b/scripts/train.py
@@ -10,7 +10,7 @@
 #
 import sys
 from argparse import ArgumentParser
-from gaussian_splatting.arguments import ModelParams, PipelineParams, OptimizationParams
+from gaussian_splatting.arguments import ModelParams, OptimizationParams
 from gaussian_splatting.training import Trainer
 
 
@@ -19,7 +19,6 @@
     parser = ArgumentParser(description="Training script parameters")
     lp = ModelParams(parser)
     op = OptimizationParams(parser)
-    pp = PipelineParams(parser)
     parser.add_argument('--detect_anomaly', action='store_true', default=False)
     parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000])
     parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000])
@@ -40,5 +39,4 @@
     trainer.run(
         dataset=lp.extract(args),
         opt=op.extract(args),
-        pipe=pp.extract(args),
     )
diff --git a/scripts/train_modal.py b/scripts/train_modal.py
index a557e9fdd..618a63997 100644
--- a/scripts/train_modal.py
+++ b/scripts/train_modal.py
@@ -60,18 +60,13 @@ def __init__(self,):
         self.white_background = False
         self.eval = False
 
-class Pipeline():
-    def __init__(self):
-        self.convert_SHs_python = False
-        self.compute_cov3D_python = False
-
 class Optimization():
     def __init__(self):
-        self.iterations = 30_000
+        self.iterations = 30000
         self.position_lr_init = 0.00016
         self.position_lr_final = 0.0000016
         self.position_lr_delay_mult = 0.01
-        self.position_lr_max_steps = 30_000
+        self.position_lr_max_steps = 30000
         self.feature_lr = 0.0025
         self.opacity_lr = 0.05
         self.scaling_lr = 0.005
@@ -81,7 +76,7 @@ def __init__(self):
         self.densification_interval = 100
         self.opacity_reset_interval = 3000
         self.densify_from_iter = 500
-        self.densify_until_iter = 15_000
+        self.densify_until_iter = 15000
         self.densify_grad_threshold = 0.0002
         self.random_background = False
 
@@ -101,7 +96,6 @@ def f():
     trainer.run(
         dataset=Dataset(),
         opt=Optimization(),
-        pipe=Pipeline()
     )
     volume_model.commit()
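
Usage note (not part of the patch): after this change, call sites no longer build or pass a pipeline object; render() takes only a camera, a GaussianModel, a background tensor, and an optional scaling modifier, and Trainer.run() takes only dataset and opt. The sketch below shows how a caller might render a single test view with the new signature, mirroring the updated render_sets in scripts/render.py. The render_one_test_view helper is hypothetical, and the white/black background selection is assumed from the usual convention; neither is code from this patch.

# Minimal sketch, assuming a dataset object with sh_degree, white_background,
# and a trained model directory resolvable by Scene; CUDA is assumed available.
import torch

from gaussian_splatting.gaussian_renderer import GaussianModel, render
from gaussian_splatting.scene import Scene


def render_one_test_view(dataset, iteration=-1):
    # Hypothetical helper illustrating the pipe-free render() call.
    with torch.no_grad():
        gaussians = GaussianModel(dataset.sh_degree)
        scene = Scene(dataset, gaussians, load_iteration=iteration, shuffle=False)

        # Assumed background convention: white if requested, otherwise black.
        bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
        background = torch.tensor(bg_color, dtype=torch.float32, device="cuda")

        view = scene.getTestCameras()[0]
        # New signature: render(viewpoint_camera, pc, bg_color, scaling_modifier=1.0)
        return render(view, gaussians, background)["render"]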