
Commit

Rename variables, remove debugging flag
bouchardi committed Mar 15, 2024
1 parent 188c286 commit e230133
Showing 5 changed files with 8 additions and 17 deletions.
1 change: 0 additions & 1 deletion gaussian_splatting/arguments/__init__.py
@@ -68,7 +68,6 @@ class PipelineParams(ParamGroup):
     def __init__(self, parser=None):
         self.convert_SHs_python = False
         self.compute_cov3D_python = False
-        self.debug = False
         super().__init__(parser, "Pipeline Parameters")

 class OptimizationParams(ParamGroup):
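
With self.debug gone, PipelineParams now only exposes the two conversion toggles, so any code that still reads pipe.debug will raise AttributeError. A minimal sketch of a defaulted lookup that keeps such callers working (the trimmed class below is an illustrative stand-in, not code from this commit):

    class PipelineParams:  # trimmed stand-in for the class after this change
        def __init__(self):
            self.convert_SHs_python = False
            self.compute_cov3D_python = False

    pipe = PipelineParams()
    # Callers that still read the removed flag can fall back to a default
    # instead of reintroducing the attribute:
    debug = getattr(pipe, "debug", False)  # evaluates to False now that the flag is gone
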
2 changes: 1 addition & 1 deletion gaussian_splatting/gaussian_renderer/__init__.py
@@ -45,7 +45,7 @@ def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor,
         sh_degree=pc.active_sh_degree,
         campos=viewpoint_camera.camera_center,
         prefiltered=False,
-        debug=pipe.debug
+        debug=False
     )

     rasterizer = GaussianRasterizer(raster_settings=raster_settings)
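
Because the rasterizer settings are now built with debug=False unconditionally, the low-level debug dumps can no longer be switched on from the pipeline configuration. If that toggle is ever needed again, one low-touch option (a suggestion, not something this commit adds) is an environment switch read at the call site:

    import os

    # Hypothetical switch; it would replace the hardcoded False passed to
    # GaussianRasterizationSettings in gaussian_renderer/__init__.py.
    RASTERIZER_DEBUG = os.environ.get("RASTERIZER_DEBUG", "0") == "1"
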
15 changes: 5 additions & 10 deletions gaussian_splatting/training.py
@@ -19,8 +19,7 @@ def __init__(
         testing_iterations=None,
         saving_iterations=None,
         checkpoint_iterations=None,
-        checkpoint=None,
-        debug_from=-1,
+        checkpoint_path=None,
         quiet=False,
         detect_anomaly=False
     ):
@@ -36,8 +35,7 @@
             checkpoint_iterations = []
         self._checkpoint_iterations = checkpoint_iterations

-        self._checkpoint = checkpoint
-        self._debug_from = debug_from
+        self._checkpoint_path = checkpoint_path

         safe_state(quiet)

@@ -54,8 +52,9 @@ def run(
         gaussians = GaussianModel(dataset.sh_degree)
         scene = Scene(dataset, gaussians)
         gaussians.training_setup(opt)
-        if self._checkpoint:
-            (model_params, first_iter) = torch.load(self._checkpoint)
+
+        if self._checkpoint_path:
+            (model_params, first_iter) = torch.load(self._checkpoint_path)
             gaussians.restore(model_params, opt)

         bg_color = [1, 1, 1] if dataset.white_background else [0, 0, 0]
@@ -82,10 +81,6 @@
             viewpoint_stack = scene.getTrainCameras().copy()
             viewpoint_cam = viewpoint_stack.pop(randint(0, len(viewpoint_stack)-1))

-            # Render
-            if (iteration - 1) == self._debug_from:
-                pipe.debug = True
-
             bg = torch.rand((3), device="cuda") if opt.random_background else background

             render_pkg = render(viewpoint_cam, gaussians, pipe, bg)
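
The resume path now keys off checkpoint_path and still expects the checkpoint file to hold a (model_params, first_iter) tuple, exactly as unpacked above. A minimal round-trip sketch of that format (the payload and filename below are placeholders, not the real GaussianModel state):

    import torch

    model_params = {"xyz": torch.zeros(10, 3)}           # hypothetical payload
    torch.save((model_params, 7_000), "chkpnt7000.pth")  # hypothetical filename

    loaded_params, first_iter = torch.load("chkpnt7000.pth")
    assert first_iter == 7_000
    # In Trainer.run the loaded params are then handed to gaussians.restore(model_params, opt).
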
6 changes: 2 additions & 4 deletions scripts/train.py
@@ -20,22 +20,20 @@
     lp = ModelParams(parser)
     op = OptimizationParams(parser)
     pp = PipelineParams(parser)
-    parser.add_argument('--debug_from', type=int, default=-1)
     parser.add_argument('--detect_anomaly', action='store_true', default=False)
     parser.add_argument("--test_iterations", nargs="+", type=int, default=[7_000, 30_000])
     parser.add_argument("--save_iterations", nargs="+", type=int, default=[7_000, 30_000])
     parser.add_argument("--quiet", action="store_true")
     parser.add_argument("--checkpoint_iterations", nargs="+", type=int, default=[])
-    parser.add_argument("--start_checkpoint", type=str, default = None)
+    parser.add_argument("--checkpoint_path", type=str, default = None)
     args = parser.parse_args(sys.argv[1:])
     args.save_iterations.append(args.iterations)

     trainer = Trainer(
         testing_iterations=args.test_iterations,
         saving_iterations=args.save_iterations,
         checkpoint_iterations=args.checkpoint_iterations,
-        checkpoint=args.start_checkpoint,
-        debug_from=args.debug_from,
+        checkpoint_path=args.checkpoint_path,
         quiet=args.quiet,
         detect_anomaly=args.detect_anomaly
     )
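
With the argument renamed end to end, resuming a run is spelled --checkpoint_path on the command line (previously --start_checkpoint) and checkpoint_path when constructing the trainer directly. A sketch with placeholder values, assuming the package layout implied by the file paths above:

    from gaussian_splatting.training import Trainer  # import path assumed from the repository layout

    trainer = Trainer(
        testing_iterations=[7_000, 30_000],
        saving_iterations=[7_000, 30_000],
        checkpoint_iterations=[7_000],
        checkpoint_path="output/chkpnt7000.pth",  # hypothetical path; was checkpoint / --start_checkpoint
        quiet=False,
        detect_anomaly=False,
    )
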
1 change: 0 additions & 1 deletion scripts/train_modal.py
@@ -65,7 +65,6 @@ class Pipeline():
     def __init__(self):
         self.convert_SHs_python = False
         self.compute_cov3D_python = False
-        self.debug = False

 class Optimization():
     def __init__(self):
