From 8847e5801ac8910aeadb9453cec72819486c28e3 Mon Sep 17 00:00:00 2001
From: Henry Addison
Date: Mon, 8 Jul 2024 21:39:22 +0100
Subject: [PATCH] don't bother recording val loss before any training

it's large
---
 src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py b/src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py
index 0aefb72ae..986a03865 100644
--- a/src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py
+++ b/src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py
@@ -171,12 +171,6 @@ def train(config, workdir):
     if config.training.random_crop_size > 0:
         random_crop = torchvision.transforms.RandomCrop(config.training.random_crop_size)
 
-    # log val loss before any training
-    if int(state['epoch']) == 0:
-        val_set_loss = val_loss(config, eval_dl, eval_step_fn, state)
-        epoch_metrics = {"epoch/val/loss": val_set_loss}
-        log_epoch(state['epoch'], epoch_metrics, wandb_run, writer)
-
     for epoch in range(initial_epoch, num_train_epochs + 1):
         state['epoch'] = epoch
         train_set_loss = 0.0
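
Note: the loop below is a minimal, runnable sketch of the logging pattern
that makes the deleted block redundant. It assumes train() still computes
and logs validation loss once per epoch (the "epoch/val/loss" key in the
deleted code suggests this, but the hunk does not show it); val_loss and
log_epoch are stubbed stand-ins here, not the project's implementations,
and the toy values are placeholders.

    # Sketch only: stubs stand in for run_lib.py's real helpers.
    def val_loss(config, eval_dl, eval_step_fn, state):
        # real version: averages eval_step_fn's loss over the eval dataloader
        return 0.0

    def log_epoch(epoch, metrics, wandb_run=None, writer=None):
        # real version: forwards metrics to wandb and the tensorboard writer
        print(f"epoch {epoch}: {metrics}")

    config = eval_dl = eval_step_fn = None  # placeholders for the sketch
    state = {'epoch': 0}
    initial_epoch, num_train_epochs = 1, 3

    for epoch in range(initial_epoch, num_train_epochs + 1):
        state['epoch'] = epoch
        train_set_loss = 0.0
        # ... training steps accumulate train_set_loss here ...

        # Validation loss is still measured after each training epoch, so the
        # removed epoch-0 pass only reported the large, uninformative loss of
        # an untrained model while costing a full sweep over the eval set.
        val_set_loss = val_loss(config, eval_dl, eval_step_fn, state)
        log_epoch(state['epoch'], {"epoch/val/loss": val_set_loss}, None, None)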