Commit 6f1060b
don't duplicate logging for validation loss each epoch
henryaddison committed Jun 27, 2024
1 parent 4d442dd commit 6f1060b
Showing 1 changed file with 1 addition and 3 deletions.
4 changes: 1 addition & 3 deletions src/ml_downscaling_emulator/score_sde_pytorch/run_lib.py
@@ -218,9 +218,7 @@ def train(config, workdir):
 val_set_loss = val_loss(config, eval_dl, eval_step_fn, state)
 epoch_metrics = {"epoch/train/loss": train_set_loss, "epoch/val/loss": val_set_loss}

-logging.info("epoch: %d, val_loss: %.5e" % (state['epoch'], val_set_loss))
-writer.add_scalar("epoch/val/loss", val_set_loss, global_step=state['epoch'])
-log_epoch(state['epoch'], epoch_metrics, wandb_run,writer)
+log_epoch(state['epoch'], epoch_metrics, wandb_run, writer)

 if (state['epoch'] != 0 and state['epoch'] % config.training.snapshot_freq == 0) or state['epoch'] == num_train_epochs:
     # Save the checkpoint.
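The removed logging.info and writer.add_scalar calls were redundant because the validation loss is already present in epoch_metrics, which log_epoch records. For context only, below is a minimal sketch of what a log_epoch helper with this call signature could look like, assuming it fans each metric out to the Python logger, the wandb run, and the TensorBoard SummaryWriter; the actual implementation in this repository may differ.

import logging


def log_epoch(epoch, epoch_metrics, wandb_run, writer):
    """Hypothetical sketch: record each per-epoch metric once, to every sink."""
    for name, value in epoch_metrics.items():
        # One log line per metric, replacing the ad-hoc logging.info call removed above.
        logging.info("epoch: %d, %s: %.5e", epoch, name, value)
        if wandb_run is not None:
            # wandb's Run.log takes a plain dict; stepping by epoch keeps the
            # x-axis consistent with the TensorBoard plots.
            wandb_run.log({name: value}, step=epoch)
        if writer is not None:
            writer.add_scalar(name, value, global_step=epoch)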
