Commit 07dd1ac

Reverted to l1 loss for durations.
cschaefer26 committed Jul 6, 2020
1 parent 6b275f8 commit 07dd1ac
Showing 2 changed files with 6 additions and 3 deletions.

hparams.py (3 changes: 2 additions & 1 deletion)

@@ -120,7 +120,8 @@
 # Training

 forward_schedule = [(1e-4, 10_000, 32), # progressive training schedule
-                    (1e-4, 300_000, 32)] # (lr, step, batch_size)
+                    (1e-4, 300_000, 32), # (lr, step, batch_size)
+                    (2e-5, 600_000, 32)] # (lr, step, batch_size)

 forward_max_mel_len = 1250 # if you have a couple of extremely long spectrograms you might want to use this
 forward_clip_grad_norm = 1.0 # clips the gradient norm to prevent explosion - set to None if not needed
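
The inline comment says each forward_schedule entry is a (lr, step, batch_size) tuple defining a progressive training schedule; this commit extends it with a third, lower-learning-rate phase. A minimal sketch of how such a schedule could be resolved for a given training step; the helper get_phase is hypothetical and not code from this repository:

```python
from typing import List, Tuple

forward_schedule: List[Tuple[float, int, int]] = [
    (1e-4, 10_000, 32),   # warm-up phase
    (1e-4, 300_000, 32),  # main phase
    (2e-5, 600_000, 32),  # added phase at a lower learning rate
]

def get_phase(step: int, schedule: List[Tuple[float, int, int]]) -> Tuple[float, int, int]:
    """Return the (lr, step_bound, batch_size) entry covering `step`."""
    for lr, bound, batch_size in schedule:
        if step < bound:
            return lr, bound, batch_size
    return schedule[-1]  # past the last bound, stay on the final phase

assert get_phase(5_000, forward_schedule) == (1e-4, 10_000, 32)
assert get_phase(400_000, forward_schedule) == (2e-5, 600_000, 32)
```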
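
Likewise, forward_clip_grad_norm caps the global gradient norm before each optimizer step, per its comment. A minimal illustration using PyTorch's stock utility on a throwaway model, not this repository's trainer:

```python
import torch

model = torch.nn.Linear(4, 4)
loss = model(torch.randn(2, 4)).sum()
loss.backward()

# Rescale gradients in place so their global norm is at most 1.0,
# mirroring forward_clip_grad_norm = 1.0; omit this call to mimic None.
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
```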

trainer/forward_trainer.py (6 changes: 4 additions & 2 deletions)

@@ -1,4 +1,6 @@
 import time
+import torch.nn.functional as F
+
 from typing import Tuple

 import torch

@@ -65,7 +67,7 @@ def train_session(self, model: ForwardTacotron,
 m1_loss = self.l1_loss(m1_hat, m, lens)
 m2_loss = self.l1_loss(m2_hat, m, lens)

-dur_loss = self.log_l1_loss(dur_hat, dur)
+dur_loss = F.l1_loss(dur_hat, dur)

 loss = m1_loss + m2_loss + dur_loss
 optimizer.zero_grad()

@@ -117,7 +119,7 @@ def evaluate(self, model: ForwardTacotron, val_set: Dataset) -> Tuple[float, float]:
 m1_hat, m2_hat, dur_hat = model(x, m, dur)
 m1_loss = self.l1_loss(m1_hat, m, lens)
 m2_loss = self.l1_loss(m2_hat, m, lens)
-dur_loss = self.log_l1_loss(dur_hat, dur)
+dur_loss = F.l1_loss(dur_hat, dur)
 m_val_loss += m1_loss.item() + m2_loss.item()
 dur_val_loss += dur_loss.item()
 return m_val_loss / len(val_set), dur_val_loss / len(val_set)
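
As the commit message says, this reverts the duration loss from a log-space L1 back to a plain L1 on raw durations, in both training and evaluation. The removed self.log_l1_loss is not shown in this diff; a plausible reconstruction, assuming it computed an L1 distance between log-compressed durations, to illustrate what changes:

```python
import torch
import torch.nn.functional as F

def log_l1_loss(dur_hat: torch.Tensor, dur: torch.Tensor) -> torch.Tensor:
    # Assumed definition, not shown in the diff: L1 distance in log space,
    # which shrinks errors on long durations relative to short ones.
    return F.l1_loss(torch.log1p(dur_hat), torch.log1p(dur))

dur = torch.tensor([1.0, 2.0, 10.0])      # target phoneme durations (frames)
dur_hat = torch.tensor([2.0, 2.0, 12.0])  # predicted durations

print(log_l1_loss(dur_hat, dur))  # behaviour before this commit (assumed)
print(F.l1_loss(dur_hat, dur))    # behaviour after this commit: plain L1
```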