Skip to content

Commit

Permalink
Add linear layer to time series prediction
Browse files Browse the repository at this point in the history
As is, the final network output is modulated by a tanh nonlinearity.
This is undesirable, so as a simple and realistic fix we add a final
linear layer.
  • Loading branch information
t-vi authored and soumith committed Nov 9, 2017
1 parent fad7759 commit 62d5ca5
Showing 1 changed file with 9 additions and 6 deletions.
15 changes: 9 additions & 6 deletions time_sequence_prediction/train.py
Original file line number Diff line number Diff line change
class Sequence(nn.Module):
    """Two-layer LSTM sequence model with a final linear readout.

    Stacks two LSTMCells (1 -> 51 -> 51) and projects the second cell's
    hidden state to a scalar with a linear layer, so the network's output
    is not squashed by the cells' tanh nonlinearity.
    """

    def __init__(self):
        super(Sequence, self).__init__()
        self.lstm1 = nn.LSTMCell(1, 51)   # scalar input -> 51 hidden units
        self.lstm2 = nn.LSTMCell(51, 51)  # second recurrent layer
        self.linear = nn.Linear(51, 1)    # unbounded scalar readout

    def forward(self, input, future=0):
        """Run the model over `input`, optionally extrapolating further.

        Args:
            input: (batch, seq_len) tensor of scalar observations; must be
                double precision, since the hidden states below are double.
            future: number of extra steps to predict by feeding the model
                its own previous output.

        Returns:
            (batch, seq_len + future) tensor of per-step predictions.
        """
        outputs = []
        # Zero initial hidden/cell states for both LSTM layers.
        h_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        h_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)
        c_t2 = Variable(torch.zeros(input.size(0), 51).double(), requires_grad=False)

        # Teacher-forced pass over the observed sequence, one step per column.
        for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        for i in range(future):  # if we should predict the future
            # Feed the previous prediction back in as the next input.
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs

Expand Down

0 comments on commit 62d5ca5

Please sign in to comment.