pt_example.py
import numpy as np
import torch.nn as nn
from ncps.wirings import AutoNCP
from ncps.torch import LTC
import pytorch_lightning as pl
import torch
import torch.utils.data as data
import matplotlib.pyplot as plt
import seaborn as sns
N = 48 # Length of the time-series
out_features = 1
in_features = 2
# Input feature is a sine and a cosine wave
data_x = np.stack(
    [np.sin(np.linspace(0, 3 * np.pi, N)), np.cos(np.linspace(0, 3 * np.pi, N))], axis=1
)
data_x = np.expand_dims(data_x, axis=0).astype(np.float32) # Add batch dimension
# Target output is a sine with double the frequency of the input signal
data_y = np.sin(np.linspace(0, 6 * np.pi, N)).reshape([1, N, 1]).astype(np.float32)
print("data_x.shape: ", str(data_x.shape))
print("data_y.shape: ", str(data_y.shape))
data_x = torch.Tensor(data_x)
data_y = torch.Tensor(data_y)
dataloader = data.DataLoader(
    data.TensorDataset(data_x, data_y), batch_size=1, shuffle=True, num_workers=4
)
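# The dataset holds a single sequence, so one epoch corresponds to one batch.
# Note (platform-dependent): with num_workers > 0, OSes that spawn worker
# processes (e.g., Windows/macOS) may require running the training code under
# an `if __name__ == "__main__":` guard.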
# Let's visualize the training data
sns.set()
plt.figure(figsize=(6, 4))
plt.plot(data_x[0, :, 0], label="Input feature 1")
plt.plot(data_x[0, :, 1], label="Input feature 2")
plt.plot(data_y[0, :, 0], label="Target output")
plt.ylim((-1, 1))
plt.title("Training data")
plt.legend(loc="upper right")
plt.savefig("pt_plot1.png")
# LightningModule for training an RNNSequence module
class SequenceLearner(pl.LightningModule):
    def __init__(self, model, lr=0.005):
        super().__init__()
        self.model = model
        self.lr = lr

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat, _ = self.model.forward(x)
        y_hat = y_hat.view_as(y)
        loss = nn.MSELoss()(y_hat, y)
        self.log("train_loss", loss, prog_bar=True)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat, _ = self.model.forward(x)
        y_hat = y_hat.view_as(y)
        loss = nn.MSELoss()(y_hat, y)
        self.log("val_loss", loss, prog_bar=True)
        return loss

    def test_step(self, batch, batch_idx):
        # Here we just reuse the validation_step for testing
        return self.validation_step(batch, batch_idx)

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters(), lr=self.lr)
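# AutoNCP builds a Neural Circuit Policy wiring from just the total number of
# neurons and the number of outputs; the remaining inter- and command neurons
# are allocated automatically.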
wiring = AutoNCP(16, out_features) # 16 units, 1 motor neuron
ltc_model = LTC(in_features, wiring, batch_first=True)
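# Optional sanity check (illustrative, not part of the original example): the
# LTC module returns a tuple of (output, final_hidden_state); with
# batch_first=True the output is shaped (batch, time, out_features) = (1, 48, 1).
with torch.no_grad():
    out, hx = ltc_model(data_x)
print("ltc output shape:", str(out.shape))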
learn = SequenceLearner(ltc_model, lr=0.01)
trainer = pl.Trainer(
    logger=pl.loggers.CSVLogger("log"),
    max_epochs=400,
    gradient_clip_val=1,  # Clip gradient to stabilize training
)
# Let's visualize how the LTC initially performs before the training
sns.set()
with torch.no_grad():
    prediction = ltc_model(data_x)[0].numpy()
plt.figure(figsize=(6, 4))
plt.plot(data_y[0, :, 0], label="Target output")
plt.plot(prediction[0, :, 0], label="NCP output")
plt.ylim((-1, 1))
plt.title("Before training")
plt.legend(loc="upper right")
plt.savefig("pt_plot2.png")
# Train the model for 400 epochs (= training steps)
trainer.fit(learn, dataloader)
# How does the trained model now fit to the sinusoidal function?
sns.set()
with torch.no_grad():
    prediction = ltc_model(data_x)[0].numpy()
plt.figure(figsize=(6, 4))
plt.plot(data_y[0, :, 0], label="Target output")
plt.plot(prediction[0, :, 0], label="NCP output")
plt.ylim((-1, 1))
plt.title("After training")
plt.legend(loc="upper right")
plt.savefig("pt_plot3.png")