# encoder.py
import math

import torch
import torch.nn as nn
import torch.nn.functional as F


class CNNWithPositionalEncoding(nn.Module):
    """CNN feature extractor that adds a 2D sinusoidal positional encoding to its output.

    For the broadcast addition in forward() to work, d_model must equal the 64 output
    channels of conv6, and height/width must match the pooled feature-map size
    (input height // 8, input width // 8).
    """

    def __init__(self, d_model, height, width):
        super(CNNWithPositionalEncoding, self).__init__()
        # CNN architecture
        self.conv1 = nn.Conv2d(1, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.bn1 = nn.BatchNorm2d(512)
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 1), stride=(2, 1))  # halves height
        self.conv2 = nn.Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.bn2 = nn.BatchNorm2d(512)
        self.pool2 = nn.MaxPool2d(kernel_size=(1, 2), stride=(1, 2))  # halves width
        self.conv3 = nn.Conv2d(512, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.bn3 = nn.BatchNorm2d(256)
        self.conv4 = nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool3 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))  # halves height and width
        self.conv5 = nn.Conv2d(256, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        self.pool4 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))  # halves height and width
        self.conv6 = nn.Conv2d(128, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
        # Positional encoding, registered as a non-persistent buffer so that it
        # follows the module on .to(device)/.cuda() calls without entering the state_dict.
        self.register_buffer(
            "positional_encoding",
            positionalencoding2d(d_model, height, width),
            persistent=False,
        )

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.pool1(x)
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool2(x)
        x = F.relu(self.bn3(self.conv3(x)))
        x = F.relu(self.conv4(x))
        x = self.pool3(x)
        x = F.relu(self.conv5(x))
        x = self.pool4(x)
        x = F.relu(self.conv6(x))
        # Add positional encoding, broadcasting over the batch dimension
        x = x + self.positional_encoding.unsqueeze(0)
        return x

def positionalencoding2d(d_model, height, width):
    """
    :param d_model: dimension of the model
    :param height: height of the positions
    :param width: width of the positions
    :return: d_model*height*width position matrix
    """
    if d_model % 4 != 0:
        raise ValueError("Cannot use sin/cos positional encoding with a "
                         "dimension not divisible by 4 (got dim={:d})".format(d_model))
    pe = torch.zeros(d_model, height, width)
    # Each spatial dimension (width and height) uses half of d_model
    d_model = int(d_model / 2)
    div_term = torch.exp(torch.arange(0., d_model, 2) *
                         -(math.log(10000.0) / d_model))
    pos_w = torch.arange(0., width).unsqueeze(1)
    pos_h = torch.arange(0., height).unsqueeze(1)
    # First half of the channels encodes the width (x) position ...
    pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
    # ... and the second half encodes the height (y) position.
    pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
    return pe
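

if __name__ == "__main__":
    # Minimal usage sketch; the sizes below are illustrative assumptions, not
    # values taken from this file. The pooling layers halve each spatial
    # dimension three times, so an H x W input yields a 64 x (H // 8) x (W // 8)
    # feature map; d_model therefore has to be 64 and height/width have to be
    # the pooled sizes for the addition in forward() to broadcast.
    H, W = 64, 256                       # assumed input image size
    model = CNNWithPositionalEncoding(d_model=64, height=H // 8, width=W // 8)
    images = torch.randn(2, 1, H, W)     # batch of 2 single-channel images
    features = model(images)
    print(features.shape)                # expected: torch.Size([2, 64, 8, 32])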