test.py
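# Tiny hand-rolled two-layer network: run a forward pass through LogSoftmax + NLLLoss
# and check that autograd produces gradients for W and b.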
import torch
from torch.autograd import Variable  # Variable is deprecated since PyTorch 0.4; tensors with requires_grad=True behave the same

dtype = torch.FloatTensor
# Weight matrix and bias shared by both layers; autograd tracks gradients for each.
W = Variable(torch.FloatTensor([[2, 3, 5], [7, -9, 11], [13, 17, 19]]), requires_grad=True)
b = Variable(torch.FloatTensor([3, 9, -43]), requires_grad=True)
print(W.size())
print(b.size())
m = torch.nn.LogSoftmax(dim=1)  # log-probabilities over the class dimension
lossfn = torch.nn.NLLLoss()     # expects log-probabilities and class indices
# Two input rows and their target class indices (commented lines: single-sample variant).
x = Variable(torch.FloatTensor([[5, 99, 112], [4, 3, 7]]), requires_grad=False)
# x = Variable(torch.FloatTensor([[5, 99, 112]]), requires_grad=False)
y = Variable(torch.LongTensor([1, 2]), requires_grad=False)
# y = Variable(torch.LongTensor([1]), requires_grad=False)
print(x.size())
# First layer: affine transform followed by a ReLU (clamp at zero).
l1 = x.mm(W)
l1 = l1.add(b.expand_as(l1)).clamp(min=0)
# Second layer reuses the same W and b.
l2 = l1.mm(W)
l2 = l2.add(b.expand_as(l2)).clamp(min=0)
sm = m(l2)  # same as torch.nn.functional.log_softmax(l2, dim=1)
print(l2)
print('log_softmax', sm)
z = lossfn(sm, y)  # negative log-likelihood of the true classes
# z = l1.dot(x)
print('loss', z)
z.backward()  # backpropagate; fills in W.grad and b.grad
print('W.grad', W.grad)
print('b.grad', b.grad)