-- MSE_adagrad.lua: train an MLP autoencoder with MSE loss and Adagrad (Torch7).
require 'torch';
require 'optim';
require 'nn';
require 'image';

local optimState = { learningRate = 1e-4 }

-- Performs one Adagrad update on a single (input, target) pair and returns
-- the MSE loss for that sample. `loss`, `Weights`, and `Gradients` are
-- globals set up in the training section below.
function network(Data, Net, Out)
   local function feval(Weights)
      Gradients:zero()                     -- clear gradients from the last step
      y = Net:forward(Data)                -- forward pass
      currLoss = loss:forward(y, Out)      -- MSE between output and target
      local dE_dy = loss:backward(y, Out)  -- d(loss)/d(output)
      Net:backward(Data, dE_dy)            -- accumulate d(loss)/d(Weights) into Gradients
      return currLoss, Gradients
   end
   optim.adagrad(feval, Weights, optimState)
   return currLoss
end
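
-- Note on the optimizer: optim.adagrad accumulates the squared gradients in
-- its state table (here optimState) and scales each update by the inverse
-- square root of that running sum, so the effective learning rate decays
-- per-parameter over time.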
-- Trains Net for `epoch` epochs, one sample at a time, and logs the mean
-- training loss per epoch into column 2 of Train_Error.
function Train_Net(Net, Data_Set, Data_out)
   for Tepoch = 1, epoch do
      print('epoch ' .. Tepoch .. '/' .. epoch)
      Total_Loss = 0
      for loopno = 1, Data_out:size(1) do
         input = Data_Set[loopno]
         output = Data_out[loopno]
         local LossTrain = network(input, Net, output)
         Total_Loss = Total_Loss + LossTrain
      end
      Total_Loss = Total_Loss / Data_out:size(1)
      print('Training Loss = ' .. Total_Loss)
      Train_Error[{ {Tepoch}, {2} }] = Total_Loss
   end
end
------------------------------------------------------------------
------------------------------------------------------------------
-- Training -------------------------------------------
------------------------------------------------------------------
------------------------------------------------------------------
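-- The original script assumes `epoch`, `Train_Error`, and `Tr_Set` are
-- defined elsewhere (e.g. in a companion data-loading script). A minimal
-- sketch with placeholder values so the file runs standalone; these are
-- assumptions, not the original settings:
epoch = epoch or 10                                -- assumed number of epochs
Train_Error = Train_Error or torch.zeros(epoch, 2) -- column 2 holds the per-epoch loss
Tr_Set = Tr_Set or torch.rand(100, 784)            -- stand-in for N x 784 flattened images (e.g. MNIST)
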
print('Training...')

-- Autoencoder: 784 -> 128 -> 32 -> 128 -> 784, ReLU throughout.
local net = nn.Sequential()
net:add(nn.Linear(784, 128))
net:add(nn.ReLU())
net:add(nn.Linear(128, 32))
net:add(nn.ReLU())
net:add(nn.Linear(32, 128))
net:add(nn.ReLU())
net:add(nn.Linear(128, 784))
net:add(nn.ReLU())

loss = nn.MSECriterion()

-- STEP-1
print('At Ladder-1 MSE loss + Adagrad................................')
Weights, Gradients = net:getParameters()  -- flat parameter/gradient views used by feval
Train_Net(net, Tr_Set, Tr_Set)            -- autoencoder: the targets are the inputs
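
-- A quick sanity check (not in the original): reconstruct the first training
-- sample and print its MSE, reusing the same criterion.
local recon = net:forward(Tr_Set[1])
print('Sample reconstruction MSE = ' .. loss:forward(recon, Tr_Set[1]))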