-
Notifications
You must be signed in to change notification settings - Fork 18
/
train_nf.py
144 lines (118 loc) · 4.79 KB
/
train_nf.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
import gc
import sys
from torch.autograd import Variable
import torch
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler
torch.backends.cudnn.benchmark = True
import matplotlib.pyplot as plt
import numpy as np
import FRVSR_models
import Dataset
import pytorch_ssim
from skimage.measure import compare_ssim as ssim
def load_model(model_name, batch_size, width, height):
    """Build an SRNet and optionally restore its weights from a checkpoint.

    Parameters
    ----------
    model_name : str
        File name of a checkpoint under ./models/. An empty string means
        "fresh model, no weights loaded".
    batch_size, width, height :
        Accepted for interface compatibility with callers; not used here.

    Returns
    -------
    The SRNet instance (randomly initialised, or with the checkpoint's
    state dict applied).
    """
    model = FRVSR_models.SRNet(in_dim=3)
    if model_name != '':
        model_path = f'./models/{model_name}'
        # map_location='cpu' lets a GPU-trained checkpoint load on any host.
        checkpoint = torch.load(model_path, map_location='cpu')
        model.load_state_dict(checkpoint)
        # Report success only AFTER the weights have actually been applied
        # (the original printed this before loading, so it claimed success
        # even when torch.load raised).
        print("successfully loaded the model")
    return model
def run():
    """Train the SRNet model with per-frame MSE content loss.

    Builds a fresh model, moves it to GPU when available, then runs up to
    ``num_epochs`` passes over the training loader, saving a checkpoint
    under ./models/ after every epoch. Progress (learning rate, per-batch
    content loss, running average every ``output_period`` batches) is
    printed to stdout.
    """
    # Parameters
    num_epochs = 100          # maximum number of passes over the training set
    output_period = 10        # print/reset the running loss every N batches
    batch_size = 8
    width, height = 112, 64   # low-res frame dimensions (passed to load_model, unused there)

    # setup the device for running
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = load_model('', batch_size, width, height)
    model = model.to(device)
    # Save the untrained initial weights once before training starts.
    torch.save(model.state_dict(), "models/FRVSRTest")

    # NOTE(review): validation_split=0 means val_loader is presumably empty,
    # and num_val_batches is never used below — confirm whether validation
    # was intentionally disabled.
    train_loader, val_loader = Dataset.get_data_loaders(batch_size, dataset_size=7000, validation_split=0)
    num_train_batches = len(train_loader)
    num_val_batches = len(val_loader)

    #flow_criterion = nn.MSELoss().to(device)
    # NOTE(review): content_criterion is created but the loop below computes
    # MSE manually via torch.mean((hr_est - hr_img) ** 2) instead.
    content_criterion = nn.MSELoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-5)

    epoch = 1
    while epoch <= num_epochs:
        running_loss = 0.0
        for param_group in optimizer.param_groups:
            print('Current learning rate: ' + str(param_group['lr']))
        model.train()

        for batch_num, (lr_imgs, hr_imgs) in enumerate(train_loader, 1):
            lr_imgs = lr_imgs.to(device)
            hr_imgs = hr_imgs.to(device)
            # print(f'hrimgs.shape is {hr_imgs.shape}')
            # print(f'lrimgs.shape is {lr_imgs.shape}')
            optimizer.zero_grad()
            #model.init_hidden(device)
            batch_content_loss = 0
            #batch_flow_loss = 0

            # lr_imgs = 7 * 4 * 3 * H * W
            # Iterating zip(lr_imgs, hr_imgs) feeds the model one slice along
            # the leading dimension at a time (per the shape comment above,
            # presumably one time step of a 7-frame sequence — TODO confirm)
            # and sums the per-slice MSE into a single batch loss.
            for lr_img, hr_img in zip(lr_imgs, hr_imgs):
                # print(lr_img.shape)
                hr_est = model(lr_img)
                content_loss = torch.mean((hr_est - hr_img) ** 2)
                #ssim-content_loss
                #ssim_loss = pytorch_ssim.SSIM(window_size = 11)
                #content_loss = ssim_loss(hr_est, hr_img)
                # ssim_loss = pytorch_ssim.ssim(hr_est, hr_img).data[0]
                # ssim_loss.to(device)
                # content_loss = ssim_loss
                #flow_loss = flow_criterion(lr_est, lr_img)
                #print(f'content_loss is {content_loss}, flow_loss is {flow_loss}')
                batch_content_loss += content_loss
                #batch_flow_loss += flow_loss
                #print(f'loss is {loss}')

            # One backward/step per batch on the summed (not averaged) loss.
            loss = batch_content_loss
            loss.backward()
            print(f'content_loss {batch_content_loss}')
            # print("success")
            optimizer.step()
            running_loss += loss.item()

            if batch_num % output_period == 0:
                print('[%d:%.2f] loss: %.3f' % (
                    epoch, batch_num * 1.0 / num_train_batches,
                    running_loss / output_period
                ))
                running_loss = 0.0
                gc.collect()
        gc.collect()

        # save after every epoch
        torch.save(model.state_dict(), "models/LR-5_SRN.%d" % epoch)

        # model.eval()

        # a helper function to calc topk error
        # def calcTopKError(loader, k, name):
        #     epoch_topk_err = 0.0
        #
        #     for batch_num, (inputs, labels) in enumerate(loader, 1):
        #         inputs = inputs.to(device)
        #         labels = labels.to(device)
        #         outputs = model(inputs)
        #
        #         _,cls = torch.topk(outputs,dim=1,k=k)
        #         batch_topk_err = (1 - (cls.numel()-torch.nonzero(cls-labels.view(-1,1)).shape[0])/labels.numel())
        #         epoch_topk_err = epoch_topk_err * ((batch_num-1) / batch_num) \
        #             + batch_topk_err / batch_num
        #
        #         if batch_num % output_period == 0:
        #             # print('[%d:%.2f] %s_Topk_error: %.3f' % (
        #             #     epoch,
        #             #     batch_num*1.0/num_val_batches,
        #             #     name,
        #             #     epoch_topk_err/batch_num
        #             #     ))
        #             gc.collect()
        #
        #
        #     return epoch_topk_err

        gc.collect()
        epoch += 1
if __name__ == "__main__":
    # Script entry point: run the full training loop defined above.
    print('Starting training')
    run()
    print('Training terminated')