forked from JiahuiYu/wdsr_ntire2018
-
Notifications
You must be signed in to change notification settings - Fork 0
/
wdsr_a.py
80 lines (68 loc) · 2.34 KB
/
wdsr_a.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import math
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
class Block(nn.Module):
    """WDSR-A residual block: wide conv -> activation -> narrow conv,
    added to a scaled identity skip connection.

    Args:
        n_feats: number of input/output channels of the block.
        kernel_size: conv kernel size; padding of kernel_size//2 keeps the
            spatial dimensions unchanged.
        block_feats: expanded ("wide") channel count used inside the block.
        wn: callable applied to each conv layer (e.g. weight normalization,
            or the identity for a plain conv).
        res_scale: scalar multiplier applied to the residual branch before
            the skip addition.
        act: activation module placed between the two convs; defaults to
            ``nn.ReLU(True)``.
    """

    def __init__(
            self, n_feats, kernel_size, block_feats, wn, res_scale=1, act=None):
        super(Block, self).__init__()
        # FIX: the original signature used ``act=nn.ReLU(True)`` — a mutable
        # default argument, so one ReLU module instance was silently shared by
        # every Block built with the default. Use the None-sentinel idiom and
        # build a fresh module per instance; runtime behavior is unchanged
        # (ReLU is stateless) and callers passing ``act`` explicitly are
        # unaffected.
        if act is None:
            act = nn.ReLU(True)
        self.res_scale = res_scale
        body = []
        # Expand n_feats -> block_feats (the "wide" part of WDSR-A) ...
        body.append(
            wn(nn.Conv2d(n_feats, block_feats, kernel_size, padding=kernel_size//2)))
        body.append(act)
        # ... then project back block_feats -> n_feats so the identity skip
        # connection below adds tensors of the same shape.
        body.append(
            wn(nn.Conv2d(block_feats, n_feats, kernel_size, padding=kernel_size//2)))
        self.body = nn.Sequential(*body)

    def forward(self, x):
        # Scaled residual branch plus identity skip.
        res = self.body(x) * self.res_scale
        res += x
        return res
class MODEL(nn.Module):
    """WDSR-A single-image super-resolution network.

    Pipeline: normalize pixels -> head conv -> stack of wide residual
    Blocks -> tail conv + PixelShuffle upsampler, plus a shallow
    conv + PixelShuffle "skip" branch straight from the input; the two
    branches are summed and de-normalized back to pixel range.

    Expects ``args`` to provide: ``scale`` (sequence; first entry is used),
    ``n_resblocks``, ``n_feats``, ``n_colors``, ``block_feats``,
    ``res_scale``, and per-channel dataset means ``r_mean``/``g_mean``/
    ``b_mean`` (presumably in [0, 1], since they are scaled by 255 in
    ``forward`` — confirm against the training config).
    """

    def __init__(self, args):
        super(MODEL, self).__init__()
        # hyper-params
        self.args = args
        scale = args.scale[0]
        n_resblocks = args.n_resblocks
        n_feats = args.n_feats
        kernel_size = 3
        act = nn.ReLU(True)
        # Weight normalization wrapper applied to every conv.
        wn = lambda x: torch.nn.utils.weight_norm(x)
        # FIX: torch.autograd.Variable is deprecated (a no-op since
        # torch 0.4); a plain tensor is equivalent. Kept as an ordinary
        # attribute (not a registered buffer) so existing checkpoints still
        # load with strict=True.
        self.rgb_mean = torch.tensor(
            [args.r_mean, args.g_mean, args.b_mean]).view([1, 3, 1, 1])
        # define head module: lift RGB input into the feature space.
        head = []
        head.append(
            wn(nn.Conv2d(args.n_colors, n_feats, 3, padding=3//2)))
        # define body module: the stack of wide residual blocks.
        body = []
        for i in range(n_resblocks):
            body.append(
                Block(n_feats, kernel_size, args.block_feats, wn=wn, res_scale=args.res_scale, act=act))
        # define tail module: project to scale^2 * n_colors channels, then
        # PixelShuffle rearranges them into a (scale x)-upsampled image.
        tail = []
        out_feats = scale*scale*args.n_colors
        tail.append(
            wn(nn.Conv2d(n_feats, out_feats, 3, padding=3//2)))
        tail.append(nn.PixelShuffle(scale))
        # Shallow skip branch directly from the (normalized) input; the
        # larger 5x5 kernel compensates for its lack of depth.
        skip = []
        skip.append(
            wn(nn.Conv2d(args.n_colors, out_feats, 5, padding=5//2))
        )
        skip.append(nn.PixelShuffle(scale))
        # make object members
        self.head = nn.Sequential(*head)
        self.body = nn.Sequential(*body)
        self.tail = nn.Sequential(*tail)
        self.skip = nn.Sequential(*skip)

    def forward(self, x):
        # FIX: the original hard-coded ``self.rgb_mean.cuda()``, which
        # crashes on CPU (and on any device other than the default GPU).
        # Follow the input tensor's device instead — identical on CUDA.
        # Also hoisted: the mean was recomputed twice per forward pass.
        mean = self.rgb_mean.to(x.device) * 255
        # Normalize: subtract the dataset RGB mean (scaled to the 0-255
        # pixel range) and divide by 127.5, mapping pixels to roughly [-1, 1].
        x = (x - mean)/127.5
        s = self.skip(x)
        x = self.head(x)
        x = self.body(x)
        x = self.tail(x)
        x += s
        # De-normalize back to the 0-255 pixel range.
        x = x*127.5 + mean
        return x