import os
import sys
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
from option import args
from collections import OrderedDict

# tool functions
def get_img_seq(img_seq_dir):
    # Walk the directory tree and load every image whose extension is listed in args.ext.
    img_seq = []
    for root, _, fnames in sorted(os.walk(img_seq_dir)):
        for fname in sorted(fnames):
            if any(fname.endswith(ext) for ext in args.ext):
                img_name = os.path.join(root, fname)
                img_seq.append(cv2.imread(img_name))
    return img_seq
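
# Example usage (a sketch; 'data/sequence_01' is a hypothetical path): with
# args.ext = ['.png'], get_img_seq('data/sequence_01') returns the frames of that
# folder as a list of BGR numpy arrays, in sorted filename order.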

def features_grad(features):
    # High-pass (Laplacian-style) filter: each output pixel is the mean of its eight
    # neighbours minus the centre pixel, applied independently to every channel.
    kernel = [[1 / 8, 1 / 8, 1 / 8], [1 / 8, -1, 1 / 8], [1 / 8, 1 / 8, 1 / 8]]
    kernel = torch.FloatTensor(kernel).unsqueeze(0).unsqueeze(0)
    kernel = kernel.to(features.device)  # follow the input's device instead of assuming CUDA
    _, c, _, _ = features.shape
    c = int(c)
    for i in range(c):
        feat_grad = F.conv2d(features[:, i:i + 1, :, :], kernel, stride=1, padding=1)
        if i == 0:
            feat_grads = feat_grad
        else:
            feat_grads = torch.cat((feat_grads, feat_grad), dim=1)
    return feat_grads
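
# Shape note (a sketch, not from the original file): for a feature map 'feat' of shape
# (B, C, H, W), features_grad(feat) returns a (B, C, H, W) tensor holding one gradient
# map per input channel.
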
# network functions
def pad(pad_type, padding):
    # 'zero' padding is handled by nn.Conv2d itself, so no extra layer is needed;
    # other padding types are reported instead of being silently ignored.
    pad_type = pad_type.lower()
    if padding == 0:
        return None
    raise NotImplementedError('[ERROR] Padding layer [%s] is not implemented!' % pad_type)

def get_valid_padding(kernel_size, dilation):
    # Padding that keeps the spatial size unchanged for a stride-1 dilated convolution.
    kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
    padding = (kernel_size - 1) // 2
    return padding
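
# Worked example: kernel_size=3 with dilation=2 has an effective kernel of
# 3 + (3 - 1) * (2 - 1) = 5, so get_valid_padding(3, 2) == 2, which preserves
# the spatial size at stride 1.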

def activation(act_type=args.act_type, slope=0.2, n_prelu=1):
    act_type = act_type.lower()
    if act_type == 'prelu':
        layer = nn.PReLU(num_parameters=n_prelu, init=slope)
    elif act_type == 'lrelu':
        layer = nn.LeakyReLU(negative_slope=slope, inplace=True)
    else:
        raise NotImplementedError('[ERROR] Activation layer [%s] is not implemented!' % act_type)
    return layer

def norm(n_feature, norm_type='bn'):
    norm_type = norm_type.lower()
    if norm_type == 'bn':
        layer = nn.BatchNorm2d(n_feature)
    else:
        raise NotImplementedError('[ERROR] Normalization layer [%s] is not implemented!' % norm_type)
    return layer

def sequential(*args):
    # Flatten the given modules (skipping None) into a single nn.Sequential.
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('[ERROR] %s.sequential() does not support OrderedDict' % sys.modules[__name__])
        else:
            return args[0]
    modules = []
    for module in args:
        if isinstance(module, nn.Sequential):
            for submodule in module:
                modules.append(submodule)
        elif isinstance(module, nn.Module):
            modules.append(module)
    return nn.Sequential(*modules)
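
# Example (a sketch): sequential(None, nn.Conv2d(3, 8, 3), nn.ReLU()) drops the None
# and returns nn.Sequential(Conv2d, ReLU); nested nn.Sequential inputs are flattened
# into their submodules.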

def ConvBlock(in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, valid_padding=True, padding=0,
              act_type='prelu', norm_type='bn', pad_type='zero'):
    # Conv2d, optionally preceded by an explicit padding layer and followed by normalization and activation.
    if valid_padding:
        padding = get_valid_padding(kernel_size, dilation)
    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
    conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation,
                     bias=bias)
    act = activation(act_type) if act_type else None
    n = norm(out_channels, norm_type) if norm_type else None
    return sequential(p, conv, n, act)
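
# Minimal smoke test (a sketch, not part of the original module; shapes are
# illustrative only). It runs on CPU because the features_grad kernel is moved
# to the input's device.
if __name__ == '__main__':
    x = torch.randn(1, 4, 32, 32)
    block = ConvBlock(4, 8, kernel_size=3, act_type='prelu', norm_type='bn')
    y = block(x)
    print(y.shape)       # torch.Size([1, 8, 32, 32]): valid_padding keeps H and W unchanged
    grads = features_grad(y)
    print(grads.shape)   # torch.Size([1, 8, 32, 32]): one gradient map per channel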