classifier.py
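"""ResNet-based image classifier.

The encoder is a torchvision ResNet, optionally followed by a linear bottleneck
plus BatchNorm1d (enabled when args.bottleneck_dim > 0). A linear head maps the
encoder output to args.num_classes logits and can be wrapped with weight norm
(enabled when args.weight_norm_dim >= 0). Checkpoints saved under DDP can be
loaded directly, and get_params() splits parameters into pretrained backbone
vs. newly added layers so they can be trained with different learning rates.
"""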
import logging

import torch
import torch.nn as nn
import torchvision.models as models


class Classifier(nn.Module):
    def __init__(self, args, checkpoint_path=None):
        super().__init__()
        self.args = args
        model = None

        # 1) ResNet backbone (up to the penultimate layer)
        if not self.use_bottleneck:
            model = models.__dict__[args.arch](pretrained=True)
            modules = list(model.children())[:-1]
            self.encoder = nn.Sequential(*modules)
            self._output_dim = model.fc.in_features
        # 2) ResNet backbone + bottleneck (last fc acts as the bottleneck)
        else:
            model = models.__dict__[args.arch](pretrained=True)
            model.fc = nn.Linear(model.fc.in_features, args.bottleneck_dim)
            bn = nn.BatchNorm1d(args.bottleneck_dim)
            self.encoder = nn.Sequential(model, bn)
            self._output_dim = args.bottleneck_dim

        self.fc = nn.Linear(self.output_dim, args.num_classes)

        ## Initialization and Masking
        for m in self.modules():
            # if isinstance(m, nn.Conv2d):
            #     n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            #     m.weight.data.normal_(0, math.sqrt(2. / n))
            # elif isinstance(m, nn.BatchNorm2d):
            #     m.weight.data.fill_(1)
            #     m.bias.data.zero_()
            if isinstance(m, nn.Linear):
                nn.init.orthogonal_(m.weight.data)  # initialize with orthogonal rows

        if self.use_weight_norm:
            self.fc = nn.utils.weight_norm(self.fc, dim=args.weight_norm_dim)

        if checkpoint_path:
            self.load_from_checkpoint(checkpoint_path)
    def forward(self, x, return_feats=False):
        # 1) encoder feature
        feat = self.encoder(x)
        feat = torch.flatten(feat, 1)
        # 2) classifier logits
        logits = self.fc(feat)
        if return_feats:
            return feat, logits
        return logits
    def load_from_checkpoint(self, checkpoint_path):
        checkpoint = torch.load(checkpoint_path, map_location="cpu")
        state_dict = dict()
        for name, param in checkpoint["state_dict"].items():
            # strip the 'module.' prefix added by DDP
            name = name.replace("module.", "")
            state_dict[name] = param
        msg = self.load_state_dict(state_dict, strict=False)
        logging.info(
            f"Loaded from {checkpoint_path}; missing params: {msg.missing_keys}"
        )
    def get_params(self):
        """
        Backbone parameters use 1x lr; extra parameters use 10x lr.
        """
        backbone_params = []
        extra_params = []
        # case 1) plain ResNet encoder: everything in the encoder is backbone
        if not self.use_bottleneck:
            backbone_params.extend(self.encoder.parameters())
        # case 2) ResNet + bottleneck: split the pretrained backbone from the new layers
        else:
            resnet = self.encoder[0]
            for module in list(resnet.children())[:-1]:
                backbone_params.extend(module.parameters())
            # bottleneck fc + (bn)
            extra_params.extend(resnet.fc.parameters())
            extra_params.extend(self.encoder[1].parameters())
        # classifier fc is a newly added layer in both cases
        extra_params.extend(self.fc.parameters())

        # exclude frozen params
        backbone_params = [param for param in backbone_params if param.requires_grad]
        extra_params = [param for param in extra_params if param.requires_grad]
        return backbone_params, extra_params
    @property
    def num_classes(self):
        return self.fc.weight.shape[0]

    @property
    def output_dim(self):
        return self._output_dim

    @property
    def use_bottleneck(self):
        return self.args.bottleneck_dim > 0

    @property
    def use_weight_norm(self):
        return self.args.weight_norm_dim >= 0
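

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the original training code).
# `args` only needs the fields Classifier reads above: arch, bottleneck_dim,
# weight_norm_dim, and num_classes. The values and learning rates below are
# assumptions for demonstration, not the project's defaults; the 1x/10x split
# simply mirrors the get_params() docstring.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from types import SimpleNamespace

    args = SimpleNamespace(
        arch="resnet50",      # any torchvision ResNet name (downloads ImageNet weights)
        bottleneck_dim=256,   # > 0 enables the bottleneck branch
        weight_norm_dim=0,    # >= 0 enables weight norm on the classifier fc
        num_classes=31,
    )
    model = Classifier(args)

    # Forward pass on a dummy batch; return_feats=True also yields features.
    images = torch.randn(4, 3, 224, 224)
    feats, logits = model(images, return_feats=True)
    print(feats.shape, logits.shape)  # (4, bottleneck_dim), (4, num_classes)

    # Backbone params at the base lr, newly added layers at 10x, as documented above.
    backbone_params, extra_params = model.get_params()
    base_lr = 1e-3
    optimizer = torch.optim.SGD(
        [
            {"params": backbone_params, "lr": base_lr},
            {"params": extra_params, "lr": 10 * base_lr},
        ],
        lr=base_lr,
        momentum=0.9,
        weight_decay=1e-4,
    )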