# model.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from nets import resnet_v2

slim = tf.contrib.slim

batch_norm_params = {
    'decay': 0.997,                                            # batch_norm_decay
    'epsilon': 1e-5,                                           # batch_norm_epsilon
    'scale': True,                                             # batch_norm_scale
    'updates_collections': tf.compat.v1.GraphKeys.UPDATE_OPS,  # batch_norm_updates_collections
    'is_training': True,                                       # is_training
    'fused': None,                                             # Use fused batch norm if possible.
}

def basic_model(inputs,
                num_classes,
                is_training=True,
                is_reuse=tf.compat.v1.AUTO_REUSE,
                keep_prob=0.8,
                attention_module=None,
                scope='basic_model'):
    '''Plain ResNet-v2-50 classifier.

    :param inputs: N x H x W x C tensor
    :param num_classes: number of output classes
    :param is_training: whether batch norm runs in training mode
    :param is_reuse: variable-reuse flag forwarded to the ResNet scope
    :param keep_prob: unused here; kept for a uniform model signature
    :param attention_module: optional attention module forwarded to resnet_v2_50
    :param scope: unused here; variables live under the 'resnet_v2_50' scope
    :return: (logits, end_points)
    '''
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        net, end_points = \
            resnet_v2.resnet_v2_50(inputs,
                                   num_classes=num_classes,
                                   is_training=is_training,
                                   reuse=is_reuse,
                                   attention_module=attention_module,
                                   scope='resnet_v2_50')

        # # Global average pooling.
        # net = tf.reduce_mean(net, [1, 2], name='pool5', keepdims=True)
        # end_points['global_pool'] = net
        #
        # batch_norm_params['is_training'] = is_training
        # # net = slim.batch_norm(net, scope='batch_norm')
        # # end_points['batch_norm'] = net
        # net = slim.flatten(net, scope='flatten')
        # end_points['flatten'] = net
        # net = slim.fully_connected(net, 256, normalizer_fn=slim.batch_norm,
        #                            normalizer_params=batch_norm_params, scope='fc1')
        # end_points['fc1'] = net
        #
        # net = slim.fully_connected(net, num_classes, normalizer_fn=slim.batch_norm,
        #                            normalizer_params=batch_norm_params, activation_fn=None, scope='fc2')
        # end_points['fc2'] = net

    logits = net
    return logits, end_points
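
# Minimal usage sketch for basic_model (illustrative, not part of the original
# module). It assumes a TF1-style graph/session workflow; the 224 x 224 x 3
# input size and the class count are assumptions this file does not pin down:
#
#   images = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
#   logits, end_points = basic_model(images, num_classes=10, is_training=False)
#   probs = tf.nn.softmax(logits)  # squeeze spatial dims first if logits are 4-D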

def deep_cosine_softmax(inputs,
                        num_classes,
                        is_training=True,
                        is_reuse=tf.compat.v1.AUTO_REUSE,
                        keep_prob=0.6,
                        attention_module=None,
                        scope=''):
    '''ResNet-v2-50 backbone with a cosine-softmax head.

    :param inputs: N x H x W x C tensor
    :param num_classes: number of identity classes
    :param is_training: whether batch norm runs in training mode
    :param is_reuse: variable-reuse flag for the backbone and the "ball" scope
    :param keep_prob: dropout keep probability applied before fc1
    :param attention_module: optional attention module forwarded to resnet_v2_50
    :param scope: unused here
    :return: (features, logits) with features L2-normalized and logits given
        by scaled cosine similarity to per-class mean vectors
    '''
    def batch_norm_fn(x):
        return slim.batch_norm(x, scope=tf.compat.v1.get_variable_scope().name + "/bn")
    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        net, _ = \
            resnet_v2.resnet_v2_50(inputs,
                                   num_classes=num_classes,
                                   is_training=is_training,
                                   reuse=is_reuse,
                                   attention_module=attention_module,
                                   scope='resnet_v2_50')

        # ##############################
        # cosine-softmax
        # ##############################
        # Backbone output has shape (?, 1, 1, 2048).
        feature_dim = net.get_shape().as_list()[-1]
        # print("feature dimensionality: ", feature_dim)
        net = slim.flatten(net)
        net = slim.dropout(net, keep_prob=keep_prob)
        net = slim.fully_connected(
            net, feature_dim, normalizer_fn=batch_norm_fn,
            weights_regularizer=slim.l2_regularizer(1e-8),
            scope="fc1",
            weights_initializer=tf.compat.v1.truncated_normal_initializer(stddev=1e-3),
            biases_initializer=tf.zeros_initializer())
        features = net

        # Features in rows, normalize axis 1.
        features = tf.nn.l2_normalize(features, axis=1)

        with slim.variable_scope.variable_scope("ball", reuse=is_reuse):
            weights = slim.model_variable(
                "mean_vectors", (feature_dim, int(num_classes)),
                initializer=tf.compat.v1.truncated_normal_initializer(stddev=1e-3),
                regularizer=None)
            scale = slim.model_variable(
                "scale", (), tf.float32,
                initializer=tf.compat.v1.constant_initializer(0., tf.float32),
                regularizer=slim.l2_regularizer(1e-1))
            # tf.summary.scalar("scale", scale)
            scale = tf.nn.softplus(scale)

        # Mean vectors in columns, normalize axis 0.
        weights_normed = tf.nn.l2_normalize(weights, axis=0)
        logits = scale * tf.matmul(features, weights_normed)

    return features, logits
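
# Minimal smoke-test sketch for the cosine-softmax head (illustrative, not part
# of the original module). The head produces
#     logits_k = softplus(scale) * cos(theta_k)
#              = softplus(scale) * <f / ||f||, w_k / ||w_k||>,
# so plain softmax cross-entropy on these logits learns per-class mean vectors
# on the unit ball. The input size (224 x 224 x 3), class count, and optimizer
# below are assumptions, not values fixed by this file.
if __name__ == '__main__':
    images = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
    labels = tf.compat.v1.placeholder(tf.int64, [None])
    features, logits = deep_cosine_softmax(images, num_classes=100,
                                           is_training=True)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                       logits=logits))
    train_op = tf.compat.v1.train.AdamOptimizer(1e-4).minimize(loss)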