-
Notifications
You must be signed in to change notification settings - Fork 9
/
Copy pathcommon.py
127 lines (98 loc) · 4.5 KB
/
common.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
#!/usr/bin/env python
# BUG FIX: the shebang was "#!/bin/bash" — this is a Python module, not a
# shell script.
# Shared constants and default hyper-parameter tables for the models in
# this package.
import copy
import logging

import Logger  # project-local module providing ``Logger.project_name``
class Enum(set):
    """A string enum backed by a set: ``e.member`` returns ``"member"``.

    Any name contained in the set is readable as an attribute and
    evaluates to itself, e.g. ``Enum(["a"]).a == "a"``.

    Raises:
        AttributeError: when *name* is not a member of the set.
    """
    def __getattr__(self, name):
        if name in self:
            return name
        # BUG FIX (minor): include the missing name in the error instead of
        # raising a bare AttributeError — same exception type, so existing
        # ``except AttributeError`` handlers are unaffected.
        raise AttributeError(name)
# Supported activation functions.  NOTE(review): "sgmoid" is a long-standing
# misspelling of "sigmoid" but is used consistently as a runtime key across
# this module, so it is preserved exactly.
act = Enum(["sgmoid", "tanh", "linear", "relu"])
# Supported loss functions.
lo = Enum(["negative_log_likelihood", "least_square", "weighted_approximate_rank_pairwise"])
# Gradient identifiers: the valid (activation, loss) fused pairs plus the
# plain per-activation gradients.
grad = Enum([
    "sgmoid_negative_log_likelihood",
    "linear_least_square",
    "linear_weighted_approximate_rank_pairwise",
    "sgmoid",
    "tanh",
    "linear",
    "relu",
])
# Integer-id -> name lookups (ids presumably match CLI/config encodings —
# confirm against the option parser).
ha_map = {0: act.tanh, 1: act.linear, 2: act.relu}
oa_map = {0: act.sgmoid, 1: act.linear}
lo_map = {0: lo.negative_log_likelihood, 1: lo.least_square, 2: lo.weighted_approximate_rank_pairwise}
def actlo2grad(actid, loid):
    """Map an (activation, loss) pair to its fused gradient identifier.

    Args:
        actid: a member of ``act`` (e.g. ``act.sgmoid``).
        loid: a member of ``lo`` (e.g. ``lo.least_square``).

    Returns:
        The matching member of ``grad``.

    Raises:
        Exception: when the combination has no supported gradient.
    """
    if actid == act.sgmoid and loid == lo.negative_log_likelihood:
        return grad.sgmoid_negative_log_likelihood
    elif actid == act.linear and loid == lo.least_square:
        return grad.linear_least_square
    elif actid == act.linear and loid == lo.weighted_approximate_rank_pairwise:
        return grad.linear_weighted_approximate_rank_pairwise
    else:
        # BUG FIX: the original read undefined globals ``act_name`` and
        # ``lo_name`` here, so unsupported combinations crashed with a
        # NameError instead of this message.  The Enum members ARE the
        # human-readable names, so use them directly.
        msg = "Activation (%s) and loss (%s) lead to no grad" % (actid, loid)
        logger = logging.getLogger(Logger.project_name)
        logger.error(msg)
        raise Exception(msg)
# Optimization strategies and their integer-id lookup.
op = Enum(["gradient", "alternative_least_square"])
op_map = {0: op.gradient, 1: op.alternative_least_square}
# Sampling strategies.
st = Enum(["full_sampler", "instance_sampler"])
st_map = {0: st.full_sampler, 1: st.instance_sampler}
# Memory modes (in-core vs. out-of-core training).
m = Enum(["internal_memory", "external_memory"])
m_map = {0: m.internal_memory, 1: m.external_memory}
#################### default
# Baseline hyper-parameters inherited by every model variant below.
# Key meanings are inferred from short names — confirm against the callers:
# h=hidden size, ha/oa=hidden/output activation, l=loss, l2=L2 weight,
# b=batch size, i=iterations, st/sr/sp=sampler type/rate/probability,
# op=optimizer, m=memory mode, r=learning rate.
default_params = {
    "h": 100,
    "ha": act.tanh,
    "oa": act.sgmoid,
    "l": lo.negative_log_likelihood,
    "l2": 0.001,
    "b": 10,
    "i": 20,
    "st": st.instance_sampler,
    "sr": 5,
    "sp": 0.01,
    "op": op.gradient,
    "m": m.internal_memory,
    "r": 0.1,
    "sizes": [],
    "svdsk": 1000,
}
##################### default params for representation
# Representation model: a copy of the baseline defaults.  The overrides
# below restate the inherited values verbatim; they are kept so the full
# configuration for this variant is visible in one place.
rep_default_params = copy.copy(default_params)
rep_default_params.update({
    "h": 100,
    "ha": act.tanh,
    "oa": act.sgmoid,
    "l": lo.negative_log_likelihood,
    "l2": 0.001,
    "b": 10,
    "i": 20,
    "st": st.instance_sampler,
    "sr": 5,
    "sp": 0.01,
    "op": op.gradient,
    "m": m.internal_memory,
    "r": 0.1,
})
##################################### default for leml
# LEML: linear activations with least-square loss, solved by alternating
# least squares rather than gradient descent.
leml_default_params = copy.copy(default_params)
leml_default_params.update({
    "h": 100,
    "ha": act.linear,
    "oa": act.linear,
    "l": lo.least_square,
    "l2": 0.001,
    "b": 10,
    "i": 20,
    "op": op.alternative_least_square,
    "m": m.internal_memory,
    "svdsk": 1000,
})
#################################### default for wsabie
# WSABIE: linear activations trained with the WARP (weighted approximate
# rank pairwise) loss over a full sampler.
wsabie_default_params = copy.copy(default_params)
wsabie_default_params.update({
    "h": 100,
    "l2": 0.001,
    "b": 10,
    "i": 20,
    "sp": 0.01,
    # FIX: "op" was assigned twice in the original (same value both times);
    # the redundant second assignment is removed.
    "op": op.gradient,
    "m": m.internal_memory,
    "r": 0.1,
    "ha": act.linear,
    "oa": act.linear,
    "l": lo.weighted_approximate_rank_pairwise,
    "st": st.full_sampler,
})