forked from sled-group/Comparative-Learning
-
Notifications
You must be signed in to change notification settings - Fork 0
/
config.py
104 lines (86 loc) · 2.66 KB
/
config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
'''
Dataset attribute configuration.

Learning Attributes:
- Color (6)
- Material (3)
- Shape (8)
Additional Attributes:
- Color (2)
- Material (1)
- Shape (3)
Flexibility:
- Camera angle (6)
- Lighting (3)
Variability (only in testing; counts exclude the default value):
- Size (2) [Default: large]
- Stretch (3) [Default: normal]
- Color shade (2) [Default: base]
Naming convention:
[color]_[material]_[shape]_shade_[]_stretch_[]_scale_[]_brightness_[]_view_[]_[tyimg].png
e.g.
aqua_glass_cone_shade_base_stretch_normal_scale_large_brightness_bright_view_0_-2_3_rgba.png
'''
# Learning attributes: the nameable values of each learned concept axis.
colors = ["brown", "green", "blue", "aqua", "purple", "red", "yellow", "white"]
materials = ["rubber", "metal", "plastic", "glass"]
shapes = [
    "cube", "cylinder", "sphere", "cone", "torus", "gear",
    "torus_knot", "sponge", "spot", "teapot", "suzanne",
]
# Full vocabulary: every attribute word, in color/material/shape order.
vocabs = colors + materials + shapes

# Flexibility axes: camera positions (encoded "x_y_z") and lighting levels.
views = ["0_3_2", "-2_-2_2", "-2_2_2", "1.5_-1.5_3", "1.5_1.5_3", "0_-2_3"]
brightness = ["dim", "normal", "bright"]

# Variability axes: training is pinned to the defaults; testing adds variants.
scale_train = ["large"]
stretch_train = ["normal"]
shade_train = ["base"]
scale_test = ["small", "medium", "large"]
stretch_test = ["normal", "x", "y", "z"]
shade_test = ["base", "light", "dark"]

# Image modalities rendered per scene (suffix of each image filename).
tyimgs = ["rgba", "depth", "normal", "object_coordinates", "segmentation"]
# Attribute-value spaces for sampling.  Train and test share the learning
# and flexibility axes and differ only in the variability axes
# (scale / stretch / shade), where training is pinned to the defaults.
_shared_axes = {
    "color": colors,
    "material": materials,
    "shape": shapes,
    "view": views,
    "brightness": brightness,
}
dic_train = {
    **_shared_axes,
    "scale": scale_train,
    "stretch": stretch_train,
    "shade": shade_train,
}
dic_test = {
    **_shared_axes,
    "scale": scale_test,
    "stretch": stretch_test,
    "shade": shade_test,
}
# Axis-name groups used to iterate over attribute dimensions.
types_learning = ['color', 'material', 'shape']
# NOTE: 'types_flebility' is a historical misspelling of "flexibility".
# The name is public and may be imported elsewhere, so it is kept;
# prefer the correctly spelled alias below in new code.
types_flebility = ['color', 'material', 'shape', 'brightness', 'view']
types_flexibility = types_flebility  # backward-compatible, correctly spelled alias
types_variability = ['scale', 'stretch', 'shade']
# Every axis name (variability axes listed in reverse declaration order).
types_all = ['color', 'material', 'shape', 'brightness',
             'view', 'shade', 'stretch', 'scale']
# Split-list filenames: each .txt file enumerates the samples of one split.
# Each variable's value is simply its own name plus the ".txt" suffix.
# NOTE(review): the "bn"/"bsn" prefixes presumably encode split families —
# confirm against the data-generation scripts that produce these files.
bn_n_train = "bn_n_train.txt"                    # 23 attrs, -9 combs
bsn_novel_train_1 = "bsn_novel_train_1.txt"      # 20 attrs, -9 combs
bsn_novel_train_2 = "bsn_novel_train_2.txt"
bsn_novel_train_2_nw = "bsn_novel_train_2_nw.txt"
bsn_novel_train_2_old = "bsn_novel_train_2_old.txt"
bn_n_test = "bn_n_test.txt"                      # 23 attrs, all combs
bsn_novel_test_1 = "bsn_novel_test_1.txt"
bsn_novel_test_2_nw = "bsn_novel_test_2_nw.txt"
bsn_novel_test_2_old = "bsn_novel_test_2_old.txt"
bn_test = "bn_test.txt"
bsn_test_1 = "bsn_test_1.txt"
bsn_test_2_nw = "bsn_test_2_nw.txt"
bsn_test_2_old = "bsn_test_2_old.txt"
# Training hyper-parameters.
resize = 224       # image resize target (pixels)
lr = 0.001         # learning rate (1e-3)
epochs = 5
sim_batch = 128    # NOTE(review): presumably a similarity-phase batch size — confirm at call sites
gen_batch = 128    # NOTE(review): presumably a generation-phase batch size — confirm at call sites
batch_size = 32

# Model architecture dimensions.
hidden_dim_clip = 128
latent_dim = 16