-
Notifications
You must be signed in to change notification settings - Fork 0
/
autoMontage.pyx
172 lines (148 loc) · 6.13 KB
/
autoMontage.pyx
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import face_recognition as fr
cimport cython
from keras.preprocessing.image import img_to_array
import imutils
import cv2
from keras.models import load_model
import numpy as np
def _cross_reference(imgs, faces_map, img_idx):
    """Match every face detected in imgs[img_idx] against the people
    already collected in faces_map.

    A face that matches an existing person is appended to that person's
    sighting list as (img_idx, box); a face that matches nobody is
    registered as a NEW person under a fresh key.

    Fixes two defects in the original copy-pasted passes:
      * unmatched faces were appended under the loop variable `f`
        (i.e. merged into the *last* known person) instead of becoming
        new people;
      * when faces_map was empty (no faces in image 1) the fallback
        raised NameError because `f` was never bound.
    """
    img = imgs[img_idx]
    boxes = fr.face_locations(img)
    matched = []      # deferred (key, sighting) pairs — don't mutate while iterating
    unmatched = []    # sightings that belong to nobody seen so far
    for box in boxes:
        top, right, bottom, left = box
        crop = img[top:bottom, left:right, :]
        # Hoisted out of the inner loop: the encoding of the current face
        # does not depend on which known face it is compared against.
        encodings_curr = fr.face_encodings(crop, model='large')
        matched_key = None
        for key, known_faces in faces_map.items():
            for src_idx, (t, r, b, l) in known_faces:
                known_crop = imgs[src_idx][t:b, l:r, :]
                encodings_known = fr.face_encodings(known_crop, model='large')
                if (encodings_known and encodings_curr
                        and fr.compare_faces(encodings_known, encodings_curr[0])[0]):
                    matched_key = key
                    break
            if matched_key is not None:
                break
        if matched_key is not None:
            matched.append((matched_key, (img_idx, box)))
        else:
            unmatched.append((img_idx, box))
    for key, sighting in matched:
        faces_map[key].append(sighting)
    # Fresh integer keys for people first seen in this image.
    next_key = max(faces_map, default=-1) + 1
    for sighting in unmatched:
        faces_map[next_key] = [sighting]
        next_key += 1


def _pick_best_emotions(imgs, faces_map):
    """For each person, return the (image_index, box) sighting whose
    predicted emotion ranks highest; ties on rank are broken by the
    classifier's confidence.  Returns the list of winning sightings, in
    faces_map insertion order.
    """
    emotion_model_path = 'models/_mini_XCEPTION.102-0.66.hdf5'
    # Load the emotion CNN once per call; compile=False since we only predict.
    emotion_classifier = load_model(emotion_model_path, compile=False)
    EMOTIONS = ["angry", "disgust", "scared", "happy", "sad", "surprised",
                "neutral"]
    # Higher value == "more positive" emotion; happy wins over everything.
    emotions_map = {"scared": 1, "disgust": 2, "sad": 3, "angry": 4,
                    "neutral": 5, "surprised": 6, "happy": 7}
    new_masks = []
    for person in faces_map.values():
        best_rank = -1
        best_prob = 0
        best_face = None
        for face in person:
            src_idx, (top, right, bottom, left) = face
            face_img = imgs[src_idx][top:bottom, left:right, :]
            # NOTE(review): face_recognition normally yields RGB arrays but
            # COLOR_BGR2GRAY assumes BGR — confirm the channel order of imgs.
            gray = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
            # Resize the face ROI to the 64x64 input the mini-XCEPTION model
            # expects (the original comment wrongly said 28x28), scale to
            # [0, 1] and add the batch dimension.
            roi = cv2.resize(gray, (64, 64))
            roi = roi.astype("float") / 255.0
            roi = img_to_array(roi)
            roi = np.expand_dims(roi, axis=0)
            preds = emotion_classifier.predict(roi)[0]
            prob = np.max(preds)
            rank = emotions_map[EMOTIONS[preds.argmax()]]
            if rank > best_rank or (rank == best_rank and prob > best_prob):
                best_rank = rank
                best_prob = prob
                best_face = face
        new_masks.append(best_face)
    return new_masks


@cython.boundscheck(False)  # Deactivate bounds checking
@cython.wraparound(False)   # Deactivate negative indexing.
def autoMontage(imgs):
    """Select, for each person appearing across the input photos, the single
    crop in which they show the most positive emotion.

    Parameters
    ----------
    imgs : sequence of image arrays (H, W, 3)
        Any number of photos (the original version required exactly 4;
        this accepts 1..n, which is backward compatible).

    Returns
    -------
    list of (image_index, (top, right, bottom, left))
        One winning sighting per recognized person.
    """
    cdef int i, img_idx
    if not imgs:
        return []
    # Seed the people registry with every face found in the first image.
    faces_map = dict()
    print("Recognizing people in image 1")
    first_boxes = fr.face_locations(imgs[0])
    for i in range(len(first_boxes)):
        faces_map[i] = [(0, first_boxes[i])]
    # Cross-reference each subsequent image against the registry.
    for img_idx in range(1, len(imgs)):
        print("Recognizing people and cross-referencing in image %d" % (img_idx + 1))
        _cross_reference(imgs, faces_map, img_idx)
    print("People recognized and cross referenced. Recognizing emotions")
    return _pick_best_emotions(imgs, faces_map)