forked from tamerthamoqa/facenet-realtime-face-recognition
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy path: server.py
219 lines (171 loc) · 8.84 KB
/
server.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
#!/usr/bin/env python3
import cv2 # for web camera
import tensorflow as tf
import os
from scipy.misc import imread
from lib.src.align import detect_face # for MTCNN face detection
from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
from waitress import serve
from utils import (
load_model, get_face, get_faces_live, forward_pass, save_embedding, load_embeddings,
identify_face, allowed_file, remove_file_extension, save_image
)
# Flask application and storage-path configuration.
app = Flask(__name__)
app.secret_key = os.urandom(24)  # random per-process key for session signing

# Absolute paths (relative to this file) for uploaded images and their embeddings.
APP_ROOT = os.path.dirname(os.path.abspath(__file__))
uploads_path = os.path.join(APP_ROOT, 'uploads')
embeddings_path = os.path.join(APP_ROOT, 'embeddings')

# Allowed image formats for upload (set literal instead of set([...]) call).
allowed_set = {'png', 'jpg', 'jpeg'}
@app.route('/upload', methods=['POST', 'GET'])
def get_image():
    """Receive an image file via POST, embed it with FaceNet and persist both.

    The cropped 160x160 face image is written to the 'uploads' folder and its
    embedding (a numpy array) to the 'embeddings' folder, keyed by the image
    filename.  Renders 'upload_result.html' with a status message, or returns
    a plain error string for malformed requests.
    """
    # Guard clauses for anything that is not a well-formed POST upload.
    if request.method != 'POST':
        return "POST HTTP method required!"
    if 'file' not in request.files:
        return "No file part"
    file = request.files['file']
    filename = file.filename
    if filename == "":
        return "No selected file"
    if not (file and allowed_file(filename=filename, allowed_set=allowed_set)):
        # BUG FIX: the original fell through with no return here, so a
        # disallowed file extension made the view return None -> Flask 500.
        return "File extension not allowed! Allowed formats: png, jpg, jpeg"

    filename = secure_filename(filename=filename)
    # Read image file as a numpy array in RGB order.
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — consider imageio.imread.
    img = imread(name=file, mode='RGB')
    # Detect and crop a 160x160 region containing a human face (MTCNN).
    img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
    if img is None:
        return render_template("upload_result.html",
                               status="Image upload was unsuccessful! No human face was detected.")

    # Forward pass through the persistent FaceNet session to get the embedding.
    embedding = forward_pass(
        img=img, session=facenet_persistent_session,
        images_placeholder=images_placeholder, embeddings=embeddings,
        phase_train_placeholder=phase_train_placeholder,
        image_size=image_size
    )
    # Save cropped face image to the 'uploads/' folder.
    save_image(img=img, filename=filename, uploads_path=uploads_path)
    # The numpy embedding file is named after the image, minus its extension.
    filename = remove_file_extension(filename=filename)
    save_embedding(embedding=embedding, filename=filename, embeddings_path=embeddings_path)
    return render_template("upload_result.html",
                           status="Image uploaded and embedded successfully!")
@app.route('/predictImage', methods=['POST', 'GET'])
def predict_image():
    """Receive an image via POST and identify the face it contains.

    The image is embedded with FaceNet and compared (euclidean distance) against
    the stored embeddings; the uploaded file itself is NOT persisted.  Renders
    'predict_result.html' with the predicted identity or a failure message.
    """
    # Guard clauses for anything that is not a well-formed POST upload.
    if request.method != 'POST':
        return "POST HTTP method required!"
    if 'file' not in request.files:
        return "No file part"
    file = request.files['file']
    filename = file.filename
    if filename == "":
        return "No selected file"
    if not (file and allowed_file(filename=filename, allowed_set=allowed_set)):
        # BUG FIX: the original had no return here, so a disallowed file
        # extension made the view return None -> Flask 500.
        return "File extension not allowed! Allowed formats: png, jpg, jpeg"

    # Read image file as a numpy array in RGB order.
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — consider imageio.imread.
    img = imread(name=file, mode='RGB')
    # Detect and crop a 160x160 region containing a human face (MTCNN).
    img = get_face(img=img, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
    if img is None:
        return render_template(
            'predict_result.html',
            identity="Operation was unsuccessful! No human face was detected."
        )

    embedding = forward_pass(
        img=img, session=facenet_persistent_session,
        images_placeholder=images_placeholder, embeddings=embeddings,
        phase_train_placeholder=phase_train_placeholder,
        image_size=image_size
    )
    embedding_dict = load_embeddings()
    if not embedding_dict:
        return render_template(
            'predict_result.html',
            identity="No embedding files detected! Please upload image files for embedding!"
        )
    # Nearest neighbour by euclidean distance against the 'embeddings/' store.
    identity = identify_face(embedding=embedding, embedding_dict=embedding_dict)
    return render_template('predict_result.html', identity=identity)
@app.route("/live", methods=['GET', 'POST'])
def face_detect_live():
    """Run real-time face recognition on the local web camera until 'q' is pressed.

    Opens OpenCV camera 0, identifies every detected face against the stored
    embeddings and draws a labelled rectangle per face.  NOTE(review): the
    OpenCV window opens on the SERVER machine, so this route is only useful
    when server and browser run on the same host.
    """
    embedding_dict = load_embeddings()
    if not embedding_dict:
        return "No embedding files detected! Please upload image files for embedding!"

    cap = cv2.VideoCapture(0)
    try:
        while True:
            return_code, frame = cap.read()  # frame comes back in OpenCV's native layout
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # BUG FIX: cap.read() can return (False, None); the original
            # dereferenced frame.size unconditionally and crashed in that case.
            if frame is None or frame.size == 0:
                continue

            faces, rects = get_faces_live(img=frame, pnet=pnet, rnet=rnet, onet=onet, image_size=image_size)
            if faces:
                for face_img, rect in zip(faces, rects):
                    face_embedding = forward_pass(
                        img=face_img, session=facenet_persistent_session,
                        images_placeholder=images_placeholder, embeddings=embeddings,
                        phase_train_placeholder=phase_train_placeholder,
                        image_size=image_size
                    )
                    # Nearest neighbour by euclidean distance against 'embeddings/'.
                    identity = identify_face(embedding=face_embedding, embedding_dict=embedding_dict)
                    cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (255, 215, 0), 2)
                    # Centre the label horizontally over the face box (unused H removed).
                    W = int(rect[2] - rect[0]) // 2
                    cv2.putText(frame, identity, (rect[0] + W - (W // 2), rect[1] - 7),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 215, 0), 1, cv2.LINE_AA)
            # Show the (possibly annotated) frame; the original called imshow twice.
            cv2.imshow('Video', frame)
    except Exception as e:
        print(e)
    finally:
        # BUG FIX: the camera and windows were leaked when an exception occurred;
        # release them on every exit path.
        cap.release()
        cv2.destroyAllWindows()
    # BUG FIX: the exception path previously returned None (Flask 500).
    return render_template('index.html')
@app.route("/")
def index_page():
    """Serve the landing page used for manual image file uploads."""
    template_name = "index.html"
    return render_template(template_name)
@app.route("/predict")
def predict_page():
    """Serve the page used for manual image file uploads for prediction."""
    template_name = "predict.html"
    return render_template(template_name)
if __name__ == '__main__':
    """Server and FaceNet Tensorflow configuration."""
    # Load FaceNet model and configure placeholders for forward pass into the FaceNet model to calculate embeddings
    # NOTE(review): load_model presumably registers the frozen graph as the
    # default graph — the get_tensor_by_name lookups below depend on that; confirm in utils.load_model.
    model_path = 'model/20170512-110547/20170512-110547.pb'
    facenet_model = load_model(model_path)
    # tf.ConfigProto / tf.Session are TensorFlow 1.x APIs.
    config = tf.ConfigProto()
    # Allocate GPU memory on demand instead of reserving it all up front.
    config.gpu_options.allow_growth = True
    image_size = 160  # edge length (pixels) of the cropped face fed to FaceNet
    # Input/output tensors of the frozen FaceNet graph, looked up by name.
    images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
    # Initiate persistent FaceNet model in memory
    # (one long-lived session shared by all request handlers above)
    facenet_persistent_session = tf.Session(graph=facenet_model, config=config)
    # Create Multi-Task Cascading Convolutional (MTCNN) neural networks for Face Detection
    pnet, rnet, onet = detect_face.create_mtcnn(sess=facenet_persistent_session, model_path=None)
    # Start flask application on waitress WSGI server
    serve(app=app, host='0.0.0.0', port=5000)