-
Notifications
You must be signed in to change notification settings - Fork 29
/
Copy pathfood_volume_estimation_app.py
149 lines (130 loc) · 5.71 KB
/
food_volume_estimation_app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
import argparse
import base64
import json

import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, request, jsonify, make_response, abort
from keras.models import Model, model_from_json

from food_volume_estimation.volume_estimator import VolumeEstimator, DensityDatabase
from food_volume_estimation.depth_estimation.custom_modules import *
from food_volume_estimation.food_segmentation.food_segmentator import FoodSegmentator
# Flask application exposing the /predict endpoint.
app = Flask(__name__)
# Module-level singletons, populated by load_volume_estimator() before
# the server starts; /predict assumes both are non-None.
estimator = None
density_db = None
def load_volume_estimator(depth_model_architecture, depth_model_weights,
        segmentation_model_weights, density_db_source):
    """Loads volume estimator object and sets up its parameters.

    Populates the module-level globals `estimator`, `graph` and
    `density_db` used by the /predict route.

    Args:
        depth_model_architecture: Path to the depth model architecture
            file (.json).
        depth_model_weights: Path to the depth model weights (.h5).
        segmentation_model_weights: Path to the segmentation model
            weights (.h5).
        density_db_source: Path to the food density database (.xlsx)
            or a Google Sheets ID.
    """
    # Create estimator object and intialize
    global estimator
    estimator = VolumeEstimator(arg_init=False)
    with open(depth_model_architecture, 'r') as read_file:
        custom_losses = Losses()
        # Custom layers/losses required to deserialize the monovideo model.
        objs = {'ProjectionLayer': ProjectionLayer,
                'ReflectionPadding2D': ReflectionPadding2D,
                'InverseDepthNormalization': InverseDepthNormalization,
                'AugmentationLayer': AugmentationLayer,
                'compute_source_loss': custom_losses.compute_source_loss}
        # NOTE(review): model_from_json() expects a JSON *string*; this works
        # only if the architecture file stores the to_json() string as its
        # top-level JSON value — confirm against how the file was saved.
        model_architecture_json = json.load(read_file)
        estimator.monovideo = model_from_json(model_architecture_json,
                                              custom_objects=objs)
    # Freeze all weights via the estimator's (name-mangled) private helper.
    estimator._VolumeEstimator__set_weights_trainable(estimator.monovideo,
                                                      False)
    estimator.monovideo.load_weights(depth_model_weights)
    estimator.model_input_shape = (
        estimator.monovideo.inputs[0].shape.as_list()[1:])
    # Extract the depth sub-network as a standalone inference model.
    depth_net = estimator.monovideo.get_layer('depth_net')
    estimator.depth_model = Model(inputs=depth_net.inputs,
                                  outputs=depth_net.outputs,
                                  name='depth_model')
    print('[*] Loaded depth estimation model.')
    # Depth model configuration
    MIN_DEPTH = 0.01
    MAX_DEPTH = 10
    # Disparity bounds are the reciprocals of the depth bounds.
    estimator.min_disp = 1 / MAX_DEPTH
    estimator.max_disp = 1 / MIN_DEPTH
    estimator.gt_depth_scale = 0.35  # Ground truth expected median depth
    # Create segmentator object
    estimator.segmentator = FoodSegmentator(segmentation_model_weights)
    # Set plate adjustment relaxation parameter
    estimator.relax_param = 0.01
    # Need to define default graph due to Flask multiprocessing
    # (TF 1.x API: tf.get_default_graph does not exist in TF 2.x eager mode).
    global graph
    graph = tf.get_default_graph()
    # Load food density database
    global density_db
    density_db = DensityDatabase(density_db_source)
@app.route('/predict', methods=['POST'])
def volume_estimation():
    """Receives an HTTP request and returns the estimated weight of the
    food in the image given.

    Expected JSON body:
        img: Image file content as an array of byte values
            (a base64 string variant is sketched in the commented line).
        food_type: Food type to match against the density database.
        plate_diameter: The expected plate diameter to use for depth
            scaling. If omitted then no plate scaling is applied.

    Returns:
        JSON object with the matched food type and estimated weight,
        or HTTP 406 when the image or food type cannot be read.
    """
    # Decode incoming byte stream to get an image
    try:
        content = request.get_json()
        img_encoded = content['img']
        # Pixel/file bytes are unsigned (0-255): uint8 is required here.
        # The original np.int8 wrapped every value > 127, corrupting the
        # buffer before cv2.imdecode saw it. Building the array directly
        # also avoids the deprecated np.fromstring text round trip.
        np_img = np.array(img_encoded, dtype=np.uint8)  # If in byteArray
        #np_img = np.frombuffer(base64.b64decode(img_encoded), np.uint8)  # If in base64
        img = cv2.imdecode(np_img, cv2.IMREAD_COLOR)
    except (TypeError, KeyError, ValueError, OverflowError):
        abort(406)
    # imdecode signals failure by returning None rather than raising;
    # reject it here instead of crashing inside the estimator.
    if img is None:
        abort(406)
    # Get food type
    try:
        food_type = content['food_type']
    except (TypeError, KeyError):
        abort(406)
    # Get expected plate diameter from form data or set to 0 and ignore
    try:
        plate_diameter = float(content['plate_diameter'])
    except (TypeError, KeyError, ValueError):
        plate_diameter = 0
    # Estimate volumes (default graph must be re-entered per request
    # because of Flask multiprocessing — see load_volume_estimator).
    with graph.as_default():
        volumes = estimator.estimate_volume(img, fov=70,
            plate_diameter_prior=plate_diameter)
    # Convert from m^3 to mL (1 m^3 == 1e6 mL)
    volumes = [v * 1e6 for v in volumes]
    # Convert volumes to weight - assuming a single food type
    db_entry = density_db.query(food_type)
    density = db_entry[1]
    weight = sum(v * density for v in volumes)
    # Return values
    return_vals = {
        'food_type_match': db_entry[0],
        'weight': weight
    }
    return make_response(jsonify(return_vals), 200)
if __name__ == '__main__':
    # Command-line entry point: collect model/database paths, load the
    # estimator once, then serve the API on all interfaces.
    cli = argparse.ArgumentParser(
        description='Food volume estimation API.')
    cli.add_argument('--depth_model_architecture', type=str,
                     required=True,
                     metavar='/path/to/architecture.json',
                     help='Path to depth model architecture (.json).')
    cli.add_argument('--depth_model_weights', type=str,
                     required=True,
                     metavar='/path/to/depth/weights.h5',
                     help='Path to depth model weights (.h5).')
    cli.add_argument('--segmentation_model_weights', type=str,
                     required=True,
                     metavar='/path/to/segmentation/weights.h5',
                     help='Path to segmentation model weights (.h5).')
    cli.add_argument('--density_db_source', type=str,
                     required=True,
                     metavar='/path/to/plot/database.xlsx or <ID>',
                     help='Path to food density database (.xlsx) '
                          'or Google Sheets ID.')
    cfg = cli.parse_args()
    load_volume_estimator(cfg.depth_model_architecture,
                          cfg.depth_model_weights,
                          cfg.segmentation_model_weights,
                          cfg.density_db_source)
    app.run(host='0.0.0.0')