-
Notifications
You must be signed in to change notification settings - Fork 3
/
coral_util.py
165 lines (136 loc) · 5.42 KB
/
coral_util.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TensorFlow Lite Utils.
Copyright (c) 2020 Nobuo Tsukamoto
Modified 2023 Chris Maunder @ CodeProject
This software is released under the MIT License.
See the LICENSE file in the project root for more information.
"""
from ctypes import *
from typing import Any, Tuple

import numpy as np
def make_interpreter(tpu_model_file: str, cpu_model_file: str = None,
                     num_of_threads: int = 1) -> Tuple[Any, bool]:
    """ Make a tf-lite interpreter, preferring the Edge TPU when available.

    If tpu_model_file is provided, but no cpu_model_file, then we assume the
    caller has determined the libraries and hardware that is available and has
    supplied a suitable file. Otherwise, this method will assume the model file
    is an edgetpu model but will sniff libraries and hardware and fallback to
    cpu_model_file if edge TPU support isn't available.

    Args:
        tpu_model_file: Model file path for TPUs. May be None/empty to skip
            the TPU path entirely.
        cpu_model_file: Model file path for CPUs (fallback). May be None.
        num_of_threads: Number of threads for CPU inference.

    Return:
        (interpreter, edge_tpu) tuple. `interpreter` is the tf-lite
        interpreter, or None when no usable model could be loaded;
        `edge_tpu` is True only when the Edge TPU delegate was attached.
    """
    # First determine if we have the TensorFlow-Lite runtime installed, or the
    # whole TensorFlow package. In either case we're loading TFLite models.
    try:
        from tflite_runtime.interpreter import Interpreter, load_delegate
    except ImportError:
        import tensorflow as tf
        Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate

    # Initially try loading EdgeTPU delegates for the Coral TPU. If this fails fallback.
    # For Coral edge TPU you load up a delegate that will handle the TPU computations, and
    # pass that to the Interpreter constructor. Everything else is vanilla TFLite.
    # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu
    delegates = None

    # Only try and load delegates if we're trying to use a TPU
    if tpu_model_file:
        try:
            import platform
            delegate_name = {
                'Linux':   'libedgetpu.so.1',
                'Darwin':  'libedgetpu.1.dylib',
                'Windows': 'edgetpu.dll'}[platform.system()]
            delegates = [load_delegate(delegate_name)]
        except Exception:
            # Deliberate best-effort: a missing Edge TPU library or device
            # simply means we fall through to the CPU path below.
            delegates = None

    interpreter = None
    edge_tpu    = False

    if delegates and tpu_model_file:
        try:
            # TensorFlow-Lite loading a TF-Lite TPU model
            # CRASH: On Windows, the interpreter.__init__ method accepts experimental
            # delegates. These are used in self._interpreter.ModifyGraphWithDelegate,
            # which fails on Windows
            interpreter = Interpreter(model_path=tpu_model_file,
                                      experimental_delegates=delegates)
            edge_tpu = True
        except Exception:
            # Fall back to the CPU model if one was supplied.
            if cpu_model_file:
                # BUGFIX: num_of_threads was previously accepted but ignored.
                interpreter = Interpreter(model_path=cpu_model_file,
                                          num_threads=num_of_threads)
    else:
        # TensorFlow loading a TF-Lite CPU model
        if cpu_model_file:
            interpreter = Interpreter(model_path=cpu_model_file,
                                      num_threads=num_of_threads)

    return (interpreter, edge_tpu)
"""
if "edgetpu.tflite" in model_file and EDGETPU_SHARED_LIB:
print("EdgeTpu delegate")
return tflite.Interpreter(
model_path=model_file,
experimental_delegates=[tflite.load_delegate(EDGETPU_SHARED_LIB)],
)
elif delegate_library is not None:
print("{} delegate".format(os.path.splitext(os.path.basename(delegate_library))[0]))
option = {"backends": "CpuAcc",
"logging-severity": "info",
"number-of-threads": str(num_of_threads),
"enable-fast-math":"true"}
print(option)
return tflite.Interpreter(
model_path=model_file,
experimental_delegates=[
tflite.load_delegate(delegate_library, options=option)
],
)
else:
return tflite.Interpreter(model_path=model_file, num_threads=num_of_threads)
"""
def set_input_tensor(interpreter, image):
    """ Sets the input tensor.

    Copies *image* in place into the interpreter's first input tensor.

    Args:
        interpreter: Interpreter object.
        image: a function that takes a (width, height) tuple,
               and returns an RGB image resized to those dimensions.
    """
    first_input = interpreter.get_input_details()[0]
    destination = interpreter.tensor(first_input["index"])()[0]
    destination[:, :] = image.copy()
def get_output_tensor(interpreter, index):
    """ Returns the output tensor at the given index.

    Args:
        interpreter: Interpreter object with an invoked model.
        index: position of the output in get_output_details().

    Returns:
        The output tensor with singleton dimensions squeezed out.
    """
    details = interpreter.get_output_details()[index]
    raw = interpreter.get_tensor(details["index"])
    return np.squeeze(raw)
def get_output_results(interpreter, field: str):
    """ Returns the output tensor matching the named detection field.

    Detection models expose four outputs whose order varies between model
    versions, so the tensor is identified by shape rather than position:
    boxes are 3-D, the detection count is 1-D, and the two 2-D outputs are
    told apart by value range (class ids exceed 1.0, scores do not).

    Args:
        interpreter: Interpreter object with an invoked detection model.
        field: one of "boxes", "count", "classes" or "scores".

    Returns:
        The squeezed tensor for the requested field. Falls back to the
        last output examined when no shape matches.
    """
    tensor = None
    for idx in range(4):
        details = interpreter.get_output_details()[idx]
        tensor = interpreter.get_tensor(details["index"])
        rank = np.ndim(tensor)
        if (rank == 3 and field == "boxes") or (rank == 1 and field == "count"):
            break
        if rank == 2:
            looks_like_classes = tensor.max() > 1.0
            if looks_like_classes and field == "classes":
                break
            if not looks_like_classes and field == "scores":
                break
    return np.squeeze(tensor)