inference.py
import cv2
import numpy as np

from pyvino_utils.models.openvino_base.base_model import Base

__all__ = [
    "FaceDetection",
    "MaskDetection",
]

COLOR = {"Green": (0, 255, 0), "Red": (0, 0, 255)}

class FaceDetection(Base):
    """Class for the Face Detection Model."""

    def __init__(
        self,
        model_name,
        source_width=None,
        source_height=None,
        device="CPU",
        threshold=0.60,
        extensions=None,
        **kwargs,
    ):
        super().__init__(
            model_name,
            source_width,
            source_height,
            device,
            threshold,
            extensions,
            **kwargs,
        )

    def preprocess_output(self, inference_results, image, show_bbox=False, **kwargs):
        """Parse detections and optionally draw bounding boxes onto the frame."""
        results = {}
        if not (self._init_image_w and self._init_image_h):
            raise RuntimeError("Initial image width and height cannot be None.")
        if len(inference_results) == 1:
            inference_results = inference_results[0]
        bbox_coord = []
        # Output shape is 1x1xNx7; each detection is
        # [image_id, label, conf, x_min, y_min, x_max, y_max] with normalised coordinates.
        for box in inference_results[0][0]:
            conf = box[2]
            if conf >= self.threshold:
                # Scale the normalised coordinates back to the source frame size.
                xmin = int(box[3] * self._init_image_w)
                ymin = int(box[4] * self._init_image_h)
                xmax = int(box[5] * self._init_image_w)
                ymax = int(box[6] * self._init_image_h)
                bbox_coord.append((xmin, ymin, xmax, ymax))
                if show_bbox:
                    self.draw_output(image, xmin, ymin, xmax, ymax, **kwargs)
        results["image"] = image
        results["bbox_coord"] = bbox_coord
        return results

    @staticmethod
    def draw_output(
        image,
        xmin,
        ymin,
        xmax,
        ymax,
        label="Person",
        padding_size=(0.05, 0.25),
        scale=2,
        thickness=2,
        **kwargs,
    ):
        """Draw a bounding box and a mask/no-mask label onto the frame."""
        _label = None
        mask_detected = kwargs.get("mask_detected")
        if mask_detected is not None:
            # Colour the label by whether the mask score clears the (optional) threshold.
            _label = (
                (f"{label} Wearing Mask", COLOR["Green"])
                if float(mask_detected) > kwargs.get("threshold", 0.1)
                else (f"{label} NOT wearing a Mask!!!", COLOR["Red"])
            )
        label = _label if _label is not None else (label, COLOR["Green"])
        cv2.rectangle(
            image, (xmin, ymin), (xmax, ymax), color=label[1], thickness=thickness,
        )
        ((label_width, label_height), _) = cv2.getTextSize(
            label[0], cv2.FONT_HERSHEY_PLAIN, fontScale=scale, thickness=thickness,
        )
        # Anchor the label just above the bounding box (org is the text's bottom-left
        # corner in (x, y) order), clamping so it stays inside the frame.
        cv2.putText(
            image,
            label[0],
            org=(xmin, max(ymin - label_height // 2, label_height)),
            fontFace=cv2.FONT_HERSHEY_PLAIN,
            fontScale=scale,
            color=label[1],
            thickness=thickness,
        )

class MaskDetection(Base):
    """Class for the Mask Detection Model."""

    def __init__(
        self,
        model_name,
        source_width=None,
        source_height=None,
        device="CPU",
        threshold=0.60,
        extensions=None,
        **kwargs,
    ):
        super().__init__(
            model_name,
            source_width,
            source_height,
            device,
            threshold,
            extensions,
            **kwargs,
        )

    def preprocess_output(self, inference_results, image, show_bbox=False, **kwargs):
        """Flatten the raw mask predictions into a 1-D score array."""
        results = {}
        results["flattened_predictions"] = np.vstack(inference_results).ravel()
        results["image"] = image
        return results

    def draw_output(self, image, inference_results, **kwargs):
        """No-op: mask results are rendered by FaceDetection.draw_output
        via its ``mask_detected`` kwarg, not drawn directly here."""
        pass
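

# ----------------------------------------------------------------------------
# Usage sketch (illustrative only): a minimal example of how the two models
# could be chained. The inference call on the Base class is NOT shown in this
# file, so ``predict`` below is an assumed API, and the model names, ``frame``,
# ``w``/``h`` variables and the score index are placeholders.
#
#   face_model = FaceDetection(model_name="<face-detection-model>",
#                              source_width=w, source_height=h)
#   mask_model = MaskDetection(model_name="<mask-detection-model>")
#
#   face_results = face_model.predict(frame, show_bbox=False)      # assumed API
#   for (xmin, ymin, xmax, ymax) in face_results["bbox_coord"]:
#       face_crop = frame[ymin:ymax, xmin:xmax]
#       mask_results = mask_model.predict(face_crop)                # assumed API
#       score = mask_results["flattened_predictions"][0]            # index depends on model
#       FaceDetection.draw_output(frame, xmin, ymin, xmax, ymax,
#                                 mask_detected=score, threshold=0.3)
# ----------------------------------------------------------------------------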