# object_detection_retina_a311d.pbtxt
# Images on CPU coming into the graph and detections coming out of it.
input_stream: "input_frame"
output_stream: "output_detect"
# Transforms the input image on CPU to a 960x544 image. To scale the image, by
# default it uses the STRETCH scale mode that maps the entire input image to
# the entire transformed image. As a result, the image aspect ratio may be
# changed and objects in the image may be deformed (stretched or squeezed), but
# the object detection model used in this graph is agnostic to that
# deformation.
node {
  calculator: "ImageTransformationCalculator"
  input_stream: "IMAGE:input_frame"
  output_stream: "IMAGE:transformed_input_frame"
  node_options: {
    [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
      output_width: 960
      output_height: 544
    }
  }
}
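# Note: if the model were sensitive to aspect ratio, letterboxing instead of
# stretching would be the usual fix (a sketch, not used in this graph, and
# assuming the stock MediaPipe scale_mode option is available in this build):
#   scale_mode: FIT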
# Converts the transformed input image on CPU into a normalized image tensor
# stored as Tengine input arrays.
node {
  calculator: "TengineConverterCalculator"
  input_stream: "IMAGE:transformed_input_frame"
  output_stream: "ARRAYS:image_tensor"
  node_options: {
    [type.googleapis.com/mediapipe.TengineConverterCalculatorOptions] {
      tensor_mean: { val1: 104 val2: 117 val3: 123 }
      tensor_scale: { val1: 1 val2: 1 val3: 1 }
    }
  }
}
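# The converter is assumed to apply the usual (pixel - mean) * scale
# normalization per channel. With tensor_scale 1 this reduces to plain mean
# subtraction, e.g. a pixel with channel values (104, 117, 123) maps to
# (0, 0, 0) in the tensor.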
# Runs the quantized RetinaFace model with Tengine. The "timvx" backend
# offloads inference to the NPU of the Amlogic A311D via TIM-VX.
node {
  calculator: "TengineInferenceCalculator"
  input_stream: "ARRAYS:image_tensor"
  output_stream: "ARRAYS:detection_tensors"
  output_stream: "TENSOR_SHAPE:tensor_shapes"
  output_stream: "QUANT_PARAM:quant_param"
  node_options: {
    [type.googleapis.com/mediapipe.TengineInferenceCalculatorOptions] {
      model_path: "../models/retinaface_multi_uint8.tmfile"
      data_type: "uint8"
      output_num: 2
      max_dim: 4
      tengine_backend: "timvx"
    }
  }
}
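# output_num: 2 matches the two output arrays consumed downstream, presumably
# the box regressions and the class scores. The TENSOR_SHAPE and QUANT_PARAM
# streams are forwarded so that the decoder below can dequantize the uint8
# outputs.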
# Generates a fixed, SSD-style grid of anchors for the model's three output
# layers, emitted once as a side packet.
node {
  calculator: "RetinaAnchorsCalculator"
  output_side_packet: "anchors"
  node_options: {
    [type.googleapis.com/mediapipe.RetinaAnchorsCalculatorOptions] {
      num_layers: 3
      min_scale: 0.2
      max_scale: 0.95
      input_size_height: 544
      input_size_width: 960
      anchor_offset_x: 0.5
      anchor_offset_y: 0.5
      strides: 8
      strides: 16
      strides: 32
      aspect_ratios: 1.0
      aspect_ratios: 2.5
      reduce_boxes_in_lowest_layer: false
    }
  }
}
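# Decodes the raw output arrays into detections using the anchors above. The
# num_boxes below is consistent with the anchor configuration (a sketch,
# assuming the generator emits 8 anchors per grid cell, e.g. 4 scales x 2
# aspect ratios):
#   stride  8: (960/8)  x (544/8)  = 120 x 68 = 8160 cells
#   stride 16: (960/16) x (544/16) =  60 x 34 = 2040 cells
#   stride 32: (960/32) x (544/32) =  30 x 17 =  510 cells
#   (8160 + 2040 + 510) cells x 8 anchors = 85680 boxes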
node {
  calculator: "TengineTensorsToDetectionsCalculator"
  input_stream: "ARRAYS:detection_tensors"
  input_side_packet: "ANCHORS:anchors"
  input_stream: "QUANT_PARAM:quant_param"
  output_stream: "DETECTIONS:detections"
  node_options: {
    [type.googleapis.com/mediapipe.TengineTensorsToDetectionsCalculatorOptions] {
      num_classes: 4
      num_boxes: 85680
      num_coords: 4
      min_score_thresh: 0.5
      sigmoid_score: true
      data_type: "uint8"
    }
  }
}
# Performs non-max suppression to remove excessive detections.
node {
  calculator: "NonMaxSuppressionCalculator"
  input_stream: "detections"
  output_stream: "output_detect"
  node_options: {
    [type.googleapis.com/mediapipe.NonMaxSuppressionCalculatorOptions] {
      min_suppression_threshold: 0.4
      # max_num_detections: 100
      overlap_type: INTERSECTION_OVER_UNION
      return_empty_detections: true
    }
  }
}
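# With min_suppression_threshold: 0.4, a detection is dropped when its IoU
# with an already-kept, higher-scoring detection exceeds 0.4;
# return_empty_detections keeps the output stream alive by emitting an empty
# detection vector when nothing passes.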