inference2.h
//
// Created by hsyuan on 2021-02-22.
//
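// Pipelined inference: frames pushed into a BMInferencePipe flow through
// preprocess -> forward -> postprocess queues, each stage served by its own
// worker-thread pool and implemented by a user-supplied DetectorDelegate.
//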
#ifndef INFERENCE_FRAMEWORK_INFERENCE_H
#define INFERENCE_FRAMEWORK_INFERENCE_H
#include "bmutility.h"
#include "thread_queue.h"

namespace bm {

    // forward declaration
    template<typename T> class BMInferencePipe;
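
    // Per-model stage callbacks. Users subclass this and implement the three
    // pure-virtual stages; the pipe invokes them from its worker pools.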
    template<typename T>
    class DetectorDelegate {
    protected:
        using DetectedFinishFunc = std::function<void(T &of)>;
        DetectedFinishFunc m_pfnDetectFinish = nullptr;  // optional completion callback, available to subclasses
        BMInferencePipe<T> *m_nextInferPipe = nullptr;   // downstream pipe for chained models

    public:
        virtual ~DetectorDelegate() {}

        void set_next_inference_pipe(BMInferencePipe<T> *nextPipe) { m_nextInferPipe = nextPipe; }
        int set_detected_callback(DetectedFinishFunc func) { m_pfnDetectFinish = func; return 0; }

        // The three pipeline stages, called from the pipe's worker threads.
        virtual int preprocess(std::vector<T> &frames) = 0;
        virtual int forward(std::vector<T> &frames) = 0;
        virtual int postprocess(std::vector<T> &frames) = 0;
    };
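
    // Queue-depth and thread-count knobs for the three stages. The
    // *_queue_size values are soft limits: the pipe only warns when a queue
    // grows past them, it does not block producers.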
    struct DetectorParam {
        DetectorParam() {
            preprocess_queue_size = 5;
            preprocess_thread_num = 4;
            inference_queue_size = 5;
            inference_thread_num = 1;
            postprocess_queue_size = 5;
            postprocess_thread_num = 2;
            batch_size = 4;
        }

        int preprocess_queue_size;
        int preprocess_thread_num;
        int inference_queue_size;
        int inference_thread_num;
        int postprocess_queue_size;
        int postprocess_thread_num;
        int batch_size;
    };
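
    // Three-stage pipeline: push_frame() feeds the preprocess queue; each
    // stage's worker pool drains its queue in batches, runs the matching
    // delegate callback, and pushes the items on to the next stage's queue.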
    template<typename T>
    class BMInferencePipe {
        DetectorParam m_param;
        std::shared_ptr<DetectorDelegate<T>> m_detect_delegate;

        BlockingQueue<T> m_preprocessQue;
        BlockingQueue<T> m_postprocessQue;
        BlockingQueue<T> m_forwardQue;

        WorkerPool<T> m_preprocessWorkerPool;
        WorkerPool<T> m_forwardWorkerPool;
        WorkerPool<T> m_postprocessWorkerPool;

    public:
        BMInferencePipe() {}
        virtual ~BMInferencePipe() {}

        int init(const DetectorParam &param, std::shared_ptr<DetectorDelegate<T>> delegate) {
            m_param = param;
            m_detect_delegate = delegate;

            // Preprocess stage: runs delegate->preprocess() on each batch,
            // then hands the items to the forward queue.
            m_preprocessWorkerPool.init(&m_preprocessQue, param.preprocess_thread_num,
                                        param.batch_size, param.batch_size);
            m_preprocessWorkerPool.startWork([this](std::vector<T> &items) {
                if (m_preprocessQue.size() > m_param.preprocess_queue_size) {
                    std::cout << "WARNING: preprocess queue_size(" << m_preprocessQue.size()
                              << ") > " << m_param.preprocess_queue_size << std::endl;
                }
                m_detect_delegate->preprocess(items);
                this->m_forwardQue.push(items);
            });

            // Forward stage: runs delegate->forward() (the actual inference),
            // then hands the items to the postprocess queue.
            m_forwardWorkerPool.init(&m_forwardQue, param.inference_thread_num, 1, 8);
            m_forwardWorkerPool.startWork([this](std::vector<T> &items) {
                if (m_forwardQue.size() > m_param.inference_queue_size) {
                    std::cout << "WARNING: forward queue_size(" << m_forwardQue.size()
                              << ") > " << m_param.inference_queue_size << std::endl;
                }
                m_detect_delegate->forward(items);
                this->m_postprocessQue.push(items);
            });

            // Postprocess stage: last stop, results leave the pipe here.
            m_postprocessWorkerPool.init(&m_postprocessQue, param.postprocess_thread_num, 1, 8);
            m_postprocessWorkerPool.startWork([this](std::vector<T> &items) {
                if (m_postprocessQue.size() > m_param.postprocess_queue_size) {
                    std::cout << "WARNING: postprocess queue_size(" << m_postprocessQue.size()
                              << ") > " << m_param.postprocess_queue_size << std::endl;
                }
                m_detect_delegate->postprocess(items);
            });
            return 0;
        }

        // Flush buffered frames out of the preprocess worker pool.
        int flush_frame() {
            return m_preprocessWorkerPool.flush();
        }

        // Enqueue one frame; it is copied into the preprocess queue.
        int push_frame(T *frame) {
            return m_preprocessQue.push(*frame);
        }
    };
} // end namespace bm
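
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the original header). MyFrame and
// EchoDetector are hypothetical names; the sketch assumes BlockingQueue and
// WorkerPool from thread_queue.h behave as they are used above.
//
//   struct MyFrame { int id; };
//
//   class EchoDetector : public bm::DetectorDelegate<MyFrame> {
//   public:
//       int preprocess(std::vector<MyFrame> &frames) override { return 0; }
//       int forward(std::vector<MyFrame> &frames) override { return 0; }
//       int postprocess(std::vector<MyFrame> &frames) override {
//           for (auto &f : frames) std::cout << "frame " << f.id << " done\n";
//           return 0;
//       }
//   };
//
//   bm::DetectorParam param;                 // default queue/thread sizes
//   bm::BMInferencePipe<MyFrame> pipe;
//   pipe.init(param, std::make_shared<EchoDetector>());
//   MyFrame f{42};
//   pipe.push_frame(&f);                     // frame is copied into the pipe
//   pipe.flush_frame();                      // force out a partial batch
// ---------------------------------------------------------------------------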
#endif //INFERENCE_FRAMEWORK_INFERENCE_H