# Worker thread that processes images for vision operations. Reads images from CVCam.get_raw_image()
import threading
import TCEnumerations
import imutils
import cv2
import time
import numpy as np


class CVProcessorThread(threading.Thread):
    def __init__(self, cvcam, operation, frame_manager):
        threading.Thread.__init__(self)
        self.operation = operation
        self.isRunning = False
        self.frameManager = frame_manager  # Main unit for writing and reading CVFrames
        self.face_cascade = cv2.CascadeClassifier('./haarcascade_frontalface_default.xml')
        self.compFrame = None
        self.has_motion_detected = False
        self.start_time = time.time()
        self.record_on_motion_detection = False
        # The camera module that supplies raw frames
        self.camera = cvcam
    def run(self):
        self.isRunning = True  # without this the loop below would never execute
        while self.isRunning:
            # RAW IMAGE OUTPUT
            if self.operation == TCEnumerations.CV_RAW_IMAGE:
                grabbed, img = self.camera.get_raw_image()
                if not grabbed:
                    continue  # skip frames that failed to grab
                self.frameManager.create_frame(img, TCEnumerations.CV_RAW_IMAGE)
            # FACE DETECTION
            elif self.operation == TCEnumerations.CV_FACE_DETECTION:
                grabbed, img = self.camera.get_raw_image()
                if not grabbed:
                    continue  # check before touching img, which is invalid on a failed grab
                height, width = img.shape[:2]
                face_detect_scale = 1  # resizing factor applied before the Haar cascade
                # Detection runs on this (optionally downscaled) copy; full resolution
                # is unnecessary. imutils.resize preserves aspect ratio from the width.
                img_for_faces = imutils.resize(img, width=int(width * face_detect_scale))
                gray = cv2.cvtColor(img_for_faces, cv2.COLOR_BGR2GRAY)
                faces = self.face_cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=5)
                # Map each detection back to full-resolution coordinates and draw it
                # (cv2.rectangle modifies img in place)
                for (x, y, w, h) in faces:
                    cv2.rectangle(img,
                                  (int(x / face_detect_scale), int(y / face_detect_scale)),
                                  (int((x + w) / face_detect_scale), int((y + h) / face_detect_scale)),
                                  (255, 0, 0), 2)
                self.frameManager.create_frame(img, TCEnumerations.CV_FACE_DETECTION)
            # MOTION DETECTION
            # Adapted from:
            # http://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
            elif self.operation == TCEnumerations.CV_MOTION_DETECTION:
                grabbed, frame = self.camera.get_raw_image()
                if not grabbed:
                    continue
                # Convert the image from BGR to grayscale and blur it to suppress sensor noise
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                gray = cv2.GaussianBlur(gray, (21, 21), 0)
                # Seed the comparison frame on the first pass through this loop
                if self.compFrame is None:
                    self.compFrame = gray
                    continue
                # Reset the comparison frame every five seconds so gradual scene changes
                # (lighting, repositioned objects) do not register as motion indefinitely
                timeElapsed = time.time() - self.start_time
                if timeElapsed > 5:
                    self.start_time = time.time()
                    self.compFrame = gray
                # Absolute per-pixel difference between the comparison frame and the
                # current frame, thresholded and dilated to fill in holes
                frameDelta = cv2.absdiff(self.compFrame, gray)
                thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
                thresh = cv2.dilate(thresh, None, iterations=2)
                # imutils.grab_contours absorbs the differing return signatures of
                # cv2.findContours across OpenCV 3 and 4
                cnts = imutils.grab_contours(cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                                              cv2.CHAIN_APPROX_SIMPLE))
                # Draw rectangles around the regions where motion was detected,
                # ignoring contours too small to matter
                if len(cnts) == 0:
                    self.has_motion_detected = False
                for c in cnts:
                    if cv2.contourArea(c) < 500:
                        continue
                    (x, y, w, h) = cv2.boundingRect(c)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                    self.has_motion_detected = True
                    if self.record_on_motion_detection:
                        self.camera.start_recording()
                self.frameManager.create_frame(frame, TCEnumerations.CV_MOTION_DETECTION)
            # CANNY EDGE DETECTION
            elif self.operation == TCEnumerations.CV_CANNY_EDGE_DETECTION:
                grabbed, img = self.camera.get_raw_image()
                if not grabbed:
                    continue
                img = cv2.Canny(img, 100, 200)  # hysteresis thresholds: 100 (low), 200 (high)
                self.frameManager.create_frame(img, TCEnumerations.CV_CANNY_EDGE_DETECTION)
            # CORNER DETECTION
            elif self.operation == TCEnumerations.CV_CORNER_DETECTION:
                grabbed, img = self.camera.get_raw_image()
                if not grabbed:
                    continue
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                gray = np.float32(gray)  # cornerHarris expects float32 input
                dst = cv2.cornerHarris(gray, 2, 7, 0.04)
                # The result is dilated only to make the corner markers more visible
                dst = cv2.dilate(dst, None)
                # Mark corners in red; the 0.01 threshold is empirical and may vary by image
                img[dst > 0.01 * dst.max()] = [0, 0, 255]
                self.frameManager.create_frame(img, TCEnumerations.CV_CORNER_DETECTION)
            # KEYPOINT DETECTION
            elif self.operation == TCEnumerations.CV_KEYPOINT_DETECTION:
                grabbed, img = self.camera.get_raw_image()
                if not grabbed:
                    continue
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                # On OpenCV >= 4.4 SIFT lives in the main module; use cv2.SIFT_create() there
                sift = cv2.xfeatures2d.SIFT_create()
                kp = sift.detect(gray, None)
                img = cv2.drawKeypoints(gray, kp, img)  # render the keypoints into img
                self.frameManager.create_frame(img, TCEnumerations.CV_KEYPOINT_DETECTION)
    def get_operation(self):
        return self.operation

    def set_operation(self, input_operation):
        self.operation = input_operation

    def set_record_on_motion_detection(self, is_on):
        # Renamed from record_on_motion_detection: the instance attribute of that
        # name (set in __init__) would otherwise shadow the method
        self.record_on_motion_detection = is_on

    def stop_processing(self):
        self.isRunning = False
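
# Usage sketch. CVCam and the frame manager are defined elsewhere in the project,
# so the construction below is illustrative only: it assumes a camera object exposing
# get_raw_image() / start_recording() and a frame manager exposing create_frame(),
# which is all this thread actually calls.
#
#     camera = CVCam()
#     frame_manager = CVFrameManager()
#     processor = CVProcessorThread(camera, TCEnumerations.CV_FACE_DETECTION, frame_manager)
#     processor.start()            # threading.Thread.start() invokes run() on a new thread
#     processor.set_operation(TCEnumerations.CV_MOTION_DETECTION)  # switch modes on the fly
#     processor.stop_processing()  # lets the run() loop exit cleanly
#     processor.join()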