shape_det_cat.py
# -*- coding: utf-8 -*-
"""
Created on Tue May 18 12:25:23 2021
@author: gat06
"""
from __future__ import print_function # Python 2/3 compatibility
import cv2 # Import the OpenCV library
import numpy as np # Import Numpy library
# Project: Object Tracking
# Author: Addison Sears-Collins
# Website: https://automaticaddison.com
# Date created: 06/13/2020
# Python version: 3.7
def main():
    """
    Main method of the program.
    """
    # Create a VideoCapture object
    cap = cv2.VideoCapture(1)

    # Create the background subtractor object
    # Use the last 1000 video frames to build the background
    back_sub = cv2.createBackgroundSubtractorMOG2(history=1000,
        varThreshold=30, detectShadows=True)

    # Create kernel for morphological operation
    # You can tweak the dimensions of the kernel
    # e.g. instead of 30,30 you can try 20,20.
    kernel = np.ones((30, 30), np.uint8)

    while True:
        # Capture frame-by-frame
        # This method returns True/False as well
        # as the video frame.
        ret, frame = cap.read()

        # Stop if no frame could be grabbed
        # (e.g. the camera at index 1 is unavailable)
        if not ret:
            break

        # Use every frame to calculate the foreground mask and update
        # the background
        fg_mask = back_sub.apply(frame)
        #fg_mask = cv2.cvtColor(fg_mask, cv2.COLOR_BGR2GRAY)

        # Close dark gaps in foreground object using closing
        fg_mask = cv2.morphologyEx(fg_mask, cv2.MORPH_CLOSE, kernel)
        fg_mask = cv2.GaussianBlur(fg_mask, (9, 9), 0)

        # Remove salt and pepper noise with a median filter
        fg_mask = cv2.medianBlur(fg_mask, 9)

        # Threshold the image to make it either black or white
        _, fg_mask = cv2.threshold(fg_mask, 120, 255, cv2.THRESH_BINARY)
        fg_mask = cv2.dilate(fg_mask, None, iterations=3)

        # Find the index of the largest contour and draw bounding box
        fg_mask_bb = fg_mask
        contours, hierarchy = cv2.findContours(
            fg_mask_bb, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        areas = [cv2.contourArea(c) for c in contours]

        # If there are no contours
        if len(areas) < 1:
            # Display the resulting frame
            cv2.imshow('frame', frame)

            # If "q" is pressed on the keyboard,
            # exit this loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            # Go to the top of the while loop
            continue
        else:
            # Find the largest moving object in the image
            max_index = np.argmax(areas)

            # Draw the bounding box
            cnt = contours[max_index]
            x, y, w, h = cv2.boundingRect(cnt)
            #cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)

            # Draw circle in the center of the bounding box
            x2 = x + int(w / 2)
            y2 = y + int(h / 2)
            cv2.circle(frame, (x2, y2), 4, (0, 255, 0), -1)

            # Print the centroid coordinates (we'll use the center of the
            # bounding box) on the image; an alternative centroid based on
            # image moments is sketched in centroid_from_moments() below
            text = "x: " + str(x2) + ", y: " + str(y2)
            cv2.putText(frame, text, (x2, y2),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

            # Display the resulting frame
            cv2.imshow('frame', frame)

            # If "q" is pressed on the keyboard,
            # exit this loop
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            print(x2, y2)

    # Close down the video stream
    cap.release()
    cv2.destroyAllWindows()
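

# A minimal sketch of an alternative centroid computation: cv2.moments() gives
# the contour moments, from which the centroid follows as (m10/m00, m01/m00).
# The helper name centroid_from_moments is an illustrative assumption; main()
# above uses the bounding-box centre instead.
def centroid_from_moments(contour):
    """Return the (x, y) centroid of a contour, or None if it has zero area."""
    moments = cv2.moments(contour)
    if moments["m00"] == 0:
        # Zero-area contour: the centroid is undefined
        return None
    cx = int(moments["m10"] / moments["m00"])
    cy = int(moments["m01"] / moments["m00"])
    return (cx, cy)
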
if __name__ == '__main__':
    print(__doc__)
    main()
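
# Usage note (assumes OpenCV and NumPy are installed and a camera is attached
# at index 1; swap in cv2.VideoCapture(0) above for the default built-in
# webcam):
#
#     python shape_det_cat.py
#
# Press "q" in the video window to quit.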