This repository has been archived by the owner on Jul 10, 2022. It is now read-only.
forked from arjunc246/Traffic-Management-System
-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: final_script.py
83 lines (67 loc) · 2.68 KB
/
final_script.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import cv2
import numpy as np
#from sklearn.externals import joblib
def calcFrame(x, y, fps=30):
    """Convert a video timestamp to a frame index.

    Args:
        x: whole minutes into the video.
        y: seconds past the minute mark (may exceed 59, e.g. ``calcFrame(1, 60)``
           is the 2-minute mark).
        fps: frames per second of the video. Defaults to 30, the rate the
             original script assumed; made a parameter so the helper works
             for clips recorded at other frame rates.

    Returns:
        int: the frame number corresponding to that timestamp.
    """
    return int((x * 60 + y) * fps)
if __name__ == "__main__":
    # Background-subtraction vehicle detector for one traffic lane:
    # compare each frame of a clip of 'latestData.mp4' against a static
    # empty-road reference image inside a rectangular ROI, then draw the
    # detected contours annotated with their areas.
    vid = cv2.VideoCapture('latestData.mp4')
    # model = joblib.load("model (1).cpickle")

    # Frame window for lane 1. Property id 1 is CAP_PROP_POS_FRAMES
    # (current frame index), used for both seeking and the loop bound.
    lane1_start_time = calcFrame(1, 60)
    lane1_end_time = calcFrame(2, 35)
    vid.set(1, lane1_start_time)

    # Reference (empty-road) image, converted to grayscale, and a filled
    # rectangular ROI mask restricting detection to one lane.
    refIm = cv2.imread('refFrame.jpg')
    refIm2 = cv2.cvtColor(refIm, cv2.COLOR_BGR2GRAY)
    roi = np.ones(refIm2.shape, "uint8")
    cv2.rectangle(roi, (62, 60), (242, 180), 255, -1)

    while vid.get(1) <= lane1_end_time + 1000:
        ret, frame = vid.read()
        # BUGFIX: check ret BEFORE using frame — at end-of-stream read()
        # returns (False, None) and frame.copy() crashed with AttributeError.
        if not ret:
            break
        vidClone = frame.copy()
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Mask both the background and the live frame to the ROI, then take
        # the absolute difference to isolate moving objects.
        bg = cv2.bitwise_and(refIm2.copy(), roi)
        gray = cv2.bitwise_and(gray, roi)
        cv2.imshow("background", bg)
        cv2.waitKey(1)
        diff = cv2.absdiff(bg.astype('uint8'), gray)

        # Binarize the difference, open to remove speckle noise, then
        # dilate heavily so fragments of one vehicle merge into one blob.
        thresh = 53
        thresholded = cv2.threshold(diff, thresh, 255, cv2.THRESH_BINARY)[1]
        k = 3
        kernel = np.ones((k, k), "uint8")
        opening = cv2.morphologyEx(thresholded, cv2.MORPH_OPEN, kernel)
        cv2.imshow('opening', opening)
        dilate = 15
        dilated = cv2.dilate(opening, None, iterations=dilate)

        # NOTE: on OpenCV 3.x findContours returns 3 values — unpack as
        # _, contour, _ there (4.x returns 2, as used here).
        contour, _ = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        # Draw each contour and label it with its area at its centroid.
        for i in range(len(contour)):
            cv2.drawContours(vidClone, contour, i, (0, 255, 0))
            M = cv2.moments(contour[i])
            # BUGFIX: skip degenerate contours whose zeroth moment is 0 —
            # the centroid division raised ZeroDivisionError on them.
            if M['m00'] == 0:
                continue
            cx = int(M['m10'] / M['m00'])
            cy = int(M['m01'] / M['m00'])
            area = cv2.contourArea(contour[i])
            cv2.putText(vidClone, str(area), (cx - 10, cy - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
        cv2.imshow("video clone", vidClone)
        # time = model.predict(area)
        # print(time)

        # Stop early when the user presses "q".
        keypress = cv2.waitKey(30) & 0xFF
        if keypress == ord('q'):
            break

    # workbook.close()
    # BUGFIX: was `vid.release` (no parentheses) — the method was never
    # actually called, leaking the capture handle.
    vid.release()
    cv2.destroyAllWindows()