-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathpredictRPi.py
115 lines (97 loc) · 3.43 KB
/
predictRPi.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
'''
How to run this script:

    python predictRPi.py
        -m location/simple_nn1.model
        -l location/simple_nn_lb1.pickle
        -w 32 -e 32 -f 1

Use -f 1 for the simple (flattened-input) NN and -f -1 for the VGG-style CNN.
'''
# import the necessary packages
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from imutils.video import VideoStream
import numpy as np
import imutils
import argparse
import time
import cv2
# ---------------------------------------------------------------------------
# Command-line interface.
# argparse normalises "--label-bin" to the key "label_bin" in the args dict.
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True,
                help="path to trained Keras model")
# The label-binarizer pickle is not actually loaded on the RPi (see below),
# but the argument is kept so the CLI stays compatible for future use.
ap.add_argument("-l", "--label-bin", required=True,
                help="path to label binarizer")
ap.add_argument("-w", "--width", type=int, default=28,
                help="target spatial dimension width")
ap.add_argument("-e", "--height", type=int, default=28,
                help="target spatial dimension height")
# flatten > 0 -> model expects a flat vector (simple fully-connected net);
# any other value -> model expects an image tensor (CNN, e.g. VGG-style).
ap.add_argument("-f", "--flatten", type=int, default=-1,
                help="whether or not we should flatten the image")
# Access any argument later via args["<dest name>"], e.g. args["model"].
args = vars(ap.parse_args())
# Load the serialized Keras model from disk.
print('-'*90)
print("[INFO] loading network and label binarizer...")
print('-'*90)
model = load_model(args["model"])
# pickle is not used on the RPi, so the class names are hard-coded instead of
# being read from the label binarizer; presumably this tuple matches the
# order the binarizer produced during training -- verify against training code.
#lb = pickle.loads(open(args["label_bin"], "rb").read())
lb = ('Arabic', 'English', 'Japanese')
# Start the video stream and give the camera sensor time to warm up.
print('-'*90)
print("[INFO] starting video stream")
print('-'*90)
# For a PC webcam:
vs = VideoStream(src=0).start()
# For the Raspberry Pi camera module (swap the comments on these two lines):
##vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
def _prepare_image(frame, width, height, flatten):
    """Preprocess a BGR camera frame for classification.

    Resizes the frame to (width, height), scales pixels to [0, 1] and adds a
    leading batch dimension.  Returns shape (1, width*height*channels) when
    ``flatten`` > 0 (fully-connected model), otherwise (1, height, width,
    channels) as expected by a CNN.
    """
    image = cv2.resize(frame, (width, height))
    image = image.astype("float") / 255.0
    # img_to_array yields (H, W, C); expand_dims adds the batch axis -> (1, H, W, C).
    image = img_to_array(image)
    image = np.expand_dims(image, axis=0)
    if flatten > 0:
        # Simple NN: unroll every pixel into a single row vector.
        image = image.reshape((1, -1))
    # else: the (1, H, W, C) tensor is already what a CNN expects.  The old
    # reshape((1, shape[0], shape[1], shape[2])) was a bug: shape[0] is the
    # batch axis (1), not H, so it raised a ValueError on the CNN path.
    return image


# Main loop: grab frames, classify them, and display the annotated stream
# until the user presses "q".
while True:
    # Grab the next frame from the threaded video stream.
    frame = vs.read()
    if frame is None:
        # Camera produced no frame (disconnected or failed to start) --
        # stop cleanly instead of crashing inside imutils.resize.
        print("[INFO] no frame received from the video stream")
        break
    # Resize for a consistent display size.
    frame = imutils.resize(frame, width=600)
    image = _prepare_image(frame, args["width"], args["height"],
                           args["flatten"])
    # Run inference and pick the class with the highest probability.
    preds = model.predict(image)
    i = preds.argmax(axis=1)[0]
    label = lb[i]
    # Draw "<label>: <confidence>%" in the top-left corner.
    text = "{}: {:.3f}%".format(label, preds[0][i] * 100)
    # (frame, text, location, font type, font size, font color, font boldness)
    frame = cv2.putText(frame, text, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2 ,
                        (128, 0, 0), 3)
    # Show the annotated frame.
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # Quit the program when "q" is pressed.
    if key == ord("q"):
        break

# Clean up: close all windows and stop the camera thread.
print('-'*90)
print("[INFO] closing up")
print('-'*90)
cv2.destroyAllWindows()
vs.stop()