Merge pull request edgeimpulse#420 from dtischler/main
Counting OpenMV RT1062
dtischler authored Oct 4, 2024 · 2 parents f9e2b3e + 13a395c · commit 7f11cc1
Showing 285 changed files with 689 additions and 2 deletions.
@@ -0,0 +1,139 @@
# This script controls a Dobot conveyor belt, reads real-time serial data, visualizes nut sizes using Tkinter, and displays a live video feed using OpenCV.
# Date: 2024-09-29 15:30:00
# Author: Thomas Vikström
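# Requires the pyserial (Serial, list_ports) and opencv-python (cv2) packages, plus the
# local Dobot helper module imported below as dobot_extensions (assumed to be available).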

import tkinter as tk
from tkinter import ttk
from serial import Serial
from serial.tools import list_ports
import threading
import time
import cv2 # OpenCV for video capture and display

# Dobot-related imports (assuming these are present in the working environment)
from dobot_extensions import Dobot

# Initialize Dobot
# port = list_ports.comports()[0].device  # Alternatively, auto-select the first available port
port = 'COM18'  # Hard-coded Dobot port; update this if the port changes
device = Dobot(port=port)

# Serial port setup for OpenMV Cam (modify to your specific settings)
serial_port = Serial(port='COM21', baudrate=115200, timeout=1)
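
# If you are unsure which COM ports the Dobot and the OpenMV Cam enumerate as, you can
# list them first (illustrative snippet, not part of the original script):
# for p in list_ports.comports():
#     print(p.device, p.description)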

# Tkinter setup
root = tk.Tk()
root.title("Nut Count Visualization")

# Create the main frame for the GUI
mainframe = ttk.Frame(root, padding="20 20 20 20")
mainframe.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))

# Dictionary to keep track of nut sizes and counts
nut_counts = {}

# Tkinter variables for display
total_count_var = tk.StringVar()
total_count_var.set("Total Count: 0")

# Create labels for the total count
ttk.Label(mainframe, textvariable=total_count_var, font=("Helvetica", 16)).grid(row=0, column=0, columnspan=2, pady=10)

# Placeholder labels for individual nut counts (to be updated dynamically)
nut_labels = {}

# Global variable to track video feed status
video_feed_ready = threading.Event()

# Function to parse the serial string with error handling
def parse_serial_string(serial_string):
    try:
        if ',' not in serial_string:
            return 0, {}
        parts = serial_string.split(", ")
        if len(parts) < 1 or not parts[0].isdigit():
            return 0, {}
        total_count = int(parts[0])
        counts = {}
        for item in parts[1:]:
            if ": " in item:
                nut, count = item.split(": ")
                counts[nut] = int(count)
        return total_count, counts
    except Exception as e:
        print(f"Error parsing serial string '{serial_string}': {e}")
        return 0, {}
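
# Example of the (assumed) string format this parser expects from the OpenMV Cam:
#   "4, M6: 1, M8: 2, M10: 1"  ->  total_count = 4, counts = {"M6": 1, "M8": 2, "M10": 1}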

# Function to update the GUI with the new counts
def update_gui(total_count, counts):
    total_count_var.set(f"Total Count: {total_count}")
    # Reset all known nut counters to zero, then apply the counts from the latest reading
    for nut_label in nut_labels:
        nut_labels[nut_label].set("0")
    for nut, count in counts.items():
        if nut not in nut_labels:
            nut_labels[nut] = tk.StringVar()
            row = len(nut_labels)
            ttk.Label(mainframe, text=f"{nut}: ", font=("Helvetica", 14)).grid(row=row, column=0, sticky=tk.E, padx=5)
            ttk.Label(mainframe, textvariable=nut_labels[nut], font=("Helvetica", 14)).grid(row=row, column=1, sticky=tk.W, padx=5)
        nut_labels[nut].set(f"{count}")

# Function to read from the serial port and update counts
def read_serial_data():
    while True:
        try:
            if serial_port.in_waiting > 0:
                line = serial_port.readline().decode("utf-8").strip()
                if line:
                    total, counts = parse_serial_string(line)
                    update_gui(total, counts)
        except Exception as e:
            print(f"Error reading serial data: {e}")
        time.sleep(0.1)
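
# Note: update_gui() manipulates Tkinter widgets from this worker thread. Tkinter is not
# guaranteed to be thread-safe; if the GUI misbehaves, a common alternative is to hand the
# update to the main loop instead, e.g. root.after(0, update_gui, total, counts).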

# Function to control the conveyor belt in a loop
def control_conveyor_belt():
    # Wait until the video feed is ready
    video_feed_ready.wait()
    print("Video feed is active. Starting the conveyor belt.")
    while True:
        device.conveyor_belt_distance(10, 15, 1, 0)
        time.sleep(0.5)

# Function to show live video feed using OpenCV
def show_video_feed():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        print("Error: Cannot open webcam")
        return

    # Note: cv2.imshow()/cv2.waitKey() run in this background thread while Tkinter owns
    # the main thread. This works on typical Windows setups, but some platforms require
    # GUI calls on the main thread, so adjust if the window does not appear.
    print("Starting video feed...")
    while True:
        ret, frame = cap.read()
        if ret:
            cv2.imshow("Live Video Feed", frame)
            video_feed_ready.set()  # Set the event when the first frame is successfully shown

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()

# Thread to handle the Dobot movements
dobot_thread = threading.Thread(target=control_conveyor_belt, daemon=True)
dobot_thread.start()

# Thread to handle reading and updating serial data
serial_thread = threading.Thread(target=read_serial_data, daemon=True)
serial_thread.start()

# Thread to show live video feed using OpenCV
video_thread = threading.Thread(target=show_video_feed, daemon=True)
video_thread.start()

# Run the Tkinter main event loop
root.mainloop()

# Cleanup after exiting the GUI
device.close()
serial_port.close()
@@ -0,0 +1,91 @@
# Edge Impulse - OpenMV FOMO Object Detection Example
#
# This work is licensed under the MIT license.
# Copyright (c) 2013-2024 OpenMV LLC. All rights reserved.
# https://github.com/openmv/openmv/blob/master/LICENSE

import sensor, image, time, os, ml, math, uos, gc
from ulab import numpy as np

sensor.reset() # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA) # Set frame size to QVGA (320x240)
sensor.set_windowing((240, 240)) # Set 240x240 window.
sensor.skip_frames(time=2000) # Let the camera adjust.

net = None
labels = None
min_confidence = 0.5

try:
# load the model, alloc the model file on the heap if we have at least 64K free after loading
net = ml.Model("trained.tflite", load_to_fb=uos.stat('trained.tflite')[6] > (gc.mem_free() - (64*1024)))
except Exception as e:
raise Exception('Failed to load "trained.tflite", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

try:
labels = [line.rstrip('\n') for line in open("labels.txt")]
except Exception as e:
raise Exception('Failed to load "labels.txt", did you copy the .tflite and labels.txt file onto the mass-storage device? (' + str(e) + ')')

colors = [ # Add more colors if you are detecting more than 7 types of classes at once.
(255, 0, 0),
( 0, 255, 0),
(255, 255, 0),
( 0, 0, 255),
(255, 0, 255),
( 0, 255, 255),
(255, 255, 255),
]

threshold_list = [(math.ceil(min_confidence * 255), 255)]
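# With min_confidence = 0.5 this evaluates to [(128, 255)]: find_blobs() keeps heatmap
# pixels whose confidence maps to a grayscale value of at least 128 out of 255.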

def fomo_post_process(model, inputs, outputs):
    ob, oh, ow, oc = model.output_shape[0]

    x_scale = inputs[0].roi[2] / ow
    y_scale = inputs[0].roi[3] / oh

    scale = min(x_scale, y_scale)

    x_offset = ((inputs[0].roi[2] - (ow * scale)) / 2) + inputs[0].roi[0]
    y_offset = ((inputs[0].roi[3] - (ow * scale)) / 2) + inputs[0].roi[1]

    l = [[] for i in range(oc)]

    for i in range(oc):
        img = image.Image(outputs[0][0, :, :, i] * 255)
        blobs = img.find_blobs(
            threshold_list, x_stride=1, y_stride=1, area_threshold=1, pixels_threshold=1
        )
        for b in blobs:
            rect = b.rect()
            x, y, w, h = rect
            score = (
                img.get_statistics(thresholds=threshold_list, roi=rect).l_mean() / 255.0
            )
            x = int((x * scale) + x_offset)
            y = int((y * scale) + y_offset)
            w = int(w * scale)
            h = int(h * scale)
            l[i].append((x, y, w, h, score))
    return l
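
# fomo_post_process() returns one list per class; each entry is an (x, y, w, h, score)
# tuple with the bounding box already mapped back into input-image coordinates, e.g.
# l[1] holds every detection for the first non-background label.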

clock = time.clock()
while(True):
    clock.tick()

    img = sensor.snapshot()

    for i, detection_list in enumerate(net.predict([img], callback=fomo_post_process)):
        if i == 0: continue  # background class
        if len(detection_list) == 0: continue  # no detections for this class?

        print("********** %s **********" % labels[i])
        for x, y, w, h, score in detection_list:
            center_x = math.floor(x + (w / 2))
            center_y = math.floor(y + (h / 2))
            print(f"x {center_x}\ty {center_y}\tscore {score}")
            img.draw_circle((center_x, center_y, 12), color=colors[i])

    print(clock.fps(), "fps", end="\n\n")
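
The Tkinter script earlier in this commit expects a single comma-separated counts string on its serial port, while this example only prints per-frame detections. A minimal sketch of how the loop above could keep running totals and emit that string is shown here; the nut_counts dictionary, the report_counts() helper, and the exact print format are assumptions for illustration, not part of the committed code.

# Hypothetical counting sketch (assumed, not in the original script)
nut_counts = {label: 0 for label in labels[1:]}   # one running total per non-background label

def report_counts():
    total = sum(nut_counts.values())
    parts = [f"{nut}: {count}" for nut, count in nut_counts.items()]
    # print() output typically reaches the PC over the OpenMV Cam's USB virtual COM port
    print(f"{total}, " + ", ".join(parts))

# Inside the detection loop, once a detection for class i is confirmed as a new nut:
#     nut_counts[labels[i]] += 1
#     report_counts()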
@@ -0,0 +1,5 @@
background
M10
M12
M6
M8
@@ -0,0 +1,25 @@
# Dataset Capture Script - By: thv - Sat Sep 28 2024

# Use this script to control how your OpenMV Cam captures images for your dataset.
# You should apply the same image pre-processing steps you expect to run on images
# that you will feed to your model during run-time.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # Modify as you like.
sensor.set_framesize(sensor.QVGA) # Modify as you like.
sensor.skip_frames(time = 2000)

clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    # Apply lens correction if you need it.
    # img.lens_corr()
    # Apply rotation correction if you need it.
    # img.rotation_corr()
    # Apply other filters...
    # E.g. mean/median/mode/midpoint/etc.
    print(clock.fps())
@@ -0,0 +1,27 @@
# Dataset Capture Script - By: thv - Sat Sep 28 2024

# Use this script to control how your OpenMV Cam captures images for your dataset.
# You should apply the same image pre-processing steps you expect to run on images
# that you will feed to your model during run-time.

import sensor, image, time

sensor.reset()
sensor.set_pixformat(sensor.RGB565) # Modify as you like.
sensor.set_framesize(sensor.QVGA) # Modify as you like.
sensor.skip_frames(time = 2000)

clock = time.clock()

while(True):
    clock.tick()
    img = sensor.snapshot()
    img.scale(x_scale=1.2, roi=(50, 55, 540, 240))

    # Apply lens correction if you need it.
    img.lens_corr()
    # Apply rotation correction if you need it.
    # img.rotation_corr()
    # Apply other filters...
    # E.g. mean/median/mode/midpoint/etc.
    # print(clock.fps())