diff --git a/README.md b/README.md index 0f43061..cf4427b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # Lambda - Deco3801 +# Hi Rosti 🪩 Please read! +We really wanted to have our READMEs finished for the snapshot so we could get your feedback on them but unfortunately we're all a bit slammed until the weekend and probably won't make it to them in time :( +We'll just do our best to finish it up & maybe ping you a lot until next studio instead. Hope that's okay! And sorry for the lack of good docs in most places at the moment. +*Promise* they'll be shipshape for the real submission. + ## Getting started To make it easy for you to get started with GitLab, here's a list of recommended next steps. diff --git a/assets/image-test/test2.png b/assets/image-test/test2.png index 8ed0d6e..1839e21 100644 Binary files a/assets/image-test/test2.png and b/assets/image-test/test2.png differ diff --git a/build-documentation/Lamba_Circuit_Layout.pdf b/build-documentation/Lamba_Circuit_Layout.pdf new file mode 100644 index 0000000..b5a64ca Binary files /dev/null and b/build-documentation/Lamba_Circuit_Layout.pdf differ diff --git a/build/README.md b/build/README.md index e62dd82..2891c9e 100644 --- a/build/README.md +++ b/build/README.md @@ -2,7 +2,7 @@ Build code goes here when project underway ## Build Instructions in order of process 1. [Database](../build/db-handler/README.md) -2. [Camera](../build/README.md) +2. [Camera](../build/vision/README.md) 3. [Display](../build/ui-display/frontend-app/README.md) 4. [Microcontroller](../build/microcontroller/README.md) 5. [Microcontroller Communication](../build/figurines/README.md) diff --git a/build/camera/__init__.py b/build/camera/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/build/camera/app6.py b/build/camera/app6.py deleted file mode 100644 index d15be01..0000000 --- a/build/camera/app6.py +++ /dev/null @@ -1,30 +0,0 @@ -import time - -import cv2 as cv - -cam_port = 1 -cam = cv.VideoCapture(cam_port, cv.CAP_DSHOW) -cam.set(cv.CAP_PROP_FRAME_WIDTH, 352) -cam.set(cv.CAP_PROP_FRAME_HEIGHT, 288) - -result, image = cam.read() -result2 = cam.isOpened() -print(result, result2) -time.sleep(2.0) -if result: - print('cam read') - # showing result, it take frame name and image - # output - cv.imshow('test', image) - - # saving image in local storage - # cv.imwrite("test.png", image) - - # If keyboard interrupt occurs, destroy image - # window - cv.waitKey(0) - cv.destroyWindow('test') - -# If captured image is corrupted, moving to else part -else: - print('No image detected. Please! 
try again') diff --git a/build/camera/app7.py b/build/camera/app7.py deleted file mode 100644 index b361d5e..0000000 --- a/build/camera/app7.py +++ /dev/null @@ -1,82 +0,0 @@ -# import the necessary packages -import imutils -import datetime -import json -import time -import cv2 -import os - -# adapted from Adrian Rosebrock's tutorial on: -# https://pyimagesearch.com/2015/06/01/home-surveillance-and-motion-detection-with-the-raspberry-pi-python-and-opencv/ -# Following the license found on: -# https://pyimagesearch.com/faqs/single-faq/what-is-the-code-license-associated-with-your-examples/ -# A screenshot of the license is also found in /Lambda-Deco3081/assets/pyimagesearchlicense.png - -cv2.destroyAllWindows() -path = '../../assets/image-test/test2.png' -vs = cv2.VideoCapture(1, cv2.CAP_DSHOW) -vs.set(cv2.CAP_PROP_FRAME_WIDTH, 1920) -vs.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080) -time.sleep(1.0) -noMotionTime = 0 -noMotionTimeStamp = datetime.datetime.now() -avg = None -thereWasMotion = True -while True: - truth, frame = vs.read() - timestamp = datetime.datetime.now() - if frame is None: - break - - # frame = imutils.resize(frame, width=960, height=540) - grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - grey = cv2.GaussianBlur(grey, (21, 21), 0) - - if avg is None: - avg = grey.copy().astype('float') - continue - - cv2.accumulateWeighted(grey, avg, 0.5) - frameDelta = cv2.absdiff(grey, cv2.convertScaleAbs(avg)) - thresh = cv2.threshold(frameDelta, 15, 255, cv2.THRESH_BINARY)[1] - thresh = cv2.dilate(thresh, None, iterations=2) - - contours = cv2.findContours( - thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) - contours = imutils.grab_contours(contours) - - for c in contours: - if cv2.contourArea(c) < 500: - continue - (x, y, w, h) = cv2.boundingRect(c) - cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) - noMotionTimeStamp = datetime.datetime.now() - thereWasMotion = True - - noMotionTime = timestamp - noMotionTimeStamp - if noMotionTime > datetime.timedelta(seconds=3) and thereWasMotion is True: - text = timestamp.strftime('%A %d %B %Y %I:%M:%S%p') - cv2.putText( - frame, - text, - (10, 20), - cv2.FONT_HERSHEY_SIMPLEX, - 0.5, - (0, 255, 0), - 2, - ) - cv2.imwrite(path, frame) - noMotionTimeStamp = datetime.datetime.now() - thereWasMotion = False - # show the frame and record if the user presses a key - cv2.imshow('Epic Motion Epicness Detection Camera', frame) - key = cv2.waitKey(1) & 0xFF - - # if the `q` key is pressed, break from the loop - if key == ord('q'): - break - -# cleanup the camera and close any open windows -vs.release() -cv2.destroyAllWindows() diff --git a/build/camera/environment.yaml b/build/camera/environment.yaml deleted file mode 100644 index 9899f30..0000000 --- a/build/camera/environment.yaml +++ /dev/null @@ -1,85 +0,0 @@ -name: camera-handler -channels: - - defaults -dependencies: - - blas=1.0=openblas - - bzip2=1.0.8=h620ffc9_4 - - c-ares=1.19.1=h80987f9_0 - - ca-certificates=2023.05.30=hca03da5_0 - - cairo=1.16.0=h302bd0f_5 - - cyrus-sasl=2.1.28=h9131b1a_1 - - eigen=3.3.7=h525c30c_1 - - expat=2.5.0=h313beb8_0 - - ffmpeg=4.2.2=h04105a8_0 - - fontconfig=2.14.1=hee714a5_2 - - freetype=2.12.1=h1192e45_0 - - gettext=0.21.0=h13f89a0_1 - - giflib=5.2.1=h80987f9_3 - - glib=2.69.1=h514c7bf_2 - - gmp=6.2.1=hc377ac9_3 - - gnutls=3.6.15=h887c41c_0 - - graphite2=1.3.14=hc377ac9_1 - - gst-plugins-base=1.14.1=h313beb8_1 - - gstreamer=1.14.1=h80987f9_1 - - harfbuzz=4.3.0=he9eebac_1 - - hdf5=1.12.1=h05c076b_3 - - icu=68.1=hc377ac9_0 - - jpeg=9e=h80987f9_1 - - 
krb5=1.20.1=hf3e1bf2_1 - - lame=3.100=h1a28f6b_0 - - lerc=3.0=hc377ac9_0 - - libclang=14.0.6=default_h1b80db6_1 - - libclang13=14.0.6=default_h24352ff_1 - - libcurl=8.2.1=h3e2b118_0 - - libcxx=14.0.6=h848a8c0_0 - - libdeflate=1.17=h80987f9_0 - - libedit=3.1.20221030=h80987f9_0 - - libev=4.33=h1a28f6b_1 - - libffi=3.4.4=hca03da5_0 - - libgfortran=5.0.0=11_3_0_hca03da5_28 - - libgfortran5=11.3.0=h009349e_28 - - libiconv=1.16=h1a28f6b_2 - - libidn2=2.3.4=h80987f9_0 - - libllvm14=14.0.6=h7ec7a93_3 - - libnghttp2=1.52.0=h62f6fdd_1 - - libopenblas=0.3.21=h269037a_0 - - libopus=1.3=h1a28f6b_1 - - libpng=1.6.39=h80987f9_0 - - libpq=12.15=h02f6b3c_1 - - libssh2=1.10.0=h02f6b3c_2 - - libtasn1=4.19.0=h80987f9_0 - - libtiff=4.5.1=h313beb8_0 - - libunistring=0.9.10=h1a28f6b_0 - - libvpx=1.10.0=hc377ac9_0 - - libwebp=1.2.4=ha3663a8_1 - - libwebp-base=1.2.4=h80987f9_1 - - libxml2=2.10.4=h372ba2a_0 - - libxslt=1.1.37=habca612_0 - - llvm-openmp=14.0.6=hc6e5704_0 - - lz4-c=1.9.4=h313beb8_0 - - mysql=5.7.24=ha71a6ea_2 - - ncurses=6.4=h313beb8_0 - - nettle=3.7.3=h84b5d62_1 - - numpy=1.25.2=py311he598dae_0 - - numpy-base=1.25.2=py311hfbfe69c_0 - - opencv=4.6.0=py311hbae66a1_5 - - openh264=1.8.0=h98b2900_0 - - openssl=3.0.10=h1a28f6b_2 - - pcre=8.45=hc377ac9_0 - - pip=23.2.1=py311hca03da5_0 - - pixman=0.40.0=h1a28f6b_0 - - python=3.11.4=hb885b13_0 - - qt-main=5.15.2=h9b4df51_9 - - qt-webengine=5.15.9=h2903aaf_7 - - qtwebkit=5.212=h19f419d_5 - - readline=8.2=h1a28f6b_0 - - setuptools=68.0.0=py311hca03da5_0 - - sqlite=3.41.2=h80987f9_0 - - tk=8.6.12=hb8d0fd4_0 - - tzdata=2023c=h04d1e81_0 - - wheel=0.38.4=py311hca03da5_0 - - x264=1!152.20180806=h1a28f6b_0 - - xz=5.4.2=h80987f9_0 - - zlib=1.2.13=h5a0b063_0 - - zstd=1.5.5=hd90d995_0 -prefix: camera-handler diff --git a/build/camera/motion.py b/build/camera/motion.py deleted file mode 100644 index 1c9d97f..0000000 --- a/build/camera/motion.py +++ /dev/null @@ -1,41 +0,0 @@ -import cv2 - -background = cv2.imread('background2.png') -background = cv2.cvtColor(background, cv2.COLOR_BGR2GRAY) -background = cv2.GaussianBlur(background, (21, 21), 0) - -video = cv2.VideoCapture('Test3.avi') - -while True: - status, frame = video.read() - gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) - gray = cv2.GaussianBlur(gray, (21, 21), 0) - - diff = cv2.absdiff(background, gray) - - thresh = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)[1] - thresh = cv2.dilate(thresh, None, iterations=2) - - cnts, res = cv2.findContours( - thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE - ) - - for contour in cnts: - if cv2.contourArea(contour) < 10000: - continue - (x, y, w, h) = cv2.boundingRect(contour) - cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 3) - - cv2.imshow('All Contours', frame) - - # cv2.imshow("Threshold Video",thresh) - - # cv2.imshow("Diff Video",diff) - # cv2.imshow("Gray Video",gray) - - key = cv2.waitKey(1) - if key == ord('q'): - break - -video.release() -cv2.destroyWindows() diff --git a/build/db-handler/testing.py b/build/db-handler/testing.py new file mode 100644 index 0000000..a89bdd1 --- /dev/null +++ b/build/db-handler/testing.py @@ -0,0 +1,18 @@ +import requests + +headers = { + 'Content-Type': 'application/json', + 'Accept': 'application/json' +} + +r = requests.post('http://127.0.0.1:8000/whiteboard/user_4', headers = headers, json={ + "body": [ + { + "day": "tuesday", + "time_slot": 12, + "data": "minecraft gaming" + } + ] +}) + +print(r.json()) diff --git a/build/figurines/README.md b/build/figurines/README.md new file mode 100644 index 
0000000..6c297b7
--- /dev/null
+++ b/build/figurines/README.md
@@ -0,0 +1,5 @@
+# Microcontroller Communications
+1. Connect the Seeeduino Xiao to a serial port using the provided USB-C to USB-A cable
+2. Double-click the .bat file located in this directory
+3. The file will download all required Python libraries, so please wait for it to finish
+4. The program will run once the installations are complete
\ No newline at end of file
diff --git a/build/microcontroller/Circuit_Layout.jpg b/build/microcontroller/Circuit_Layout.jpg
new file mode 100644
index 0000000..40efec1
Binary files /dev/null and b/build/microcontroller/Circuit_Layout.jpg differ
diff --git a/build/microcontroller/Lamba_Circuit_Layout.pdf b/build/microcontroller/Lamba_Circuit_Layout.pdf
new file mode 100644
index 0000000..b5a64ca
Binary files /dev/null and b/build/microcontroller/Lamba_Circuit_Layout.pdf differ
diff --git a/build/microcontroller/README.md b/build/microcontroller/README.md
new file mode 100644
index 0000000..3838266
--- /dev/null
+++ b/build/microcontroller/README.md
@@ -0,0 +1,32 @@
+# Microcontroller
+
+## Microcontroller Circuitry
+### Required Components
+#### Physical Layout Components
+- 1 x ATmega328P microcontroller flashed with the Arduino Uno bootloader
+- 1 x 16MHz crystal oscillator
+- 1 x Seeeduino Xiao
+- 1 x Piezo buzzer
+- 4 x 5010G 180 degree servo motors
+- 2 x 22pF ceramic capacitors, through hole
+- 1 x 1000uF electrolytic capacitor
+- 2 x breadboards with dual power rails
+- 1 x 4 AA battery holder, plastic
+- 1 x 2 way PCB mount screw terminal, 5mm pitch
+
+#### Programming Components
+- 1 x Duinotech ISP Programmer for Arduino and AVR
+- 1 x Duinotech Arduino Compatible AVR ISP 10pin to 6pin Adaptor
+
+## Circuit Layout
+![image](Circuit_Layout.jpg)
+
+## Programming the microcontroller
+The ATmega328P microcontroller comes pre-flashed with the code to run this project. However, if you are interested in flashing the controller yourself, follow the steps below.
+1. If you do not have the AVRDUDE or Zadig configuration files downloaded, follow the documentation instructions [here](https://www.jaycar.com.au/medias/sys_master/images/images/9963160993822/XC4627-manualMain.pdf); the software zip file it mentions can be found [here](https://www.jaycar.com.au/duinotech-isp-programmer-for-arduino-and-avr/p/XC4627)
+2. Connect the ISP programmer to the computer and board per the circuit layout above
+3. Open the atmega328p_lambda_mc.ino file in the Arduino IDE
+4. Verify the code file
+5. Under the Sketch tab, click Export Compiled Binary
+6. In the AVRDUDE GUI, select the atmega328p_lambda_mc folder as the directory and select the exported binary file to upload
+7. Click the upload button and wait for the upload sequence to finish
\ No newline at end of file
diff --git a/build/vision/README.md b/build/vision/README.md
index e69de29..71f1dbf 100644
--- a/build/vision/README.md
+++ b/build/vision/README.md
@@ -0,0 +1,121 @@
+# Dependencies
+
+These programs rely on the following libraries:
+
+```
+numpy
+Pillow
+Pytesseract
+opencv-python-headless
+```
+
+The calibration script relies on the above, as well as
+
+```
+matplotlib
+```
+
+For convenience, there is an `environment.yaml` you can use to create a conda
+virtual environment with these libraries already installed for this section.
+You can create and activate this environment with the following.
+
+```
+conda env create -f environment.yaml
+conda activate lambda-vision
+```
+
+Or you can create your own environment and install the packages with
+
+```
+conda create -n your_env_name
+conda activate your_env_name
+conda install numpy pillow pytesseract matplotlib
+pip install opencv-python-headless
+```
+
+Once the dependencies are installed, run the following from the environment
+you are using
+
+```
+which python
+```
+
+and set `python_path` in constants.py to the result
+
+# Reader Script
+
+Given an image of the calendar and some presets created by the calibration
+script, this script goes through each time slot in the calendar and
+determines whether anything is written in that cell. If something is, it
+records the colour (red, blue, or black) and, if applicable, uses
+Pytesseract for OCR and saves the text written in the cell.
+
+This information is then saved in JSON format to a file called `coloured_time_slots.json`
+to be read by another program.
+
+## Usage
+
+### Setup and calibration
+
+Run the calibration script with
+
+```
+python calibrate.py
+```
+
+This opens an OpenCV window showing a live picture from the camera, which
+should be set up in the position it will occupy at program run time. Using
+this view, decide on a rotation angle for the image (must be one of 0, 90,
+180, or 270 degrees, clockwise).
+
+The program then opens a matplotlib window with an image taken at this rotation
+angle.
+
+Using this matplotlib window and image, zoom in to find the top left corner of the
+calendar (that is, the top left corner of the top-left-most time slot, not of the column
+or row names) and record its x, y coordinate. Then record the height and width of
+each time slot cell. When prompted, enter each value into the terminal after closing
+the matplotlib window. We now have the calibration for your setup.
+
+##### [constants.py](./constants.py)
+
+Your camera may capture different colour levels than ours, so if you are running into issues,
+look in the `constants.py` file and adjust the thresholds to ensure you
+are not losing pixels that should count as certain colours.
+
+### Running
+
+This script is typically run by `capture.py`, but if you want to manually check that the
+program works while debugging, you can do so with:
+
+```
+python reader.py local/path/to/image.jpg
+```
+
+# Capture Script
+
+This is the main script. It handles opening the webcam and detecting movement,
+then takes an image once no movement has been detected for 3 seconds following
+movement. This avoids processing images when nothing has changed, and also
+ensures we wait until the user has finished with the whiteboard before
+processing an image.
+
+## Usage
+
+As this script calls [reader.py](#reader-script), first complete the setup
+instructions in its [section](#setup-and-calibration).
+
+### Running
+
+We can then run the script with
+
+```
+python capture.py [-v] [-t]
+```
+
+The optional arguments control the following:
+
+- \[-v\] - visualiser. This opens a cv2 window showing what the camera
+  is seeing, as well as where the motion-detected contours are. For use in troubleshooting.
+- \[-t\] - timer. This prints to stdout how long it takes for the
+  reader script to process an image. For use in troubleshooting and optimisation tests.
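+
+# Output Format
+
+Each entry written to `coloured_time_slots.json` records the day name, the
+time slot index, any OCR text under `data`, and the detected colour(s) under
+`colour`, joined as a comma-separated string. As a rough sketch of a
+downstream consumer (the file name and keys come from this repo; the grouping
+logic is only an illustration, not part of the project):
+
+```
+import json
+
+# load the output written by reader.py
+with open('coloured_time_slots.json') as f:
+    slots = json.load(f)
+
+# group the marked time slots by day, keeping their colours
+by_day = {}
+for slot in slots:
+    colours = slot['colour'].split(',')  # e.g. 'red' or 'blue,black'
+    by_day.setdefault(slot['day'], []).append((slot['time_slot'], colours))
+
+for day, marked in by_day.items():
+    print(day, marked)
+```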
diff --git a/build/vision/calibrate.py b/build/vision/calibrate.py
new file mode 100644
index 0000000..c094065
--- /dev/null
+++ b/build/vision/calibrate.py
@@ -0,0 +1,261 @@
+import sys
+from collections import namedtuple
+
+import cv2
+import matplotlib
+import matplotlib.pyplot as plt
+import numpy as np
+from constants import num_days
+from constants import num_time_slots
+from PIL import Image
+
+matplotlib.use('qtagg')
+
+Point = namedtuple('Point', ['X', 'Y'])
+TimeSlot = namedtuple('TimeSlot', ['top_left', 'bottom_right'])
+
+"""
+NOTE: This file repeatedly closes all open windows and pre-configures settings
+for the different libraries. This works around an issue that arises when
+importing both OpenCV and matplotlib. These measures should fix all problems
+but may not work on your machine.
+"""
+
+
+def get_rot_angle(path: str = 'cap.jpg') -> int:
+    """
+    get_rot_angle()
+    ---------------
+    prompts the user for the orientation of their camera
+
+    :param path: path to the image we are checking. Defaults to cap.jpg
+    :return: the rotation angle
+    """
+    # default to 90, as the calendar is portrait
+    angle = 90
+    cap = cv2.VideoCapture(0)
+    if not cap.isOpened():
+        print('Error: Could not open camera')
+        exit()
+
+    cv2.namedWindow('Webcam', cv2.WINDOW_NORMAL)
+    exit_flag = False
+
+    while not exit_flag:
+        ret, frame = cap.read()
+        if not ret:
+            print('Error: Could not read frame')
+            break
+
+        rows, cols, _ = frame.shape
+        if angle == 0:
+            rotated_frame = frame
+        elif angle == 90:
+            rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+        elif angle == 180:
+            rotated_frame = cv2.rotate(frame, cv2.ROTATE_180)
+        elif angle == 270:
+            rotated_frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+
+        cv2.imshow('Webcam', rotated_frame)
+        key = cv2.waitKey(1)
+
+        if key == 27:  # wait for `esc` key to be pressed
+            check = float(
+                input('Please enter a rotation angle, or -1 to accept and save: ')
+            )
+            exit_flag = check == -1
+            if exit_flag:
+                cv2.imwrite(path, rotated_frame)
+            else:
+                angle = check
+
+    cv2.destroyAllWindows()
+    cap.release()
+    plt.close('all')
+    return angle
+
+
+def get_top_left(img: Image) -> (int, int):
+    """
+    get_top_left()
+    --------------
+    This method prompts the user to find the top left corner of the information
+    space of the calendar.
+
+    :param img: The image that we are considering, after rotation.
+    :return: a tuple of (left, top) which are the x, y coordinates of the
+        corner
+    """
+    img_array = np.array(img)
+    plt.imshow(img_array)
+    plt.show(block=False)
+    left = int(
+        input(
+            'Please enter the x value for the top left corner of the calendar: '
+        )
+    )
+    top = int(
+        input(
+            'Please enter the y value for the top left corner of the calendar: '
+        )
+    )
+    plt.close('all')
+    return (left, top)
+
+
+def show_crop(img: Image, top_left: (int, int)) -> bool:
+    """
+    show_crop()
+    -----------
+    This method shows the user a quick sanity check to ensure they are happy
+    with their choice.
+
+    :param img: the image we are considering.
+    :param top_left: the coordinates they gave.
+    :return: whether they are happy with their choice.
+    """
+    cropped = img.crop((top_left[0], top_left[1], img.size[0], img.size[1]))
+    plt.imshow(cropped)
+    plt.show(block=False)
+    check = input('Is this a good crop? (y, [n]): ')
+
+    plt.close('all')
+    return check == 'y'
+
+
+def get_cell_dims(img: Image, top_left: (int, int)) -> (float, float):
+    """
+    get_cell_dims()
+    ---------------
+    Prompts the user for the height and width, in pixels, of a time cell on
+    the calendar, based on the image provided
+
+    :param img: the image we are considering.
+    :param top_left: the user provided top left corner in (x, y) form.
+    :return: a tuple of floats in the form (width, height) containing the
+        dimensions of a time cell from the calendar
+    """
+    cropped = img.crop((top_left[0], top_left[1], img.size[0], img.size[1]))
+    cropped_array = np.array(cropped)
+    plt.imshow(cropped_array)
+    plt.show(block=False)
+    width = float(input('Please enter the width of a cell: '))
+    height = float(input('Please enter the height of a cell: '))
+
+    plt.close('all')
+    return (width, height)
+
+
+def show_cells(
+    img: Image, top_left: (int, int), cell_dims: (float, float)
+) -> bool:
+    """
+    show_cells()
+    ------------
+    shows the image of the calendar separated into each time cell, based on
+    the provided top left crop and time cell dimensions.
+
+    :param img: the image we are considering.
+    :param top_left: the user provided top left corner in (x, y) form.
+    :param cell_dims: the user provided cell dimensions in (width, height) form.
+    :return: whether the user is happy with their provided dimensions.
+    """
+    img = img.crop((top_left[0], top_left[1], img.size[0], img.size[1]))
+    day_time_slots = [  # yay list comprehension :)
+        TimeSlot(
+            top_left=Point(cell_dims[0] * i, 0),
+            bottom_right=Point(cell_dims[0] * (i + 1), np.floor(cell_dims[1])),
+        )
+        for i in range(num_days)
+    ]
+    day_time_crops = []
+
+    for day, (top, bot) in enumerate(day_time_slots):
+        time_crops = []
+        for time_slot in range(0, num_time_slots):
+            time_crops.append(
+                img.crop(
+                    (
+                        top.X,
+                        np.floor(top.Y + time_slot * cell_dims[1]),
+                        bot.X,
+                        np.floor(bot.Y + time_slot * cell_dims[1]),
+                    )
+                )
+            )
+        day_time_crops.append(time_crops)
+
+    fig, axes = plt.subplots(num_time_slots, num_days, figsize=(8, 12))
+    axis_index = 0
+    for i in range(num_time_slots):
+        for j in range(num_days):
+            axes.flat[axis_index].imshow(day_time_crops[j][i])
+            axes.flat[axis_index].axis('off')
+            axis_index += 1
+    plt.tight_layout()
+    plt.show(block=False)
+
+    check = input(
+        'Does this look like an accurate display of the cells? (y, [n]): '
+    )
+
+    return check == 'y'
+
+
+def save_settings(top_left: (int, int), cell_dims: (float, float), angle: int):
+    """
+    save_settings()
+    ---------------
+    Writes the user's input to the camera_constants file for use by other
+    scripts in order to ensure consistency.
+ + :param top_left: the user provided top left corner in (x, y) form + :param cell_dims: the user provided cell dimensions in (width, height) form + :param angle: the user provided rotation angle of the image + """ + with open('camera_constants.py', 'w') as file: + file.write(f'left = {top_left[0]}\n') + file.write(f'top = {top_left[1]}\n') + file.write(f'time_slot_width = {cell_dims[0]}\n') + file.write(f'time_slot_height = {cell_dims[1]}\n') + file.write(f'rotation_angle = {angle}\n') + print('done') + + +if __name__ == '__main__': + path = sys.path[0] + '/images/calibrate.jpg' + plt.close('all') + cv2.destroyAllWindows() + angle = get_rot_angle(path) + plt.close('all') + cv2.destroyAllWindows() + img = Image.open(path) + plt.close('all') + cv2.destroyAllWindows() + top_left = (0, 0) + good_bounds = False + while not good_bounds: + plt.close('all') + cv2.destroyAllWindows() + top_left = get_top_left(img) + plt.close('all') + cv2.destroyAllWindows() + good_bounds = show_crop(img, top_left) + plt.close('all') + cv2.destroyAllWindows() + cell_dims = ( + np.floor(img.size[0] / num_days) + top_left[0], + np.floor(img.size[1] / num_time_slots + top_left[1]), + ) + good_bounds = False + while not good_bounds: + plt.close('all') + cv2.destroyAllWindows() + cell_dims = get_cell_dims(img, top_left) + plt.close('all') + cv2.destroyAllWindows() + good_bounds = show_cells(img, top_left, cell_dims) + plt.close('all') + cv2.destroyAllWindows() + save_settings(top_left, cell_dims, angle) diff --git a/build/vision/camera_constants.py b/build/vision/camera_constants.py new file mode 100644 index 0000000..2a17bdd --- /dev/null +++ b/build/vision/camera_constants.py @@ -0,0 +1,5 @@ +left = 203 +top = 180 +time_slot_width = 115.0 +time_slot_height = 38.0 +rotation_angle = 90 diff --git a/build/vision/capture.py b/build/vision/capture.py new file mode 100644 index 0000000..b827e22 --- /dev/null +++ b/build/vision/capture.py @@ -0,0 +1,159 @@ +import datetime +import os +import subprocess +import time +from argparse import ArgumentParser +from dataclasses import dataclass +from typing import Union + +import cv2 +import numpy as np +from camera_constants import rotation_angle +from constants import python_path + +# adapted from Adrian Rosebrock's tutorial on: +# https://pyimagesearch.com/2015/06/01/home-surveillance-and-motion-detection-with-the-raspberry-pi-python-and-opencv/ +# Following the license found on: +# https://pyimagesearch.com/faqs/single-faq/what-is-the-code-license-associated-with-your-examples/ +# A screenshot of the license is also found in assets/pyimagesearchlicense.png + + +@dataclass +class MotionInfo: + """ + This class is used for containing the information about when motion has + happened etc. 
It is mostly used as a concise way of passing information around.
+    """
+    noMotionTime: datetime.timedelta
+    noMotionTimeStamp: datetime.datetime
+    avg: np.ndarray
+    thereWasMotion: bool
+    v: bool
+    t: bool
+
+
+parser = ArgumentParser(description='Capture data, with optional flags')
+parser.add_argument(
+    '-v', '--visualise', action='store_true', help='Enable visualiser mode'
+)
+parser.add_argument(
+    '-t', '--time', action='store_true', help='Enable timer for reader.py'
+)
+args = parser.parse_args()
+
+
+def do_cap(v: bool, t: bool):
+    """
+    do_cap()
+    --------
+    Performs the capture operations and contains the main loop
+
+    :param v: the visualise flag
+    :param t: the time flag
+    """
+    cap = cv2.VideoCapture(0)
+    time.sleep(1.0)  # ensure webcam is opened and balanced first
+    motion = MotionInfo(0, datetime.datetime.now(), None, True, v, t)
+    while not isinstance(motion, bool):
+        motion = detect_motion(cap, motion)
+
+    # cleanup the camera and close any open windows
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+def detect_motion(
+    cap: cv2.VideoCapture, motion: MotionInfo
+) -> Union[bool, MotionInfo]:
+    """
+    detect_motion()
+    ---------------
+    This method handles the logic for detecting motion and deciding when to
+    take an image and process the information.
+
+    :param cap: the cv2.VideoCapture object that is the camera's current output
+    :param motion: a MotionInfo object containing all important info about the
+        current run
+    :return: the MotionInfo object with updated values, or False when the main
+        loop should stop.
+    """
+    _, frame = cap.read()
+    timestamp = datetime.datetime.now()
+    if frame is None:
+        # frame read failed, so signal the main loop to stop
+        return False
+
+    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    grey = cv2.GaussianBlur(grey, (21, 21), 0)
+
+    if motion.avg is None:
+        motion.avg = grey.copy().astype('float')
+        return motion
+
+    cv2.accumulateWeighted(grey, motion.avg, 0.5)
+    frameDelta = cv2.absdiff(grey, cv2.convertScaleAbs(motion.avg))
+    thresh = cv2.threshold(frameDelta, 15, 255, cv2.THRESH_BINARY)[1]
+    thresh = cv2.dilate(thresh, None, iterations=2)
+
+    (contours, *_) = cv2.findContours(
+        thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+    )
+
+    for c in contours:
+        if cv2.contourArea(c) < 500:
+            continue
+        (x, y, w, h) = cv2.boundingRect(c)
+        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+        motion.noMotionTimeStamp = datetime.datetime.now()
+        motion.thereWasMotion = True
+
+    motion.noMotionTime = timestamp - motion.noMotionTimeStamp
+    if (
+        motion.noMotionTime > datetime.timedelta(seconds=3)
+        and motion.thereWasMotion is True
+    ):
+        take_image(frame, motion)
+
+    # if visualise option: show the frame and record if the user presses a key
+    if motion.v:
+        cv2.imshow('Motion Detect', frame)
+        key = cv2.waitKey(1) & 0xFF
+
+        # if the `q`, or `esc` key is pressed, break from the loop
+        if key == ord('q') or key == 27:
+            return False
+
+    return motion
+
+
+def take_image(frame: np.ndarray, motion: MotionInfo):
+    """
+    take_image()
+    ------------
+    Takes the image, rotates it appropriately, then saves it to the capture path
+
+    :param frame: the ndarray created by reading the current frame of the camera
+    :param motion: the MotionInfo object with important information
+    """
+    if rotation_angle == 90:
+        frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
+    elif rotation_angle == 180:
+        frame = cv2.rotate(frame, cv2.ROTATE_180)
+    elif rotation_angle == 270:
+        frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE)
+    cv2.imwrite(path, frame)
+    motion.thereWasMotion =
False + motion.noMotionTimeStamp = datetime.datetime.now() + subprocess.run(f'{python_path} {vision_path} {path}', shell=True) + # if time option: print time it takes to run reader script + if motion.t: + print(datetime.datetime.now() - motion.noMotionTimeStamp) + + +if __name__ == '__main__': + cv2.destroyAllWindows() + path = './images/capture.jpg' + path = os.path.abspath(path) + vision_path = './reader.py' + vision_path = os.path.abspath(vision_path) + + do_cap(args.visualise, args.time) diff --git a/build/vision/coloured_time_slots.json b/build/vision/coloured_time_slots.json new file mode 100644 index 0000000..dd8d7c5 --- /dev/null +++ b/build/vision/coloured_time_slots.json @@ -0,0 +1,230 @@ +[ + { + "day": "monday", + "time_slot": 5, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 6, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 7, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 8, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 9, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 10, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 11, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 12, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 13, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 14, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 15, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 16, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 17, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 18, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 19, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 20, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 21, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 22, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 23, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 24, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 25, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 26, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 27, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 28, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 29, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 30, + "data": "", + "colour": "red" + }, + { + "day": "monday", + "time_slot": 31, + "data": "", + "colour": "red" + }, + { + "day": "tuesday", + "time_slot": 0, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 1, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 2, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 3, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 4, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 5, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 6, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 7, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 8, + "data": "", + "colour": 
"black" + }, + { + "day": "tuesday", + "time_slot": 9, + "data": "", + "colour": "black" + }, + { + "day": "tuesday", + "time_slot": 13, + "data": "", + "colour": "red" + } +] \ No newline at end of file diff --git a/build/vision/constants.py b/build/vision/constants.py new file mode 100644 index 0000000..9794f61 --- /dev/null +++ b/build/vision/constants.py @@ -0,0 +1,26 @@ +import numpy as np + +python_path = '/Users/alexviller/micromamba/envs/tmp/bin/python' + + +colour_thresholds = { + 'red_min': np.array([100, 0, 0]), + 'red_max': np.array([255, 100, 100]), + 'blue_min': np.array([0, 0, 100]), + 'blue_max': np.array([100, 100, 255]), + 'black_min': np.array([0, 0, 0]), + 'black_max': np.array([75, 75, 75]), +} + +num_time_slots = 35 +num_days = 7 + +days_of_the_week = { + 0: 'monday', + 1: 'tuesday', + 2: 'wednesday', + 3: 'thursday', + 4: 'friday', + 5: 'saturday', + 6: 'sunday', +} diff --git a/build/vision/environment.yaml b/build/vision/environment.yaml index 16db501..abd3ea5 100644 --- a/build/vision/environment.yaml +++ b/build/vision/environment.yaml @@ -1,28 +1,68 @@ name: lambda-vision channels: -- conda-forge + - defaults dependencies: -- _libgcc_mutex=0.1=conda_forge -- _openmp_mutex=4.5=2_gnu -- bzip2=1.0.8=h7f98852_4 -- ca-certificates=2023.7.22=hbcca054_0 -- ld_impl_linux-64=2.40=h41732ed_0 -- libexpat=2.5.0=hcb278e6_1 -- libffi=3.4.2=h7f98852_5 -- libgcc-ng=13.2.0=h807b86a_1 -- libgomp=13.2.0=h807b86a_1 -- libnsl=2.0.0=h7f98852_0 -- libsqlite=3.43.0=h2797004_0 -- libuuid=2.38.1=h0b41bf4_0 -- libzlib=1.2.13=hd590300_5 -- ncurses=6.4=hcb278e6_0 -- openssl=3.1.2=hd590300_0 -- pip=23.2.1=pyhd8ed1ab_0 -- python=3.11.5=hab00c5b_0_cpython -- readline=8.2=h8228510_1 -- setuptools=68.2.2=pyhd8ed1ab_0 -- tk=8.6.12=h27826a3_0 -- tzdata=2023c=h71feb2d_0 -- wheel=0.41.2=pyhd8ed1ab_0 -- xz=5.2.6=h166bdaf_0 - + - blas=1.0=openblas + - brotli=1.0.9=h1a28f6b_7 + - brotli-bin=1.0.9=h1a28f6b_7 + - bzip2=1.0.8=h620ffc9_4 + - ca-certificates=2023.08.22=hca03da5_0 + - contourpy=1.0.5=py311h48ca7d4_0 + - cycler=0.11.0=pyhd3eb1b0_0 + - fonttools=4.25.0=pyhd3eb1b0_0 + - freetype=2.12.1=h1192e45_0 + - giflib=5.2.1=h80987f9_3 + - icu=73.1=h313beb8_0 + - jpeg=9e=h80987f9_1 + - kiwisolver=1.4.4=py311h313beb8_0 + - lcms2=2.12=hba8e193_0 + - leptonica=1.82.0=h37c441e_0 + - lerc=3.0=hc377ac9_0 + - libarchive=3.6.2=h62fee54_2 + - libbrotlicommon=1.0.9=h1a28f6b_7 + - libbrotlidec=1.0.9=h1a28f6b_7 + - libbrotlienc=1.0.9=h1a28f6b_7 + - libcxx=14.0.6=h848a8c0_0 + - libdeflate=1.17=h80987f9_1 + - libffi=3.4.4=hca03da5_0 + - libgfortran=5.0.0=11_3_0_hca03da5_28 + - libgfortran5=11.3.0=h009349e_28 + - libiconv=1.16=h1a28f6b_2 + - libopenblas=0.3.21=h269037a_0 + - libpng=1.6.39=h80987f9_0 + - libtiff=4.5.1=h313beb8_0 + - libwebp=1.3.2=ha3663a8_0 + - libwebp-base=1.3.2=h80987f9_0 + - libxml2=2.10.4=h0dcf63f_1 + - llvm-openmp=14.0.6=hc6e5704_0 + - lz4-c=1.9.4=h313beb8_0 + - matplotlib=3.7.2=py311hca03da5_0 + - matplotlib-base=3.7.2=py311h7aedaa7_0 + - munkres=1.1.4=py_0 + - ncurses=6.4=h313beb8_0 + - numpy=1.26.0=py311he598dae_0 + - numpy-base=1.26.0=py311hfbfe69c_0 + - openjpeg=2.3.0=h7a6adac_2 + - openssl=3.0.11=h1a28f6b_2 + - packaging=23.1=py311hca03da5_0 + - pillow=10.0.1=py311h3b245a6_0 + - pip=23.2.1=py311hca03da5_0 + - pyparsing=3.0.9=py311hca03da5_0 + - pytesseract=0.3.10=py311hca03da5_0 + - python=3.11.5=hb885b13_0 + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - readline=8.2=h1a28f6b_0 + - setuptools=68.0.0=py311hca03da5_0 + - six=1.16.0=pyhd3eb1b0_1 + - sqlite=3.41.2=h80987f9_0 + - 
tesseract=5.2.0=hc377ac9_0 + - tk=8.6.12=hb8d0fd4_0 + - tornado=6.3.3=py311h80987f9_0 + - tzdata=2023c=h04d1e81_0 + - wheel=0.41.2=py311hca03da5_0 + - xz=5.4.2=h80987f9_0 + - zlib=1.2.13=h5a0b063_0 + - zstd=1.5.5=hd90d995_0 + - pip: + - opencv-python-headless==4.8.1.78 +prefix: /Users/alexviller/miniconda3/envs/lambda-vision diff --git a/build/vision/images/capture.jpg b/build/vision/images/capture.jpg new file mode 100644 index 0000000..de815f6 Binary files /dev/null and b/build/vision/images/capture.jpg differ diff --git a/build/vision/images/test1.jpg b/build/vision/images/test1.jpg deleted file mode 100644 index 12be05f..0000000 Binary files a/build/vision/images/test1.jpg and /dev/null differ diff --git a/build/vision/images/test2.jpg b/build/vision/images/test2.jpg deleted file mode 100644 index 4c1571c..0000000 Binary files a/build/vision/images/test2.jpg and /dev/null differ diff --git a/build/vision/images/test3.jpg b/build/vision/images/test3.jpg deleted file mode 100644 index 66afc91..0000000 Binary files a/build/vision/images/test3.jpg and /dev/null differ diff --git a/build/vision/reader.py b/build/vision/reader.py index ef95dcc..d3ecc2c 100644 --- a/build/vision/reader.py +++ b/build/vision/reader.py @@ -1,121 +1,161 @@ import json +import sys from collections import namedtuple -import matplotlib.pyplot as plt import numpy as np import pytesseract +from camera_constants import left +from camera_constants import time_slot_height +from camera_constants import time_slot_width +from camera_constants import top +from constants import colour_thresholds +from constants import days_of_the_week +from constants import num_days +from constants import num_time_slots from PIL import Image -from tqdm import tqdm - -days_of_the_week = { - 0: 'Monday', - 1: 'Tuesday', - 2: 'Wednesday', - 3: 'Thursday', - 4: 'Friday', - 5: 'Saturday', - 6: 'Sunday', -} Point = namedtuple('Point', ['X', 'Y']) TimeSlot = namedtuple('TimeSlot', ['top_left', 'bottom_right']) -time_slot_width = 141 -time_slot_height = 49 -time_slot_size = time_slot_width * time_slot_height -num_time_slots = 35 -num_days = 7 - def main(image: Image): - threshold = 100 + """ + main() + ------ + This is the main loop of the program. This handles almost all logic. 
- day_time_slots = [ # yay list comprehension :) + :param image: The image to be processed + """ + day_time_slots = [ TimeSlot( top_left=Point(time_slot_width * i, 0), - bottom_right=Point(time_slot_width * (i + 1), time_slot_height), + bottom_right=Point( + time_slot_width * (i + 1), np.floor(time_slot_height) + ), ) for i in range(num_days) ] - day_time_crops = [] coloured_time_slots = [] for day, (top_left, bottom_right) in enumerate(day_time_slots): - time_crops = [] - print(f'On day {day}') - for time_slot in tqdm(range(0, num_time_slots)): + for time_slot in range(0, num_time_slots): time_slot_crop = image.crop( ( top_left.X, - top_left.Y + time_slot * time_slot_height, + np.floor(top_left.Y + time_slot * time_slot_height), bottom_right.X, - bottom_right.Y + time_slot * time_slot_height, + np.floor(bottom_right.Y + time_slot * time_slot_height), ) ) time_slot_array = np.array(time_slot_crop) - threshold_mask = (time_slot_array <= threshold).all(axis=-1) - num_coloured = np.sum(threshold_mask) - is_coloured = (num_coloured / time_slot_size) * 100 >= 3 - time_crops.append((time_slot_crop, is_coloured)) + is_coloured, colour, ocr_result = get_info(time_slot_array) + if is_coloured: - ocr_result = pytesseract.image_to_string(time_slot_crop) coloured_time_slots.append( { 'day': days_of_the_week.get(day), 'time_slot': time_slot, 'data': ocr_result, + 'colour': ','.join(colour), # logic to get out of list } ) - day_time_crops.append(time_crops) - fig, axes = plt.subplots(num_time_slots, num_days, figsize=(8, 12)) - axis_index = 0 - print('Mon\tTue\tWed\tThu\tFri\tSat\tSun') - for time in range(5): - print( - f'{day_time_crops[0][time][1]}\t{day_time_crops[1][time][1]}\t' - + f'{day_time_crops[2][time][1]}\t{day_time_crops[3][time][1]}\t' - + f'{day_time_crops[4][time][1]}\t{day_time_crops[5][time][1]}\t' - + f'{day_time_crops[6][time][1]}' - ) - for i in range(num_time_slots): - for j in range(num_days): - axes.flat[axis_index].imshow(day_time_crops[j][i][0]) - axes.flat[axis_index].axis('off') - axis_index += 1 - plt.tight_layout() - plt.show() + with open('coloured_time_slots.json', 'w') as json_file: json.dump(coloured_time_slots, json_file, indent=4) -def normalise_image(img_path: str) -> Image: +def get_info(time_slot_array: np.ndarray) -> (bool, [str], str): + """ + get_info() + ---------- + Gets the information out of a time slot array. + + :param time_slot_array: the array created from converting an image to array. 
+    :return: a tuple containing: a boolean for whether the time slot is
+        coloured, a list of the colour names it is marked with, and the OCR
+        result string if applicable
+    """
+    time_slot_size = time_slot_array.shape[0] * time_slot_array.shape[1]
+
+    red_mask = np.all(
+        (time_slot_array >= colour_thresholds.get('red_min'))
+        & (time_slot_array <= colour_thresholds.get('red_max')),
+        axis=-1,
+    )
+    blue_mask = np.all(
+        (time_slot_array >= colour_thresholds.get('blue_min'))
+        & (time_slot_array <= colour_thresholds.get('blue_max')),
+        axis=-1,
+    )
+    black_mask = np.all(
+        (time_slot_array >= colour_thresholds.get('black_min'))
+        & (time_slot_array <= colour_thresholds.get('black_max')),
+        axis=-1,
+    )
+
+    # set all non-coloured pixels to white
+    output_red = np.ones_like(time_slot_array) * 255
+    output_blue = np.ones_like(time_slot_array) * 255
+    output_black = np.ones_like(time_slot_array) * 255
+
+    # set all coloured pixels to black
+    output_red[red_mask] = [0, 0, 0]
+    output_blue[blue_mask] = [0, 0, 0]
+    output_black[black_mask] = [0, 0, 0]
+
+    # get the counts of the coloured pixels
+    num_coloured_red = np.count_nonzero(red_mask)
+    num_coloured_blue = np.count_nonzero(blue_mask)
+    num_coloured_black = np.count_nonzero(black_mask)
+
+    # find what percentage they are of the whole image
+    pcent_red = (num_coloured_red / time_slot_size) * 100
+    pcent_blue = (num_coloured_blue / time_slot_size) * 100
+    pcent_black = (num_coloured_black / time_slot_size) * 100
+
+    # threshold how many pixels must be coloured before the cell counts as marked
+    is_blue = pcent_blue >= 2
+    is_red = pcent_red >= 2
+    is_black = pcent_black >= 18
+
+    is_coloured = is_blue or is_red or is_black
+    colour = []
+
+    # add each detected colour to the colour list
+    if is_blue:
+        colour.append('blue')
+    if is_red:
+        colour.append('red')
+    if is_black:
+        colour.append('black')
+
+    # note: OCR runs on output_black only; output_red and output_blue are
+    # currently unused
+    ocr_result = pytesseract.image_to_string(output_black)
+
+    return (is_coloured, colour, ocr_result)
+
+
+def crop_image(img_path: str) -> Image:
     """
-    normalise_image()
-    -----------------
-    method for normalising our input image.
+    crop_image()
+    ------------
+    method for cropping our input image to the top left corner of the calendar.
""" - img = Image.open(img_path) - - left = 1013 - top = 219 - bottom = 1940 - right = 1990 - - img = img.crop((left, top, right, bottom)) - img_array = np.array(img) - threshold = 150 - - white_mask = (img_array >= threshold).all(axis=-1) - img_array[white_mask] = [255, 255, 255] - img_array[~white_mask] = [0, 0, 0] - new_img = Image.fromarray(img_array) - - return new_img + try: + img = Image.open(img_path) + img = img.crop((left, top, img.size[0], img.size[1])) + return img.convert('RGB') + except Exception as e: + # Error for bad path + print(f'Error: {e} has occurred.') + print(f'Check path: {img_path}') + exit() if __name__ == '__main__': - path = './images/test1.jpg' - image = normalise_image(path) - # plt.imshow(image) - # plt.show() + if len(sys.argv) > 1: + path = sys.argv[1] + else: + print('Please provide a path to an image') + exit() + image = crop_image(path) main(image) diff --git a/environment.yaml b/environment.yaml index 558ac68..bc191ba 100644 --- a/environment.yaml +++ b/environment.yaml @@ -1,98 +1,104 @@ +--- name: lambda-env -channels: -- anaconda -- conda-forge +channels: [conda-forge, defaults] dependencies: -- annotated-types=0.5.0=pyhd8ed1ab_0 -- anyio=3.7.1=pyhd8ed1ab_0 -- brotli=1.1.0=hb547adb_0 -- brotli-bin=1.1.0=hb547adb_0 -- bzip2=1.0.8=h3422bc3_4 -- ca-certificates=2023.7.22=hf0a4a13_0 -- certifi=2023.7.22=pyhd8ed1ab_0 -- cffi=1.15.1=py311hae827db_3 -- click=8.1.7=unix_pyh707e725_0 -- contourpy=1.1.1=py311he4fd1f5_0 -- cryptography=41.0.4=py311h5fb2c35_0 -- cycler=0.11.0=pyhd8ed1ab_0 -- dnspython=2.4.2=pyhd8ed1ab_0 -- exceptiongroup=1.1.3=pyhd8ed1ab_0 -- fastapi=0.103.1=pyhd8ed1ab_0 -- fonttools=4.42.1=py311heffc1b2_0 -- freetype=2.12.1=hadb7bae_2 -- h11=0.14.0=pyhd8ed1ab_0 -- h2=4.1.0=pyhd8ed1ab_0 -- hpack=4.0.0=py_0 -- httpcore=0.13.2=pyhd3eb1b0_0 -- httptools=0.6.0=py311heffc1b2_0 -- hyperframe=6.0.1=pyhd3eb1b0_0 -- idna=3.4=pyhd8ed1ab_0 -- kiwisolver=1.4.5=py311he4fd1f5_0 -- lcms2=2.15=h40e5a24_2 -- lerc=4.0.0=h9a09cb3_0 -- libblas=3.9.0=18_osxarm64_openblas -- libbrotlicommon=1.1.0=hb547adb_0 -- libbrotlidec=1.1.0=hb547adb_0 -- libbrotlienc=1.1.0=hb547adb_0 -- libcblas=3.9.0=18_osxarm64_openblas -- libcxx=16.0.6=h4653b0c_0 -- libdeflate=1.19=hb547adb_0 -- libexpat=2.5.0=hb7217d7_1 -- libffi=3.4.2=h3422bc3_5 -- libgfortran=5.0.0=13_2_0_hd922786_1 -- libgfortran5=13.2.0=hf226fd6_1 -- libjpeg-turbo=2.1.5.1=hb547adb_1 -- liblapack=3.9.0=18_osxarm64_openblas -- libopenblas=0.3.24=openmp_hd76b1f2_0 -- libpng=1.6.39=h76d750c_0 -- libsqlite=3.43.0=hb31c410_0 -- libtiff=4.6.0=h77c4dce_1 -- libuv=1.46.0=hb547adb_0 -- libwebp-base=1.3.2=hb547adb_0 -- libxcb=1.15=hf346824_0 -- libzlib=1.2.13=h53f4e23_5 -- llvm-openmp=16.0.6=h1c12783_0 -- matplotlib=3.8.0=py311ha1ab1f8_0 -- matplotlib-base=3.8.0=py311h3bc9839_0 -- munkres=1.1.4=pyh9f0ad1d_0 -- ncurses=6.4=h7ea286d_0 -- numpy=1.26.0=py311hb8f3215_0 -- openjpeg=2.5.0=h4c1507b_3 -- openssl=3.1.3=h53f4e23_0 -- packaging=23.1=pyhd8ed1ab_0 -- pillow=10.0.1=py311he9c13d2_0 -- pip=23.2.1=pyhd8ed1ab_0 -- pthread-stubs=0.4=h27ca646_1001 -- pycparser=2.21=pyhd3eb1b0_0 -- pydantic=2.3.0=pyhd8ed1ab_0 -- pydantic-core=2.6.3=py311h0563b04_0 -- pymongo=4.5.0=py311ha891d26_0 -- pyparsing=3.1.1=pyhd8ed1ab_0 -- pytesseract=0.3.12=pyhd8ed1ab_0 -- python=3.11.5=h47c9636_0_cpython -- python-dateutil=2.8.2=pyhd8ed1ab_0 -- python-dotenv=1.0.0=pyhd8ed1ab_1 -- python_abi=3.11=4_cp311 -- pyyaml=6.0.1=py311heffc1b2_0 -- readline=8.2=h92ec313_1 -- setuptools=68.2.2=pyhd8ed1ab_0 -- six=1.16.0=pyh6c4a22f_0 -- sniffio=1.3.0=pyhd8ed1ab_0 -- 
starlette=0.27.0=pyhd8ed1ab_0 -- tk=8.6.12=he1e0b03_0 -- tornado=6.3.3=py311heffc1b2_0 -- typing-extensions=4.8.0=hd8ed1ab_0 -- typing_extensions=4.8.0=pyha770c72_0 -- tzdata=2023c=h71feb2d_0 -- uvicorn=0.23.2=py311h267d04e_0 -- uvicorn-standard=0.23.2=ha1ab1f8_0 -- uvloop=0.17.0=py311he2be06e_1 -- watchfiles=0.20.0=py311h0563b04_0 -- websockets=11.0.3=py311heffc1b2_0 -- wheel=0.41.2=pyhd8ed1ab_0 -- xorg-libxau=1.0.11=hb547adb_0 -- xorg-libxdmcp=1.1.3=h27ca646_0 -- xz=5.2.6=h57fd34a_0 -- yaml=0.2.5=h3422bc3_2 -- zstd=1.5.5=h4f39d0f_0 - + - annotated-types=0.6.0=pyhd8ed1ab_0 + - anyio=3.7.1=pyhd8ed1ab_0 + - blas=1.0=openblas + - brotli=1.0.9=h1a28f6b_7 + - brotli-bin=1.0.9=h1a28f6b_7 + - bzip2=1.0.8=h620ffc9_4 + - ca-certificates=2023.7.22=hf0a4a13_0 + - certifi=2023.7.22=pyhd8ed1ab_0 + - cffi=1.16.0=py311h4a08483_0 + - click=8.1.7=unix_pyh707e725_0 + - contourpy=1.0.5=py311h48ca7d4_0 + - cryptography=41.0.4=py311h5fb2c35_0 + - cycler=0.11.0=pyhd3eb1b0_0 + - dnspython=2.4.2=pyhd8ed1ab_0 + - exceptiongroup=1.1.3=pyhd8ed1ab_0 + - fastapi=0.103.2=pyhd8ed1ab_0 + - fonttools=4.25.0=pyhd3eb1b0_0 + - freetype=2.12.1=h1192e45_0 + - giflib=5.2.1=h80987f9_3 + - h11=0.14.0=pyhd8ed1ab_0 + - h2=4.1.0=pyhd8ed1ab_0 + - hpack=4.0.0=pyh9f0ad1d_0 + - httpcore=1.0.0=pyhd8ed1ab_0 + - httptools=0.6.0=py311heffc1b2_1 + - hyperframe=6.0.1=pyhd8ed1ab_0 + - icu=73.1=h313beb8_0 + - idna=3.4=pyhd8ed1ab_0 + - jpeg=9e=h80987f9_1 + - kiwisolver=1.4.4=py311h313beb8_0 + - lcms2=2.12=hba8e193_0 + - leptonica=1.82.0=h37c441e_0 + - lerc=3.0=hc377ac9_0 + - libarchive=3.6.2=h62fee54_2 + - libbrotlicommon=1.0.9=h1a28f6b_7 + - libbrotlidec=1.0.9=h1a28f6b_7 + - libbrotlienc=1.0.9=h1a28f6b_7 + - libcxx=16.0.6=h4653b0c_0 + - libdeflate=1.17=h80987f9_1 + - libexpat=2.5.0=hb7217d7_1 + - libffi=3.4.4=hca03da5_0 + - libgfortran=5.0.0=11_3_0_hca03da5_28 + - libgfortran5=11.3.0=h009349e_28 + - libiconv=1.16=h1a28f6b_2 + - libopenblas=0.3.21=h269037a_0 + - libpng=1.6.39=h80987f9_0 + - libsqlite=3.43.2=h091b4b1_0 + - libtiff=4.5.1=h313beb8_0 + - libuv=1.46.0=hb547adb_0 + - libwebp=1.3.2=ha3663a8_0 + - libwebp-base=1.3.2=h80987f9_0 + - libxml2=2.10.4=h0dcf63f_1 + - libzlib=1.2.13=h53f4e23_5 + - llvm-openmp=14.0.6=hc6e5704_0 + - lz4-c=1.9.4=h313beb8_0 + - matplotlib=3.7.2=py311hca03da5_0 + - matplotlib-base=3.7.2=py311h7aedaa7_0 + - munkres=1.1.4=py_0 + - ncurses=6.4=h313beb8_0 + - numpy=1.26.0=py311he598dae_0 + - numpy-base=1.26.0=py311hfbfe69c_0 + - openjpeg=2.3.0=h7a6adac_2 + - openssl=3.1.3=h53f4e23_0 + - packaging=23.1=py311hca03da5_0 + - pillow=10.0.1=py311h3b245a6_0 + - pip=23.2.1=py311hca03da5_0 + - pycparser=2.21=pyhd8ed1ab_0 + - pydantic=2.4.2=pyhd8ed1ab_0 + - pydantic-core=2.10.1=py311h0563b04_0 + - pymongo=4.5.0=py311ha891d26_1 + - pyparsing=3.0.9=py311hca03da5_0 + - pytesseract=0.3.10=py311hca03da5_0 + - python=3.11.6=h47c9636_0_cpython + - python-dateutil=2.8.2=pyhd3eb1b0_0 + - python-dotenv=1.0.0=pyhd8ed1ab_1 + - python_abi=3.11=4_cp311 + - pyyaml=6.0.1=py311heffc1b2_1 + - readline=8.2=h1a28f6b_0 + - setuptools=68.0.0=py311hca03da5_0 + - six=1.16.0=pyhd3eb1b0_1 + - sniffio=1.3.0=pyhd8ed1ab_0 + - sqlite=3.41.2=h80987f9_0 + - starlette=0.27.0=pyhd8ed1ab_0 + - tesseract=5.2.0=hc377ac9_0 + - tk=8.6.13=hb31c410_0 + - tornado=6.3.3=py311h80987f9_0 + - typing-extensions=4.8.0=hd8ed1ab_0 + - typing_extensions=4.8.0=pyha770c72_0 + - tzdata=2023c=h04d1e81_0 + - uvicorn=0.23.2=py311h267d04e_1 + - uvicorn-standard=0.23.2=ha1ab1f8_1 + - uvloop=0.17.0=py311heffc1b2_2 + - watchfiles=0.20.0=py311h0563b04_2 + - 
websockets=11.0.3=py311heffc1b2_1 + - wheel=0.41.2=py311hca03da5_0 + - xz=5.4.2=h80987f9_0 + - yaml=0.2.5=h3422bc3_2 + - zlib=1.2.13=h53f4e23_5 + - zstd=1.5.5=hd90d995_0 + - pip: [opencv-python-headless==4.8.1.78] +prefix: /Users/alexviller/miniconda3/envs/lambda-vision diff --git a/kickstarter-video/decoclock.blend b/kickstarter-video/decoclock.blend new file mode 100644 index 0000000..fcfbee9 Binary files /dev/null and b/kickstarter-video/decoclock.blend differ diff --git a/kickstarter-video/intro-sequence-storyboard.mp4 b/kickstarter-video/intro-sequence-storyboard.mp4 new file mode 100644 index 0000000..abea846 Binary files /dev/null and b/kickstarter-video/intro-sequence-storyboard.mp4 differ
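For context on how the pieces in this diff connect: `capture.py` invokes `reader.py`, which writes `coloured_time_slots.json`, and `build/db-handler/testing.py` shows the whiteboard API accepting a body of entries with `day`, `time_slot`, and `data` fields. Below is a minimal sketch of forwarding the vision output to that API; the endpoint, port, and user id are copied from testing.py, and because that example does not send a `colour` field, it is stripped here (an assumption, since the API schema itself is not shown in this diff).

```
import json

import requests

# load the vision pipeline's output (written by reader.py)
with open('coloured_time_slots.json') as f:
    slots = json.load(f)

# testing.py only demonstrates day/time_slot/data fields, so drop colour
body = [
    {'day': s['day'], 'time_slot': s['time_slot'], 'data': s['data']}
    for s in slots
]

headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
r = requests.post(
    'http://127.0.0.1:8000/whiteboard/user_4',  # user id taken from testing.py
    headers=headers,
    json={'body': body},
)
print(r.json())
```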