diff --git a/.env-example b/.env-example
index dd5893a..5f0be1d 100644
--- a/.env-example
+++ b/.env-example
@@ -8,11 +8,20 @@ MONGODB_CONNECTION_URI=""
 # Hookdeck Project API Key
 # Hookdeck Dashboard -> Settings -> Secrets
 HOOKDECK_PROJECT_API_KEY=""
+HOOKDECK_WEBHOOK_SECRET=
 
 # Replicate API Token
 REPLICATE_API_TOKEN=""
+# Replicate Dashboard -> Account Settings -> Webhooks -> Show signing key
+REPLICATE_WEBHOOKS_SECRET=
+
+# Note: The following are auto-populated
+# when you run create-hookdeck-connections.py
+
+# Hookdeck Source inbound queue
+HOOKDECK_REPLICATE_API_QUEUE_API_KEY=
 
 # Hookdeck Source URLs
-# These will be automatically populated for you in the next step
+HOOKDECK_REPLICATE_API_QUEUE_URL=
 AUDIO_WEBHOOK_URL=""
 EMBEDDINGS_WEBHOOK_URL=""
\ No newline at end of file
diff --git a/README.md b/README.md
index 300e866..4e69fd9 100644
--- a/README.md
+++ b/README.md
@@ -94,7 +94,7 @@ poetry run python -m flask --app app --debug run
 Create localtunnels to receive webhooks from the two Hookdeck Connections:
 
 ```sh
-hookdeck listen '*' 5000
+hookdeck listen 5000 '*'
 ```
 
 Navigate to `localhost:5000` within your web browser.
diff --git a/allthethings/generators.py b/allthethings/generators.py
index 685d93d..24b6041 100644
--- a/allthethings/generators.py
+++ b/allthethings/generators.py
@@ -1,37 +1,40 @@
-import replicate
-
+import httpx
 from config import Config
 
 
 class AsyncEmbeddingsGenerator:
-    def __init__(self):
-        self.WEBHOOK_URL = Config.EMBEDDINGS_WEBHOOK_URL
-        self.model = replicate.models.get("replicate/all-mpnet-base-v2")
-        self.version = self.model.versions.get(
-            "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305"
-        )
-
-    def generate(self, text):
-        input = {"text": text}
-        prediction = replicate.predictions.create(
-            version=self.version,
-            input=input,
-            webhook=self.WEBHOOK_URL,
-            webhook_events_filter=["completed"],
+    def generate(self, id, text):
+        payload = {
+            "version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305",
+            "input": {"text": text},
+            "webhook": f"{Config.EMBEDDINGS_WEBHOOK_URL}/{id}",
+            "webhook_events_filter": ["completed"],
+        }
+
+        response = httpx.request(
+            "POST",
+            f"{Config.HOOKDECK_REPLICATE_API_QUEUE_URL}/predictions",
+            headers=Config.HOOKDECK_QUEUE_AUTH_HEADERS,
+            json=payload,
         )
-        return prediction
+        return response.json()
 
 
 class SyncEmbeddingsGenerator:
     def generate(self, text):
-
-        input = {"text": text}
-        output = replicate.run(
-            "replicate/all-mpnet-base-v2:b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305",
-            input=input,
+        payload = {
+            "version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305",
+            "input": {"text": text},
+        }
+
+        response = httpx.request(
+            "POST",
+            "https://api.replicate.com/v1/predictions",
+            headers={**Config.REPLICATE_API_AUTH_HEADERS, "Prefer": "wait"},
+            json=payload,
+            timeout=180,
         )
-        return output
+        return response.json()
diff --git a/allthethings/processors.py b/allthethings/processors.py
index 69026e9..4a3b445 100644
--- a/allthethings/processors.py
+++ b/allthethings/processors.py
@@ -1,5 +1,4 @@
-import replicate
-
+import httpx
 from config import Config
 
 
@@ -17,14 +16,7 @@ def get_asset_processor(
 
 
 class AudioProcessor:
-    def __init__(self):
-        self.WEBHOOK_URL = Config.AUDIO_WEBHOOK_URL
-        self.model = replicate.models.get("openai/whisper")
-        self.version = self.model.versions.get(
-            "cdd97b257f93cb89dede1c7584e3f3dfc969571b357dbcee08e793740bedd854"
-        )
-
-    def process(self, url):
+    def process(self, id, url):
         input = {
             "audio": url,
"model": "large-v3", @@ -40,11 +32,18 @@ def process(self, url): "temperature_increment_on_fallback": 0.2, } - prediction = replicate.predictions.create( - version=self.version, - input=input, - webhook=self.WEBHOOK_URL, - webhook_events_filter=["completed"], + payload = { + "version": "cdd97b257f93cb89dede1c7584e3f3dfc969571b357dbcee08e793740bedd854", + "input": input, + "webhook": f"{Config.AUDIO_WEBHOOK_URL}/{id}", + "webhook_events_filter": ["completed"], + } + + response = httpx.request( + "POST", + f"{Config.HOOKDECK_REPLICATE_API_QUEUE_URL}/predictions", + headers=Config.HOOKDECK_QUEUE_AUTH_HEADERS, + json=payload, ) - return prediction + return response.json() diff --git a/app.py b/app.py index c6ffdac..6c52a37 100644 --- a/app.py +++ b/app.py @@ -1,11 +1,16 @@ -import urllib +import httpx from urllib.parse import urlparse +from bson import ObjectId +import hmac +import hashlib +import base64 from flask import Flask, jsonify, request, render_template, redirect, url_for, flash from config import Config from allthethings.mongo import Database from allthethings.processors import get_asset_processor + from allthethings.generators import ( AsyncEmbeddingsGenerator, SyncEmbeddingsGenerator, @@ -49,32 +54,15 @@ def index(): return render_template("home.html", indexes=results) -@app.route("/search", methods=["GET"]) -def search(): - return render_template("search.html", results=[]) - - -@app.route("/search", methods=["POST"]) -def search_post(): - query = request.form["query"] - - app.logger.info("Query submitted") - app.logger.debug(query) - - results = query_vector_search(query) - - results = format_results(results) - - # TODO: look into warning logged here - app.logger.debug("Formatted search results", results) - - return render_template("search.html", results=results, query=query) - - @app.route("/process", methods=["POST"]) def process(): url = request.form["url"] + parsed_url = urlparse(url) + if not all([parsed_url.scheme, parsed_url.netloc]): + flash("Invalid URL") + return redirect(url_for("index")) + database = Database() collection = database.get_collection() @@ -85,15 +73,21 @@ def process(): # Only do a HEAD request to avoid downloading the whole file # This offloads the file downloading Replicate - req = urllib.request.Request(url, method="HEAD") - fetch = urllib.request.urlopen(req) + response = httpx.request("HEAD", url) - if fetch.status != 200: + if response.status_code != 200: flash("URL is not reachable") return redirect(url_for("index")) - content_length = fetch.headers["Content-Length"] - content_type = fetch.headers["Content-Type"] + content_length = response.headers["Content-Length"] + content_type = response.headers["Content-Type"] + + app.logger.debug( + "Processing URL: %s, Content-Type: %s, Content-Length: %s", + url, + content_type, + content_length, + ) processor = get_asset_processor(content_type) @@ -101,7 +95,7 @@ def process(): flash('Unsupported content type "' + content_type + '"') return redirect(url_for("index")) - collection.insert_one( + asset = collection.insert_one( { "url": url, "content_type": content_type, @@ -110,23 +104,28 @@ def process(): } ) - prediction = processor.process(url) + try: + response = processor.process(asset.inserted_id, url) + except Exception as e: + app.logger.error("Error processing asset: %s", e) + collection.update_one( + filter={"url": url}, + update={ + "$set": { + "status": "PROCESSING_ERROR", + "error": str(e), + } + }, + ) + flash("Error processing asset") + return redirect(url_for("index")) collection.update_one( 
filter={"url": url}, update={ "$set": { "status": "PROCESSING", - "replicate_process_id": prediction.id, - "replicate_request": { - "model": prediction.model, - "version": prediction.version, - "status": prediction.status, - "input": prediction.input, - "logs": prediction.logs, - "created_at": prediction.created_at, - "urls": prediction.urls, - }, + "processor_response": response, } }, ) @@ -139,13 +138,36 @@ def process(): return redirect(url_for("index")) +@app.route("/search", methods=["GET"]) +def search(): + query = request.args.get("query") + if query is None: + return render_template("search.html", results=[]) + + app.logger.info("Query submitted") + app.logger.debug(query) + + results = query_vector_search(query) + + if results is None: + flash("Search embeddings generation failed") + return redirect(url_for("search")) + + results = format_results(results) + + # TODO: look into warning logged here + app.logger.debug("Formatted search results", results) + + return render_template("search.html", results=results, query=query) + + def request_embeddings(id): app.logger.info("Requesting embeddings for %s", id) database = Database() collection = database.get_collection() - asset = collection.find_one({"_id": id}) + asset = collection.find_one({"_id": ObjectId(id)}) if asset is None: raise RuntimeError("Asset not found") @@ -155,41 +177,51 @@ def request_embeddings(id): generator = AsyncEmbeddingsGenerator() - generate_request = generator.generate(asset["text"]) + try: + response = generator.generate(id, asset["text"]) + except Exception as e: + app.logger.error("Error generating embeddings for %s: %s", id, e) + raise collection.update_one( - filter={"_id": id}, + filter={"_id": ObjectId(id)}, update={ "$set": { "status": "GENERATING_EMBEDDINGS", - "replicate_embedding_id": generate_request.id, + "generator_response": response, } }, ) # Inspiration https://www.mongodb.com/developer/products/atlas/how-use-cohere-embeddings-rerank-modules-mongodb-atlas/#query-mongodb-vector-index-using--vectorsearch -def query_vector_search(q, prefilter={}, postfilter={}, path="embedding", topK=2): +def query_vector_search(q): # Because the search is user-driven, we use the synchronous generator generator = SyncEmbeddingsGenerator() - generate_response = generator.generate(q) + try: + generator_response = generator.generate(q) + app.logger.debug(generator_response) + except Exception as e: + app.logger.error("Error generating embeddings: %s", e) + return None + + if generator_response["output"] is None: + app.logger.debug("Embeddings generation timed out") + return None - query_embedding = generate_response[0]["embedding"] + query_embedding = generator_response["output"][0]["embedding"] app.logger.info("Query embedding generated") app.logger.debug(query_embedding) vs_query = { "index": "vector_index", - "path": path, + "path": "embedding", "queryVector": query_embedding, - "numCandidates": 10, - "limit": topK, + "numCandidates": 100, + "limit": 10, } - if len(prefilter) > 0: - app.logger.info("Creating vector search query with pre filter") - vs_query["filter"] = prefilter new_search_query = {"$vectorSearch": vs_query} @@ -210,23 +242,39 @@ def query_vector_search(q, prefilter={}, postfilter={}, path="embedding", topK=2 database = Database() collection = database.get_collection() - if len(postfilter.keys()) > 0: - app.logger.info("Vector search query with post filter") - postFilter = {"$match": postfilter} - res = list(collection.aggregate([new_search_query, project, postFilter])) - else: - 
app.logger.info("Vector search query without post filter") - res = list(collection.aggregate([new_search_query, project])) + res = list(collection.aggregate([new_search_query, project])) app.logger.info("Vector search query run") app.logger.debug(res) return res -@app.route("/webhooks/audio", methods=["POST"]) -def webhook_audio(): +def verify_webhook(request): + if Config.HOOKDECK_WEBHOOK_SECRET is None: + app.logger.error("No HOOKDECK_WEBHOOK_SECRET found.") + return False + + hmac_header = request.headers.get("x-hookdeck-signature") + + hash = base64.b64encode( + hmac.new( + Config.HOOKDECK_WEBHOOK_SECRET.encode(), request.data, hashlib.sha256 + ).digest() + ).decode() + + verified = hash == hmac_header + app.logger.debug("Webhook signature verification: %s", verified) + return verified + + +@app.route("/webhooks/audio/", methods=["POST"]) +def webhook_audio(id): + if not verify_webhook(request): + app.logger.error("Webhook signature verification failed") + return jsonify({"error": "Webhook signature verification failed"}), 401 + payload = request.json - app.logger.info("Audio payload recieved") + app.logger.info("Audio payload received for id %s", id) app.logger.debug(payload) database = Database() @@ -237,7 +285,7 @@ def webhook_audio(): ) result = collection.find_one_and_update( - filter={"replicate_process_id": payload["id"]}, + filter={"_id": ObjectId(id)}, update={ "$set": { "status": status, @@ -249,21 +297,23 @@ def webhook_audio(): ) if result is None: - app.logger.error( - "No document found for id %s to add audio transcript", payload["id"] - ) - return jsonify({"error": "No document found to add audio transcript"}), 500 + app.logger.error("No document found for id %s to add audio transcript", id) + return jsonify({"error": "No document found to add audio transcript"}), 404 app.logger.info("Transcription updated") app.logger.debug(result) - request_embeddings(result["_id"]) + request_embeddings(id) return "OK" -@app.route("/webhooks/embedding", methods=["POST"]) -def webhook_embeddings(): +@app.route("/webhooks/embedding/", methods=["POST"]) +def webhook_embeddings(id): + if not verify_webhook(request): + app.logger.error("Webhook signature verification failed") + return jsonify({"error": "Webhook signature verification failed"}), 401 + payload = request.json app.logger.info("Embeddings payload recieved") app.logger.debug(payload) @@ -278,7 +328,7 @@ def webhook_embeddings(): collection = database.get_collection() result = collection.update_one( - filter={"replicate_embedding_id": payload["id"]}, + filter={"_id": ObjectId(id)}, update={ "$set": { "status": status, @@ -292,6 +342,6 @@ def webhook_embeddings(): app.logger.error( "No document found for id %s to update embedding", payload["id"] ) - return jsonify({"error": "No document found to update embedding"}), 500 + return jsonify({"error": "No document found to update embedding"}), 404 return "OK" diff --git a/config.py b/config.py index ded2f8d..04617bd 100644 --- a/config.py +++ b/config.py @@ -5,10 +5,26 @@ class Config: + SECRET_KEY = os.getenv("SECRET_KEY") + MONGODB_CONNECTION_URI = os.getenv("MONGODB_CONNECTION_URI") DB_NAME = "iaat" COLLECTION_NAME = "assets" - SECRET_KEY = os.getenv("SECRET_KEY") + + HOOKDECK_WEBHOOK_SECRET = os.getenv("HOOKDECK_WEBHOOK_SECRET") + HOOKDECK_QUEUE_API_KEY_HEADER_NAME = "x-iaat-queue-api-key" + HOOKDECK_REPLICATE_API_QUEUE_API_KEY = os.getenv( + "HOOKDECK_REPLICATE_API_QUEUE_API_KEY" + ) + HOOKDECK_QUEUE_AUTH_HEADERS = {} + 
HOOKDECK_QUEUE_AUTH_HEADERS[HOOKDECK_QUEUE_API_KEY_HEADER_NAME] = ( + HOOKDECK_REPLICATE_API_QUEUE_API_KEY + ) + HOOKDECK_REPLICATE_API_QUEUE_URL = os.getenv("HOOKDECK_REPLICATE_API_QUEUE_URL") AUDIO_WEBHOOK_URL = os.getenv("AUDIO_WEBHOOK_URL") EMBEDDINGS_WEBHOOK_URL = os.getenv("EMBEDDINGS_WEBHOOK_URL") HOOKDECK_PROJECT_API_KEY = os.getenv("HOOKDECK_PROJECT_API_KEY") + + REPLICATE_API_TOKEN = os.getenv("REPLICATE_API_TOKEN") + REPLICATE_API_AUTH_HEADERS = {"Authorization": f"Bearer {REPLICATE_API_TOKEN}"} + REPLICATE_WEBHOOKS_SECRET = os.getenv("REPLICATE_WEBHOOKS_SECRET") diff --git a/create-hookdeck-connections.py b/create-hookdeck-connections.py index f0a18dd..237d99d 100644 --- a/create-hookdeck-connections.py +++ b/create-hookdeck-connections.py @@ -1,9 +1,9 @@ -import http.client -import json - -from config import Config +import httpx import re +import hashlib +import os +from config import Config # Define the headers for the Hookdeck API request headers = { @@ -13,26 +13,78 @@ def create_connection(payload): - conn = http.client.HTTPSConnection("api.hookdeck.com") - conn.request( - "PUT", "/latest/connections", body=json.dumps(payload), headers=headers + response = httpx.request( + "PUT", + "https://api.hookdeck.com/latest/connections", + headers=headers, + json=payload, ) - response = conn.getresponse() - data = response.read().decode() - conn.close() + data = response.json() - if response.status != 200: + if response.status_code != 200: raise Exception(f"Failed to create connection: {data}") - return json.loads(data) + return data +# Outbound Replicate API Queue +replicate_api_queue_api_key = hashlib.sha256(os.urandom(32)).hexdigest() +replicate_api_queue = { + "name": "replicate-api-queue", + "source": { + "name": "replicate-api-inbound", + "verification": { + "type": "API_KEY", + "configs": { + "header_key": Config.HOOKDECK_QUEUE_API_KEY_HEADER_NAME, + "api_key": replicate_api_queue_api_key, + }, + }, + }, + "rules": [ + { + "type": "retry", + "strategy": "exponential", + "count": 5, + "interval": 30000, + "response_status_codes": ["429", "500"], + } + ], + "destination": { + "name": "replicate-api", + "url": "https://api.replicate.com/v1/", + "auth_method": { + "type": "BEARER_TOKEN", + "config": { + "token": Config.REPLICATE_API_TOKEN, + }, + }, + }, +} + +replicate_api_connection = create_connection(replicate_api_queue) + # Create Replicate Audio Connection replicate_audio = { "name": "replicate-audio", "source": { "name": "replicate-audio", + # "verification": { + # "type": "REPLICATE", + # "configs": { + # "webhook_secret_key": Config.REPLICATE_WEBHOOKS_SECRET, + # }, + # }, }, + "rules": [ + { + "type": "retry", + "strategy": "exponential", + "count": 5, + "interval": 30000, + "response_status_codes": ["!200", "!404"], + } + ], "destination": { "name": "cli-replicate-audio", "cli_path": "/webhooks/audio", @@ -46,7 +98,22 @@ def create_connection(payload): "name": "replicate-embedding", "source": { "name": "replicate-embedding", + # "verification": { + # "type": "REPLICATE", + # "configs": { + # "webhook_secret_key": Config.REPLICATE_WEBHOOKS_SECRET, + # }, + # }, }, + "rules": [ + { + "type": "retry", + "strategy": "exponential", + "count": 5, + "interval": 30000, + "response_status_codes": ["!200", "!404"], + } + ], "destination": { "name": "cli-replicate-embedding", "cli_path": "/webhooks/embedding", @@ -59,10 +126,21 @@ def create_connection(payload): with open(".env", "r") as file: env_content = file.read() +replicate_api_connection_url = 
replicate_api_connection["source"]["url"] audio_webhook_url = replicate_audio_connection["source"]["url"] embedding_webhook_url = replicate_embedding_connection["source"]["url"] -# Replace the webhooks URLs in the .env content +# Replace the .env URLs in the .env content +env_content = re.sub( + r"HOOKDECK_REPLICATE_API_QUEUE_API_KEY=.*", + f"HOOKDECK_REPLICATE_API_QUEUE_API_KEY={replicate_api_queue_api_key}", + env_content, +) +env_content = re.sub( + r"HOOKDECK_REPLICATE_API_QUEUE_URL=.*", + f"HOOKDECK_REPLICATE_API_QUEUE_URL={replicate_api_connection_url}", + env_content, +) env_content = re.sub( r"AUDIO_WEBHOOK_URL=.*", f"AUDIO_WEBHOOK_URL={audio_webhook_url}", env_content ) @@ -74,3 +152,5 @@ def create_connection(payload): with open(".env", "w") as file: file.write(env_content) + +print("Connections created successfully!") diff --git a/create-indexes.py b/create-indexes.py index 7328af8..35771ad 100644 --- a/create-indexes.py +++ b/create-indexes.py @@ -6,9 +6,14 @@ database = Database() collection = database.get_collection() +if collection.name not in collection.database.list_collection_names(): + print("Creating empty collection so indexes can be created.") + collection.database.create_collection(collection.name) + def create_or_update_search_index(index_name, index_definition, index_type): indexes = list(collection.list_search_indexes(index_name)) + if len(indexes) == 0: print(f'Creating search index: "{index_name}"') index_model = SearchIndexModel( @@ -16,18 +21,14 @@ def create_or_update_search_index(index_name, index_definition, index_type): name=index_name, type=index_type, ) - result = collection.create_search_index(model=index_model) + collection.create_search_index(model=index_model) else: print(f'Search index "{index_name}" already exists. Updating.') - result = collection.update_search_index( - name=index_name, definition=index_definition - ) - - return result + collection.update_search_index(name=index_name, definition=index_definition) -vector_result = create_or_update_search_index( +create_or_update_search_index( "vector_index", { "fields": [ @@ -41,19 +42,19 @@ def create_or_update_search_index(index_name, index_definition, index_type): }, "vectorSearch", ) -print(vector_result) -index_result = create_or_update_search_index( - "replicate_by_embedding_id_index", +create_or_update_search_index( + "url_index", { - "mappings": {"dynamic": True}, - "fields": [ - { - "type": "string", - "path": "replicate_embedding_id", - } - ], + "mappings": { + "fields": { + "url": { + "type": "string", + }, + }, + } }, "search", ) -print(index_result) + +print("Indexes created successfully!") diff --git a/poetry.lock b/poetry.lock index 011987b..a0797be 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,16 +1,5 @@ # This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
-[[package]] -name = "annotated-types" -version = "0.7.0" -description = "Reusable constraint types to use with typing.Annotated" -optional = false -python-versions = ">=3.8" -files = [ - {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, - {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, -] - [[package]] name = "anyio" version = "4.4.0" @@ -317,129 +306,6 @@ files = [ {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, ] -[[package]] -name = "pydantic" -version = "2.8.2" -description = "Data validation using Python type hints" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic-2.8.2-py3-none-any.whl", hash = "sha256:73ee9fddd406dc318b885c7a2eab8a6472b68b8fb5ba8150949fc3db939f23c8"}, - {file = "pydantic-2.8.2.tar.gz", hash = "sha256:6f62c13d067b0755ad1c21a34bdd06c0c12625a22b0fc09c6b149816604f7c2a"}, -] - -[package.dependencies] -annotated-types = ">=0.4.0" -pydantic-core = "2.20.1" -typing-extensions = [ - {version = ">=4.12.2", markers = "python_version >= \"3.13\""}, - {version = ">=4.6.1", markers = "python_version < \"3.13\""}, -] - -[package.extras] -email = ["email-validator (>=2.0.0)"] - -[[package]] -name = "pydantic-core" -version = "2.20.1" -description = "Core functionality for Pydantic validation and serialization" -optional = false -python-versions = ">=3.8" -files = [ - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:3acae97ffd19bf091c72df4d726d552c473f3576409b2a7ca36b2f535ffff4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41f4c96227a67a013e7de5ff8f20fb496ce573893b7f4f2707d065907bffdbd6"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f239eb799a2081495ea659d8d4a43a8f42cd1fe9ff2e7e436295c38a10c286a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53e431da3fc53360db73eedf6f7124d1076e1b4ee4276b36fb25514544ceb4a3"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1f62b2413c3a0e846c3b838b2ecd6c7a19ec6793b2a522745b0869e37ab5bc1"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5d41e6daee2813ecceea8eda38062d69e280b39df793f5a942fa515b8ed67953"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d482efec8b7dc6bfaedc0f166b2ce349df0011f5d2f1f25537ced4cfc34fd98"}, - {file = "pydantic_core-2.20.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e93e1a4b4b33daed65d781a57a522ff153dcf748dee70b40c7258c5861e1768a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e7c4ea22b6739b162c9ecaaa41d718dfad48a244909fe7ef4b54c0b530effc5a"}, - {file = "pydantic_core-2.20.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4f2790949cf385d985a31984907fecb3896999329103df4e4983a4a41e13e840"}, - {file = "pydantic_core-2.20.1-cp310-none-win32.whl", hash = "sha256:5e999ba8dd90e93d57410c5e67ebb67ffcaadcea0ad973240fdfd3a135506250"}, - {file = "pydantic_core-2.20.1-cp310-none-win_amd64.whl", hash = "sha256:512ecfbefef6dac7bc5eaaf46177b2de58cdf7acac8793fe033b24ece0b9566c"}, - {file = 
"pydantic_core-2.20.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d2a8fa9d6d6f891f3deec72f5cc668e6f66b188ab14bb1ab52422fe8e644f312"}, - {file = "pydantic_core-2.20.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:175873691124f3d0da55aeea1d90660a6ea7a3cfea137c38afa0a5ffabe37b88"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37eee5b638f0e0dcd18d21f59b679686bbd18917b87db0193ae36f9c23c355fc"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:25e9185e2d06c16ee438ed39bf62935ec436474a6ac4f9358524220f1b236e43"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:150906b40ff188a3260cbee25380e7494ee85048584998c1e66df0c7a11c17a6"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ad4aeb3e9a97286573c03df758fc7627aecdd02f1da04516a86dc159bf70121"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3f3ed29cd9f978c604708511a1f9c2fdcb6c38b9aae36a51905b8811ee5cbf1"}, - {file = "pydantic_core-2.20.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b0dae11d8f5ded51699c74d9548dcc5938e0804cc8298ec0aa0da95c21fff57b"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:faa6b09ee09433b87992fb5a2859efd1c264ddc37280d2dd5db502126d0e7f27"}, - {file = "pydantic_core-2.20.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9dc1b507c12eb0481d071f3c1808f0529ad41dc415d0ca11f7ebfc666e66a18b"}, - {file = "pydantic_core-2.20.1-cp311-none-win32.whl", hash = "sha256:fa2fddcb7107e0d1808086ca306dcade7df60a13a6c347a7acf1ec139aa6789a"}, - {file = "pydantic_core-2.20.1-cp311-none-win_amd64.whl", hash = "sha256:40a783fb7ee353c50bd3853e626f15677ea527ae556429453685ae32280c19c2"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:595ba5be69b35777474fa07f80fc260ea71255656191adb22a8c53aba4479231"}, - {file = "pydantic_core-2.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4f55095ad087474999ee28d3398bae183a66be4823f753cd7d67dd0153427c9"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f9aa05d09ecf4c75157197f27cdc9cfaeb7c5f15021c6373932bf3e124af029f"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e97fdf088d4b31ff4ba35db26d9cc472ac7ef4a2ff2badeabf8d727b3377fc52"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc633a9fe1eb87e250b5c57d389cf28998e4292336926b0b6cdaee353f89a237"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d573faf8eb7e6b1cbbcb4f5b247c60ca8be39fe2c674495df0eb4318303137fe"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26dc97754b57d2fd00ac2b24dfa341abffc380b823211994c4efac7f13b9e90e"}, - {file = "pydantic_core-2.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:33499e85e739a4b60c9dac710c20a08dc73cb3240c9a0e22325e671b27b70d24"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:bebb4d6715c814597f85297c332297c6ce81e29436125ca59d1159b07f423eb1"}, - {file = "pydantic_core-2.20.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:516d9227919612425c8ef1c9b869bbbee249bc91912c8aaffb66116c0b447ebd"}, - {file = "pydantic_core-2.20.1-cp312-none-win32.whl", hash = "sha256:469f29f9093c9d834432034d33f5fe45699e664f12a13bf38c04967ce233d688"}, - {file = "pydantic_core-2.20.1-cp312-none-win_amd64.whl", hash = "sha256:035ede2e16da7281041f0e626459bcae33ed998cca6a0a007a5ebb73414ac72d"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:0827505a5c87e8aa285dc31e9ec7f4a17c81a813d45f70b1d9164e03a813a686"}, - {file = "pydantic_core-2.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:19c0fa39fa154e7e0b7f82f88ef85faa2a4c23cc65aae2f5aea625e3c13c735a"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa223cd1e36b642092c326d694d8bf59b71ddddc94cdb752bbbb1c5c91d833b"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c336a6d235522a62fef872c6295a42ecb0c4e1d0f1a3e500fe949415761b8a19"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7eb6a0587eded33aeefea9f916899d42b1799b7b14b8f8ff2753c0ac1741edac"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70c8daf4faca8da5a6d655f9af86faf6ec2e1768f4b8b9d0226c02f3d6209703"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9fa4c9bf273ca41f940bceb86922a7667cd5bf90e95dbb157cbb8441008482c"}, - {file = "pydantic_core-2.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:11b71d67b4725e7e2a9f6e9c0ac1239bbc0c48cce3dc59f98635efc57d6dac83"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:270755f15174fb983890c49881e93f8f1b80f0b5e3a3cc1394a255706cabd203"}, - {file = "pydantic_core-2.20.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c81131869240e3e568916ef4c307f8b99583efaa60a8112ef27a366eefba8ef0"}, - {file = "pydantic_core-2.20.1-cp313-none-win32.whl", hash = "sha256:b91ced227c41aa29c672814f50dbb05ec93536abf8f43cd14ec9521ea09afe4e"}, - {file = "pydantic_core-2.20.1-cp313-none-win_amd64.whl", hash = "sha256:65db0f2eefcaad1a3950f498aabb4875c8890438bc80b19362cf633b87a8ab20"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4745f4ac52cc6686390c40eaa01d48b18997cb130833154801a442323cc78f91"}, - {file = "pydantic_core-2.20.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a8ad4c766d3f33ba8fd692f9aa297c9058970530a32c728a2c4bfd2616d3358b"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41e81317dd6a0127cabce83c0c9c3fbecceae981c8391e6f1dec88a77c8a569a"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:04024d270cf63f586ad41fff13fde4311c4fc13ea74676962c876d9577bcc78f"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaad4ff2de1c3823fddf82f41121bdf453d922e9a238642b1dedb33c4e4f98ad"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:26ab812fa0c845df815e506be30337e2df27e88399b985d0bb4e3ecfe72df31c"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c5ebac750d9d5f2706654c638c041635c385596caf68f81342011ddfa1e5598"}, - {file = "pydantic_core-2.20.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", 
hash = "sha256:2aafc5a503855ea5885559eae883978c9b6d8c8993d67766ee73d82e841300dd"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4868f6bd7c9d98904b748a2653031fc9c2f85b6237009d475b1008bfaeb0a5aa"}, - {file = "pydantic_core-2.20.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa2f457b4af386254372dfa78a2eda2563680d982422641a85f271c859df1987"}, - {file = "pydantic_core-2.20.1-cp38-none-win32.whl", hash = "sha256:225b67a1f6d602de0ce7f6c1c3ae89a4aa25d3de9be857999e9124f15dab486a"}, - {file = "pydantic_core-2.20.1-cp38-none-win_amd64.whl", hash = "sha256:6b507132dcfc0dea440cce23ee2182c0ce7aba7054576efc65634f080dbe9434"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:b03f7941783b4c4a26051846dea594628b38f6940a2fdc0df00b221aed39314c"}, - {file = "pydantic_core-2.20.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1eedfeb6089ed3fad42e81a67755846ad4dcc14d73698c120a82e4ccf0f1f9f6"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:635fee4e041ab9c479e31edda27fcf966ea9614fff1317e280d99eb3e5ab6fe2"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:77bf3ac639c1ff567ae3b47f8d4cc3dc20f9966a2a6dd2311dcc055d3d04fb8a"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ed1b0132f24beeec5a78b67d9388656d03e6a7c837394f99257e2d55b461611"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c6514f963b023aeee506678a1cf821fe31159b925c4b76fe2afa94cc70b3222b"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d4204d8ca33146e761c79f83cc861df20e7ae9f6487ca290a97702daf56006"}, - {file = "pydantic_core-2.20.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d036c7187b9422ae5b262badb87a20a49eb6c5238b2004e96d4da1231badef1"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9ebfef07dbe1d93efb94b4700f2d278494e9162565a54f124c404a5656d7ff09"}, - {file = "pydantic_core-2.20.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6b9d9bb600328a1ce523ab4f454859e9d439150abb0906c5a1983c146580ebab"}, - {file = "pydantic_core-2.20.1-cp39-none-win32.whl", hash = "sha256:784c1214cb6dd1e3b15dd8b91b9a53852aed16671cc3fbe4786f4f1db07089e2"}, - {file = "pydantic_core-2.20.1-cp39-none-win_amd64.whl", hash = "sha256:d2fe69c5434391727efa54b47a1e7986bb0186e72a41b203df8f5b0a19a4f669"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a45f84b09ac9c3d35dfcf6a27fd0634d30d183205230a0ebe8373a0e8cfa0906"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d02a72df14dfdbaf228424573a07af10637bd490f0901cee872c4f434a735b94"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2b27e6af28f07e2f195552b37d7d66b150adbaa39a6d327766ffd695799780f"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084659fac3c83fd674596612aeff6041a18402f1e1bc19ca39e417d554468482"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:242b8feb3c493ab78be289c034a1f659e8826e2233786e36f2893a950a719bb6"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:38cf1c40a921d05c5edc61a785c0ddb4bed67827069f535d794ce6bcded919fc"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:e0bbdd76ce9aa5d4209d65f2b27fc6e5ef1312ae6c5333c26db3f5ade53a1e99"}, - {file = "pydantic_core-2.20.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:254ec27fdb5b1ee60684f91683be95e5133c994cc54e86a0b0963afa25c8f8a6"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:407653af5617f0757261ae249d3fba09504d7a71ab36ac057c938572d1bc9331"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:c693e916709c2465b02ca0ad7b387c4f8423d1db7b4649c551f27a529181c5ad"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b5ff4911aea936a47d9376fd3ab17e970cc543d1b68921886e7f64bd28308d1"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:177f55a886d74f1808763976ac4efd29b7ed15c69f4d838bbd74d9d09cf6fa86"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:964faa8a861d2664f0c7ab0c181af0bea66098b1919439815ca8803ef136fc4e"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4dd484681c15e6b9a977c785a345d3e378d72678fd5f1f3c0509608da24f2ac0"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f6d6cff3538391e8486a431569b77921adfcdef14eb18fbf19b7c0a5294d4e6a"}, - {file = "pydantic_core-2.20.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a6d511cc297ff0883bc3708b465ff82d7560193169a8b93260f74ecb0a5e08a7"}, - {file = "pydantic_core-2.20.1.tar.gz", hash = "sha256:26ca695eeee5f9f1aeeb211ffc12f10bcb6f71e2989988fda61dabd65db878d4"}, -] - -[package.dependencies] -typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" - [[package]] name = "pymongo" version = "4.8.0" @@ -526,23 +392,6 @@ files = [ [package.extras] cli = ["click (>=5.0)"] -[[package]] -name = "replicate" -version = "0.32.1" -description = "Python client for Replicate" -optional = false -python-versions = ">=3.8" -files = [ - {file = "replicate-0.32.1-py3-none-any.whl", hash = "sha256:981d4f4a1065f6be7e2f3694648470c2f00412e223aa5ca0cb8cffc557fe40c8"}, - {file = "replicate-0.32.1.tar.gz", hash = "sha256:2c807c2d7d0598d5726e585028a7b29c72ffb61a4cbb5a12eaf841feeed8def0"}, -] - -[package.dependencies] -httpx = ">=0.21.0,<1" -packaging = "*" -pydantic = ">1.10.7" -typing-extensions = ">=4.5.0" - [[package]] name = "sniffio" version = "1.3.1" @@ -554,17 +403,6 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "typing-extensions" -version = "4.12.2" -description = "Backported and Experimental Type Hints for Python 3.8+" -optional = false -python-versions = ">=3.8" -files = [ - {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, - {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, -] - [[package]] name = "werkzeug" version = "3.0.4" @@ -585,4 +423,4 @@ watchdog = ["watchdog (>=2.3)"] [metadata] lock-version = "2.0" python-versions = "^3.11" -content-hash = "fc601fc3ff6a05a11052e75f1a1e85af406a9036d43bbb2a5b804f1ee189d6fe" +content-hash = "ce7734fe1d0d3dd4e019c17712ac2640661c01f86f98c9fcdfdae0fcf8ed502a" diff --git 
a/pyproject.toml b/pyproject.toml index f42efc4..b8526fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -11,8 +11,8 @@ python = "^3.11" flask = "^3.0.3" python-dotenv = "^1.0.1" pymongo = "^4.8.0" -replicate = "^0.32.1" gunicorn = "^23.0.0" +httpx = "^0.27.2" [build-system] diff --git a/templates/home.html b/templates/home.html index bf0a91b..8ea908f 100644 --- a/templates/home.html +++ b/templates/home.html @@ -54,6 +54,10 @@ "name": "Bruce (mp3)", "url": "https://replicate.delivery/pbxt/KAX5G2BccyZabQssOPmBa9YnK6wkgstfXz3idL8CLKDXSkeB/bruce.mp3", }, + { + "name": "Bria (mp3)", + "url": "https://replicate.delivery/pbxt/KMZ4G6zMeSQRlcVp6CGZpBMSTbUT9M7fsZzBxTM0aYloh0Se/bria.mp3" + }, { "name": "Other 1 (wav)", "url": "https://replicate.delivery/pbxt/KASkhrd696JJqYQcdHq8hSXV6deWYmfxa1yRQFH0iC3xIwVG/2086-149220-0033.wav", diff --git a/templates/search.html b/templates/search.html index 461edaa..7cdf0bf 100644 --- a/templates/search.html +++ b/templates/search.html @@ -24,7 +24,7 @@ {% block content %}
-
+
diff --git a/tutorial/README.md b/tutorial/README.md
new file mode 100644
index 0000000..c5811b9
--- /dev/null
+++ b/tutorial/README.md
@@ -0,0 +1,1094 @@
+# H1. Index All the Things: Using Replicate, MongoDB, and Hookdeck to Build Scalable Content Type Agnostic Vector Search with Python and Flask
+
+## H2. Introduction
+
+In this tutorial, we'll build a Flask application that allows a user to index and then search anything on the Internet that has a publicly accessible URL. That's right! Ask the app to index an MP3 or WAV file, an HTML or text file, or a MOV or MP4 file, and it will use the power of Replicate AI to create a textual representation of that file, with the results stored in MongoDB Atlas. As long as there's an LLM that can analyze the resource and create a textual representation, it can be indexed. Then, all those indexed files, no matter the originating file type, can be searched with a text query using MongoDB Atlas. We'll use the Hookdeck event gateway as a serverless queue, managing API requests and asynchronous webhook callbacks between Replicate and our Flask app to ensure our architecture is structured to scale with demand.
+
+We'll begin by setting up the required services and getting the Flask application up and running. Then, we'll follow the journey of data through key components and code within the app, covering how an indexing request is submitted, the content type is analyzed, a textual representation is generated, a vector embedding is generated and stored, and the content is ultimately made available for search within a vector search index.
+
+## H2. Architecture Overview
+
+Scalability is often overhyped, but it remains an important aspect of building robust applications. One of the benefits of using serverless and cloud-hosted providers is the ability to offload work to specialized services. Also important to any scalable architecture is a way of ensuring services aren't overloaded and your application is fault-tolerant. In this tutorial, we leverage several such services to handle different aspects of our application.
+
+First, let's take a look at the services:
+
+- **[Replicate](https://replicate.com)**: Provides open-source machine learning models, accessible via an API.
+- **[MongoDB Atlas](https://www.mongodb.com/products/platform/atlas-database)**: An integrated suite of data services centered around a cloud database designed to accelerate and simplify how you build with data.
+- **[Hookdeck](https://hookdeck.com?ref=mongodb-iatt)**: An event gateway that provides engineering teams with infrastructure and tooling to build and manage event-driven applications.
+
+Next, let's see how they're used.
+
+TODO: image
+
+- **Replicate**: Replicate handles AI inference, producing text and embeddings and allowing us to offload the computationally intensive tasks of running machine learning models. We use different LLMs for analyzing different content types.
+- **MongoDB Atlas**: MongoDB Atlas provides database storage and vector search capabilities, ensuring our data is stored efficiently and can be queried quickly.
+- **Hookdeck**: Hookdeck acts as a serverless queue for a) ensuring Replicate API requests do not exceed rate limits and can be retried, and b) ingesting, delivering, and retrying webhooks from Replicate to ensure reliable ingestion of events. Note: We'll also use the [Hookdeck CLI](https://hookdeck.com/docs/cli?ref=mongodb-iatt) to receive webhooks in our local development environment.
+
+By utilizing these cloud-based services, we can focus on building the core functionality of our application while ensuring it remains scalable and efficient. Webhooks, in particular, allow for scalability by enabling [asynchronous AI workflows](https://hookdeck.com/blog/asynchronous-ai?ref=mongodb-iatt), offloading those high compute usage scenarios to the third-party services, and just receiving callbacks via a webhook when work is completed.
+
+## H2. Prerequisites
+
+Before you begin, ensure you have the following:
+
+- A free [Hookdeck account](https://dashboard.hookdeck.com/signup?ref=mongodb-iatt)
+- The [Hookdeck CLI installed](https://hookdeck.com/docs/cli?ref=mongodb-iatt)
+- A free [MongoDB Atlas account](https://www.mongodb.com/cloud/atlas/register)
+- A free [Replicate account](https://replicate.com/signin)
+- [Python 3](https://www.python.org/downloads/)
+- [Poetry](https://python-poetry.org/docs/#installation) for package management
+
+## H2: Get the App Up and Running
+
+Let's begin by getting the application up and running and seeing it in action.
+
+### H3: Get the Code
+
+Begin by getting the application codebase.
+
+```sh
+git clone https://github.com/hookdeck/index-all-the-things.git
+```
+
+Activate a virtual environment with Poetry:
+
+```sh
+poetry shell
+```
+
+And install the app dependencies:
+
+```sh
+poetry install
+```
+
+### H3: Configure the App
+
+The application needs credentials for the services it interacts with.
+
+Copy the example `.env-example` file to a new `.env` file:
+
+```sh
+cp .env-example .env
+```
+
+Update the values within `.env` as follows:
+
+- `SECRET_KEY`: See the [`SECRET_KEY` Flask docs](https://flask.palletsprojects.com/en/stable/config/#SECRET_KEY).
+- `MONGODB_CONNECTION_URI`: Populate with a MongoDB Atlas connection string with a **Read and write to any database** role. See the [Get Connection String docs](https://www.mongodb.com/docs/guides/atlas/connection-string/).
+- `HOOKDECK_PROJECT_API_KEY`: Get an **API Key** from the **Project** -> **Settings** -> **Secrets** section of the [Hookdeck Dashboard](https://dashboard.hookdeck.com?ref=mongodb-iatt).
+- `HOOKDECK_WEBHOOK_SECRET`: Get a **Signing Secret** from the **Project** -> **Settings** -> **Secrets** section of the [Hookdeck Dashboard](https://dashboard.hookdeck.com?ref=mongodb-iatt).
+- `REPLICATE_API_TOKEN`: [Create an API token](https://replicate.com/account/api-tokens) in the Replicate dashboard.
+- `REPLICATE_WEBHOOKS_SECRET`: Go to the [Webhooks section](https://replicate.com/account/webhook) of the Replicate dashboard and click the **Show signing key** button.
+- `HOOKDECK_REPLICATE_API_QUEUE_API_KEY`, `HOOKDECK_REPLICATE_API_QUEUE_URL`, `AUDIO_WEBHOOK_URL`, and `EMBEDDINGS_WEBHOOK_URL` will be automatically populated in the next step.
+
+### H3: Create Hookdeck Connections
+
+[Hookdeck Connections](https://hookdeck.com/docs/connections?ref=mongodb-iatt) are used to route inbound HTTP requests received by a [Hookdeck Source](https://hookdeck.com/docs/sources?ref=mongodb-iatt) to a [Hookdeck Destination](https://hookdeck.com/docs/destinations?ref=mongodb-iatt).
+
+The `create-hookdeck-connections.py` script automatically creates Hookdeck Connections that:
+
+1. Route requests made to Hookdeck URLs through to the locally running application via the Hookdeck CLI. Here, Hookdeck is used as an inbound queue.
+2. Route requests made to a Hookdeck URL through to the Replicate API. Hookdeck is used as an outbound queue in this situation (see the sketch below).
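+
+Both patterns appear in the application code later in this tutorial. As a preview, here's a minimal sketch (assuming the configuration values from `config.py` are populated, and using a placeholder model version) of how the outbound queue changes a Replicate API call: only the base URL and auth headers differ.
+
+```py
+import httpx
+
+from config import Config
+
+payload = {"version": "<model-version>", "input": {"text": "example"}}
+
+# Direct call to the Replicate API:
+direct = httpx.post(
+    "https://api.replicate.com/v1/predictions",
+    headers=Config.REPLICATE_API_AUTH_HEADERS,
+    json=payload,
+)
+
+# The same call routed through the Hookdeck outbound queue, which
+# authenticates with an API key and forwards the request to Replicate:
+queued = httpx.post(
+    f"{Config.HOOKDECK_REPLICATE_API_QUEUE_URL}/predictions",
+    headers=Config.HOOKDECK_QUEUE_AUTH_HEADERS,
+    json=payload,
+)
+```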
+
+The script also updates the `.env` file with the Source URLs that handle the webhooks. Let's go through the details of the script.
+
+First, ensure you have the necessary imports and define the authentication and content-type headers for the Hookdeck API request:
+
+```py
+import httpx
+import re
+import hashlib
+import os
+
+from config import Config
+
+headers = {
+    "Authorization": f"Bearer {Config.HOOKDECK_PROJECT_API_KEY}",
+    "Content-Type": "application/json",
+}
+```
+
+Next, define a function that creates a Connection via the Hookdeck API:
+
+```py
+def create_connection(payload):
+    response = httpx.request(
+        "PUT",
+        "https://api.hookdeck.com/latest/connections",
+        headers=headers,
+        json=payload,
+    )
+    data = response.json()
+
+    if response.status_code != 200:
+        raise Exception(f"Failed to create connection: {data}")
+
+    return data
+```
+
+This function makes a `PUT` request to the Hookdeck API with the [upsert Connection payload](https://hookdeck.com/docs/api#createupdate-a-connection?ref=mongodb-iatt) and handles the response. If the response status is not `200` (OK), an exception is raised. The function returns the parsed JSON response.
+
+The first connection to be created is one for the Replicate API outbound queue:
+
+```py
+replicate_api_queue_api_key = hashlib.sha256(os.urandom(32)).hexdigest()
+replicate_api_queue = {
+    "name": "replicate-api-queue",
+    "source": {
+        "name": "replicate-api-inbound",
+        "verification": {
+            "type": "API_KEY",
+            "configs": {
+                "header_key": Config.HOOKDECK_QUEUE_API_KEY_HEADER_NAME,
+                "api_key": replicate_api_queue_api_key,
+            },
+        },
+    },
+    "rules": [
+        {
+            "type": "retry",
+            "strategy": "exponential",
+            "count": 5,
+            "interval": 30000,
+            "response_status_codes": ["429", "500"],
+        }
+    ],
+    "destination": {
+        "name": "replicate-api",
+        "url": "https://api.replicate.com/v1/",
+        "auth_method": {
+            "type": "BEARER_TOKEN",
+            "config": {
+                "token": Config.REPLICATE_API_TOKEN,
+            },
+        },
+    },
+}
+
+replicate_api_connection = create_connection(replicate_api_queue)
+```
+
+The Connection has a `name`, a `source`, and a `destination`. The `source` also has a `name` and a `verification`. The `verification` instructs Hookdeck how to authenticate requests. Since the connection is acting as an API queue, we're using the `API_KEY` type with the `header_key` set to the value defined in `Config.HOOKDECK_QUEUE_API_KEY_HEADER_NAME` and the `api_key` value set to the generated hash stored in `replicate_api_queue_api_key`.
+
+The `rules` define a request retry strategy to use when interacting with the Replicate API. In this case, we're stating to retry up to 5 times, using an interval of `30000` milliseconds, and applying an `exponential` backoff strategy. Also, we're using the `response_status_codes` option to inform Hookdeck to only retry on `429` and `500` HTTP responses. See the [Hookdeck Retry docs](https://hookdeck.com/docs/retries?ref=mongodb-iatt) for more information on retries and the [Hookdeck Rules](https://hookdeck.com/docs/connections?ref=mongodb-iatt#connection-rules) docs for information on other types of rules that are available.
+
+The `url` on the Destination is the base URL for the Replicate API. Hookdeck uses path forwarding by default, so any path appended to the Hookdeck Source URL will also be appended to the Destination URL.
For example, a request to a Hookdeck Source with URL `https://hkdk.events/{id}/predictions` will result in a request to a connected Destination of `https://api.replicate.com/v1/predictions`, where the Destination has a base URL of `https://api.replicate.com/v1/`. Hookdeck acts very much like a proxy in this scenario.
+
+The `auth_method` on the Destination is of type `BEARER_TOKEN` with a `config.token` set to the value of the `REPLICATE_API_TOKEN` environment variable. This allows Hookdeck to make authenticated API calls to Replicate.
+
+Now, create a Connection for the Replicate Audio webhooks to handle audio analysis callbacks:
+
+```py
+replicate_audio = {
+    "name": "replicate-audio",
+    "source": {
+        "name": "replicate-audio",
+        "verification": {
+            "type": "REPLICATE",
+            "configs": {
+                "webhook_secret_key": Config.REPLICATE_WEBHOOKS_SECRET,
+            },
+        },
+    },
+    "rules": [
+        {
+            "type": "retry",
+            "count": 5,
+            "interval": 30000,
+            "strategy": "exponential",
+            "response_status_codes": ["!200", "!404"],
+        }
+    ],
+    "destination": {
+        "name": "cli-replicate-audio",
+        "cli_path": "/webhooks/audio",
+    },
+}
+
+replicate_audio_connection = create_connection(replicate_audio)
+```
+
+The Replicate Audio webhook callback connection uses a `verification` of type `REPLICATE` with a `configs.webhook_secret_key` value set from the `REPLICATE_WEBHOOKS_SECRET` value we stored in the `.env` file. This enables and instructs Hookdeck to verify that the webhook has come from Replicate.
+
+The `rules` for this inbound Connection are similar to those of the outbound connection and define a delivery retry strategy to follow if any requests to our application's webhook endpoint fail. The only difference is that the `response_status_codes` value informs Hookdeck not to retry if it receives a `200` or `404` response.
+
+The `destination` has a `name` and a `cli_path` that informs Hookdeck that the Destination is the Hookdeck CLI and that the path the request should be forwarded to is `/webhooks/audio`.
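+
+Before moving on, it's worth seeing how these CLI-destined deliveries are authenticated once they reach the Flask app: the `verify_webhook` helper in `app.py` recomputes an HMAC SHA-256 digest of the raw request body using `HOOKDECK_WEBHOOK_SECRET` and compares it to the `x-hookdeck-signature` header Hookdeck sends:
+
+```py
+# From app.py (requires: import hmac, hashlib, base64; from config import Config)
+def verify_webhook(request):
+    if Config.HOOKDECK_WEBHOOK_SECRET is None:
+        app.logger.error("No HOOKDECK_WEBHOOK_SECRET found.")
+        return False
+
+    hmac_header = request.headers.get("x-hookdeck-signature")
+
+    # Base64-encoded HMAC SHA-256 digest of the raw request body
+    hash = base64.b64encode(
+        hmac.new(
+            Config.HOOKDECK_WEBHOOK_SECRET.encode(), request.data, hashlib.sha256
+        ).digest()
+    ).decode()
+
+    verified = hash == hmac_header
+    app.logger.debug("Webhook signature verification: %s", verified)
+    return verified
+```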
+
+Next, create a connection for Replicate Embeddings webhook callbacks:
+
+```py
+replicate_embedding = {
+    "name": "replicate-embedding",
+    "source": {
+        "name": "replicate-embedding",
+        "verification": {
+            "type": "REPLICATE",
+            "configs": {
+                "webhook_secret_key": Config.REPLICATE_WEBHOOKS_SECRET,
+            },
+        },
+    },
+    "rules": [
+        {
+            "type": "retry",
+            "count": 5,
+            "interval": 30000,
+            "strategy": "exponential",
+            "response_status_codes": ["!200", "!404"],
+        }
+    ],
+    "destination": {
+        "name": "cli-replicate-embedding",
+        "cli_path": "/webhooks/embedding",
+    },
+}
+
+replicate_embedding_connection = create_connection(replicate_embedding)
+```
+
+Finally, update the `.env` file with some of the generated values:
+
+```py
+# Update .env
+with open(".env", "r") as file:
+    env_content = file.read()
+
+replicate_api_connection_url = replicate_api_connection["source"]["url"]
+audio_webhook_url = replicate_audio_connection["source"]["url"]
+embedding_webhook_url = replicate_embedding_connection["source"]["url"]
+
+# Replace the .env URLs in the .env content
+env_content = re.sub(
+    r"HOOKDECK_REPLICATE_API_QUEUE_API_KEY=.*",
+    f"HOOKDECK_REPLICATE_API_QUEUE_API_KEY={replicate_api_queue_api_key}",
+    env_content,
+)
+env_content = re.sub(
+    r"HOOKDECK_REPLICATE_API_QUEUE_URL=.*",
+    f"HOOKDECK_REPLICATE_API_QUEUE_URL={replicate_api_connection_url}",
+    env_content,
+)
+env_content = re.sub(
+    r"AUDIO_WEBHOOK_URL=.*", f"AUDIO_WEBHOOK_URL={audio_webhook_url}", env_content
+)
+env_content = re.sub(
+    r"EMBEDDINGS_WEBHOOK_URL=.*",
+    f"EMBEDDINGS_WEBHOOK_URL={embedding_webhook_url}",
+    env_content,
+)
+
+with open(".env", "w") as file:
+    file.write(env_content)
+
+print("Connections created successfully!")
+```
+
+This code reads the current `.env` content, replaces the lines containing the existing environment variable placeholders using regular expressions, and writes the updated content back to the `.env` file. This ensures that environment variables such as the webhook URLs are up to date.
+
+Run the script:
+
+```sh
+poetry run python create-hookdeck-connections.py
+```
+
+Check your `.env` file to ensure all values are populated.
+
+Also, navigate to the **Connections** section of the Hookdeck dashboard and check the visual representation of your connections.
+
+![Hookdeck Connection in the Hookdeck Dashboard]()
+
+### H3: Create MongoDB Atlas Indexes
+
+To search a MongoDB database efficiently, you need indexes. For MongoDB vector search, you must create an [Atlas Vector Search Index](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-overview/#atlas-vector-search-indexes). The `create-indexes.py` script automates the creation and updating of the search indexes in MongoDB using the `pymongo` library.
+
+First, ensure you have the necessary imports and initialize the database connection:
+
+```py
+from allthethings.mongo import Database
+from pymongo.operations import SearchIndexModel
+
+database = Database()
+collection = database.get_collection()
+```
+
+`Database` is defined in `allthethings/mongo.py` and provides utility access to the `assets` collection in the `iaat` database, with these string values defined in `config.py`.
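+
+The `allthethings/mongo.py` module itself isn't listed in this tutorial; a minimal sketch that is consistent with how `Database` is used might look like this (an assumed implementation, not the repository's exact code):
+
+```py
+from pymongo import MongoClient
+
+from config import Config
+
+
+class Database:
+    def __init__(self):
+        # Connect to MongoDB Atlas using the connection string from .env
+        self.client = MongoClient(Config.MONGODB_CONNECTION_URI)
+
+    def get_collection(self):
+        # The "assets" collection in the "iaat" database (see config.py)
+        return self.client[Config.DB_NAME][Config.COLLECTION_NAME]
+```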
+
+Next, ensure that the required collection exists within the database so that the indexes can be created:
+
+```py
+if collection.name not in collection.database.list_collection_names():
+    print("Creating empty collection so indexes can be created.")
+    collection.database.create_collection(collection.name)
+```
+
+With the collection created, define a function to create or update search indexes:
+
+```py
+def create_or_update_search_index(index_name, index_definition, index_type):
+    indexes = list(collection.list_search_indexes(index_name))
+
+    if len(indexes) == 0:
+        print(f'Creating search index: "{index_name}"')
+        index_model = SearchIndexModel(
+            definition=index_definition,
+            name=index_name,
+            type=index_type,
+        )
+        collection.create_search_index(model=index_model)
+
+    else:
+        print(f'Search index "{index_name}" already exists. Updating.')
+        collection.update_search_index(name=index_name, definition=index_definition)
+```
+
+This function checks if an index with the given `index_name` already exists. If it does not exist, it creates a new search index using the provided definition and type. If it exists, it updates the existing index with the new definition.
+
+Now, create a vector search index for embeddings:
+
+```py
+create_or_update_search_index(
+    "vector_index",
+    {
+        "fields": [
+            {
+                "type": "vector",
+                "path": "embedding",
+                "numDimensions": 768,
+                "similarity": "euclidean",
+            }
+        ]
+    },
+    "vectorSearch",
+)
+```
+
+This creates or updates a vector search index named "vector_index" for the `embedding` field.
+
+Finally, create a search index for the `url` field, as this is used to determine if a URL has already been indexed:
+
+```py
+create_or_update_search_index(
+    "url_index",
+    {
+        "mappings": {
+            "fields": {
+                "url": {
+                    "type": "string",
+                },
+            },
+        }
+    },
+    "search",
+)
+
+print("Indexes created successfully!")
+```
+
+Run the script:
+
+```sh
+poetry run python create-indexes.py
+```
+
+Go to the **Atlas Search** section within the MongoDB Atlas dashboard and check that the search indexes have been created.
+
+![MongoDB Atlas Dashboard Atlas Search indexes]()
+
+### H3: Check the App is Working
+
+In one terminal window, run the Flask application:
+
+```sh
+poetry run python -m flask --app app --debug run
+```
+
+In a second terminal window, create a local tunnel using the Hookdeck CLI:
+
+```sh
+hookdeck listen 5000 '*'
+```
+
+This command listens to all Hookdeck Sources connected to a CLI Destination, routing webhooks to the application running locally on port 5000.
+
+When you run the command, you will see output similar to the following:
+
+```sh
+Listening for events on Sources that have Connections with CLI Destinations
+
+Dashboard
+👉 Inspect and replay events: https://dashboard.hookdeck.com?team_id=tm_{id}
+
+Sources
+🔌 replicate-embedding URL: https://hkdk.events/{id}
+🔌 replicate-audio URL: https://hkdk.events/{id}
+
+Connections
+replicate-embedding -> replicate-embedding forwarding to /webhooks/embedding
+replicate-audio -> replicate-audio forwarding to /webhooks/audio
+
+> Ready! (^C to quit)
+```
+
+Open `localhost:5000` in your web browser to ensure the Flask app is running.
+
+![Index All the Things App]()
+
+## H2: Submit Content for Analysis and Indexing
+
+With the app running, it's time to submit an asset for indexing.
+
+Click **Bruce (mp3)** under the **Examples** header to populate the in-app search bar with a URL and click **Submit**.
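+
+Alternatively, you can exercise the same endpoint from the command line; a hypothetical equivalent request (assuming the app is running locally) would be:
+
+```sh
+curl -X POST http://localhost:5000/process \
+  -d "url=https://replicate.delivery/pbxt/KAX5G2BccyZabQssOPmBa9YnK6wkgstfXz3idL8CLKDXSkeB/bruce.mp3"
+```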
+
+![Index All The Things App]()
+
+Submitting the form sends the URL to the `/process` endpoint as a `POST` request. Let's walk through what that code does.
+
+First, define the `/process` route in `app.py`:
+
+```py
+@app.route("/process", methods=["POST"])
+def process():
+    url = request.form["url"]
+
+    parsed_url = urlparse(url)
+    if not all([parsed_url.scheme, parsed_url.netloc]):
+        flash("Invalid URL")
+        return redirect(url_for("index"))
+```
+
+This route handles the `POST` request to the `/process` endpoint and retrieves the URL from the form data submitted by the user. It validates the URL and, if it is invalid, redirects to the index page with an error message.
+
+Next, check if the URL already exists in the database:
+
+```py
+    database = Database()
+    collection = database.get_collection()
+
+    exists = collection.find_one({"url": url})
+
+    if exists is not None:
+        flash("URL has already been indexed")
+        return redirect(url_for("index"))
+```
+
+If the URL is already indexed, flash a message to the user and redirect them to the index page.
+
+Perform a `HEAD` request to validate the URL and retrieve its headers:
+
+```py
+    response = httpx.request("HEAD", url)
+
+    if response.status_code != 200:
+        flash("URL is not reachable")
+        return redirect(url_for("index"))
+```
+
+This code sends a `HEAD` request to the URL to avoid downloading the entire file, offloading the file download to Replicate. If the URL is not reachable (the status code is not 200), flash a message to the user and redirect them to the index page.
+
+Retrieve the content type and length from the response headers:
+
+```py
+    content_length = response.headers["Content-Length"]
+    content_type = response.headers["Content-Type"]
+```
+
+This code extracts the content length and content type from the response headers.
+
+Retrieve the appropriate asset processor based on the content type:
+
+```py
+    processor = get_asset_processor(content_type)
+
+    if processor is None:
+        flash('Unsupported content type "' + content_type + '"')
+        return redirect(url_for("index"))
+```
+
+If no processor is found for the content type, flash a message to the user and redirect them to the index page.
+
+The `get_asset_processor` function, defined in `allthethings/processors.py`, returns a processor used to analyze the contents of an asset based on the `content_type`.
+
+```py
+def get_asset_processor(
+    content_type,
+):
+    if "audio/" in content_type:
+        return AudioProcessor()
+    elif "video/" in content_type:
+        return None
+    elif "image/" in content_type:
+        return None
+    else:
+        return None
+```
+
+In this case, the file is an MP3, so the `content_type` is `audio/mpeg` and an `AudioProcessor` instance is returned.
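+
+To make the dispatch behavior concrete, here are a few illustrative calls (assuming the function and class are imported from `allthethings/processors.py`):
+
+```py
+from allthethings.processors import AudioProcessor, get_asset_processor
+
+# audio/* content types map to an AudioProcessor; video, image, and
+# anything else are currently unsupported and return None
+assert isinstance(get_asset_processor("audio/mpeg"), AudioProcessor)
+assert get_asset_processor("video/mp4") is None
+assert get_asset_processor("application/pdf") is None
+```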
+
+Insert the URL, along with its content type and length, into the database with a status of `SUBMITTED`:
+
+```py
+    asset = collection.insert_one(
+        {
+            "url": url,
+            "content_type": content_type,
+            "content_length": content_length,
+            "status": "SUBMITTED",
+        }
+    )
+```
+
+Process the URL using the asset processor, an `AudioProcessor`, and obtain the prediction results:
+
+```py
+    try:
+        response = processor.process(asset.inserted_id, url)
+    except Exception as e:
+        app.logger.error("Error processing asset: %s", e)
+        collection.update_one(
+            filter={"url": url},
+            update={
+                "$set": {
+                    "status": "PROCESSING_ERROR",
+                    "error": str(e),
+                }
+            },
+        )
+        flash("Error processing asset")
+        return redirect(url_for("index"))
+```
+
+Let's look at the `AudioProcessor` from `allthethings/processors.py` in more detail to understand what this does:
+
+```py
+import httpx
+from config import Config
+
+...
+
+class AudioProcessor:
+    def process(self, id, url):
+        input = {
+            "audio": url,
+            "model": "large-v3",
+            "language": "auto",
+            "translate": False,
+            "temperature": 0,
+            "transcription": "plain text",
+            "suppress_tokens": "-1",
+            "logprob_threshold": -1,
+            "no_speech_threshold": 0.6,
+            "condition_on_previous_text": True,
+            "compression_ratio_threshold": 2.4,
+            "temperature_increment_on_fallback": 0.2,
+        }
+
+        payload = {
+            "version": "cdd97b257f93cb89dede1c7584e3f3dfc969571b357dbcee08e793740bedd854",
+            "input": input,
+            "webhook": f"{Config.AUDIO_WEBHOOK_URL}/{id}",
+            "webhook_events_filter": ["completed"],
+        }
+
+        response = httpx.request(
+            "POST",
+            f"{Config.HOOKDECK_REPLICATE_API_QUEUE_URL}/predictions",
+            headers=Config.HOOKDECK_QUEUE_AUTH_HEADERS,
+            json=payload,
+        )
+
+        return response.json()
+```
+
+The `process` method processes the audio URL by creating a prediction request, passing the `payload` as the JSON body.
+
+The `payload` includes a `webhook` property, which consists of the `Config.AUDIO_WEBHOOK_URL` with an appended path (`/{id}`) that indicates which asset the callback is for. The `webhook_events_filter=["completed"]` filter informs Replicate to only send a webhook when the prediction has completed.
+
+The `payload.version` instructs Replicate to use the [OpenAI Whisper model](https://replicate.com/openai/whisper) for audio to text. The `input` includes details such as that the language should be auto-detected and that the transcription should be returned as `plain text`.
+
+Since we're using Hookdeck as an outbound API queue, the request uses the `Config.HOOKDECK_REPLICATE_API_QUEUE_URL` with the `/predictions` API path suffix. The appropriate auth headers are also used from `Config.HOOKDECK_QUEUE_AUTH_HEADERS`.
+
+Back in `app.py`, update the database with the processing status and pending prediction details:
+
+```py
+    collection.update_one(
+        filter={"url": url},
+        update={
+            "$set": {
+                "status": "PROCESSING",
+                "processor_response": response,
+            }
+        },
+    )
+```
+
+The `processor_response` value is stored for debugging purposes as it contains a Hookdeck request ID that can be useful.
+
+Flash a success message to the user and redirect them to the index page:
+
+```py
+    flash(
+        message="Processing: " + url + " with content type: " + content_type,
+        category="success",
+    )
+
+    return redirect(url_for("index"))
+```
+
+At this point, the Flask application has offloaded all the work to Replicate and, from a data journey perspective, we're waiting for the prediction completed webhook.
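+
+Neither `Config.HOOKDECK_QUEUE_AUTH_HEADERS` nor `Config.REPLICATE_API_AUTH_HEADERS` is shown in this walkthrough, so here's a rough, hedged sketch of what `config.py` might define for them. The header names and formats are assumptions, not the project's actual implementation; in particular, the header the Hookdeck Source expects depends on how its verification was configured in `create-hookdeck-connections.py`:
+
+```py
+# A hedged sketch of the auth headers in config.py; header names and
+# formats here are assumptions, not the project's actual implementation.
+import os
+
+
+class Config:
+    # Replicate's HTTP API uses Bearer token authentication
+    REPLICATE_API_AUTH_HEADERS = {
+        "Authorization": f"Bearer {os.environ.get('REPLICATE_API_TOKEN')}"
+    }
+
+    # Hypothetical: the key the Hookdeck inbound queue Source expects;
+    # the exact header depends on the Source's verification configuration
+    HOOKDECK_QUEUE_AUTH_HEADERS = {
+        "Authorization": f"Bearer {os.environ.get('HOOKDECK_REPLICATE_API_QUEUE_API_KEY')}"
+    }
+```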
+
+### H3: Handle Audio to Text Prediction Completion Webhook
+
+Once Replicate completes the prediction, it makes a webhook callback to Hookdeck. Hookdeck instantly ingests the webhook, verifies the event came from Replicate, and pushes the data onto a queue for processing and delivery. Based on the current Hookdeck Connection setup, the webhook event is delivered to the CLI and then to the `/webhooks/audio/<id>` endpoint of the Flask application. Let's look at the code that handles that request.
+
+Here's the route definition in `app.py`:
+
+```py
+@app.route("/webhooks/audio/<id>", methods=["POST"])
+def webhook_audio(id):
+    if not verify_webhook(request):
+        app.logger.error("Webhook signature verification failed")
+        return jsonify({"error": "Webhook signature verification failed"}), 401
+
+    payload = request.json
+    app.logger.info("Audio payload received for id %s", id)
+    app.logger.debug(payload)
+```
+
+This route handles `POST` requests to the `/webhooks/audio/<id>` endpoint. The `id` path parameter identifies the asset in the MongoDB database that the audio callback is for. The JSON payload of the webhook callback from Replicate is then read from the request.
+
+Before handling the webhook, we check that the webhook came from Hookdeck via a `verify_webhook` function. If the verification fails, a `401` response is returned. Here's the code to verify the webhook:
+
+```py
+def verify_webhook(request):
+    if Config.HOOKDECK_WEBHOOK_SECRET is None:
+        app.logger.error("No HOOKDECK_WEBHOOK_SECRET found.")
+        return False
+
+    hmac_header = request.headers.get("x-hookdeck-signature")
+
+    hash = base64.b64encode(
+        hmac.new(
+            Config.HOOKDECK_WEBHOOK_SECRET.encode(), request.data, hashlib.sha256
+        ).digest()
+    ).decode()
+
+    verified = hash == hmac_header
+    app.logger.debug("Webhook signature verification: %s", verified)
+    return verified
+```
+
+This reads the Hookdeck webhook secret stored in the `HOOKDECK_WEBHOOK_SECRET` environment variable, generates a hash of the inbound webhook data using the secret, and compares it with the hash that was sent in the `x-hookdeck-signature` header. If they match, the webhook is verified.
+
+Next, the processing status is determined based on the presence of an error in the payload:
+
+```py
+    database = Database()
+    collection = database.get_collection()
+
+    status = (
+        "PROCESSING_ERROR" if "error" in payload and payload["error"] else "PROCESSED"
+    )
+```
+
+If an error is present, the status is set to `PROCESSING_ERROR`; otherwise, it is set to `PROCESSED`.
+
+The database is updated with the transcription results and the processing status:
+
+```py
+    result = collection.find_one_and_update(
+        filter={"_id": ObjectId(id)},
+        update={
+            "$set": {
+                "status": status,
+                "text": payload["output"]["transcription"],
+                "replicate_response": payload,
+            }
+        },
+        return_document=True,
+    )
+```
+
+This finds the document in the database with the matching `id` and updates it with the new status, transcription `text`, and the entire Replicate response payload.
+
+Next, we check to ensure the document was found:
+
+```py
+    if result is None:
+        app.logger.error(
+            "No document found for id %s to add audio transcript", payload["id"]
+        )
+        return jsonify({"error": "No document found to add audio transcript"}), 404
+```
+
+If no document is found for the given `id`, an error is logged, and a JSON response with an error message is returned. The `404` response informs Hookdeck that, although the request did not succeed, it should not be retried.
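+
+To see the signature scheme in isolation, this standalone snippet (with made-up secret and body values) computes the same base64-encoded HMAC-SHA256 digest that `verify_webhook` compares against the `x-hookdeck-signature` header:
+
+```py
+# Standalone illustration of the signature scheme verify_webhook implements;
+# the secret and body are made-up values for demonstration.
+import base64
+import hashlib
+import hmac
+
+secret = "hd_whsec_example"  # hypothetical HOOKDECK_WEBHOOK_SECRET value
+body = b'{"status": "succeeded"}'  # raw request body bytes
+
+# Sign the raw body with HMAC-SHA256 and base64-encode the digest
+signature = base64.b64encode(
+    hmac.new(secret.encode(), body, hashlib.sha256).digest()
+).decode()
+
+# A request carrying this value in its x-hookdeck-signature header would verify
+print(signature)
+```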
+
+With the audio converted to text and stored, the data journey moves to generating embeddings via Replicate:
+
+```py
+    app.logger.info("Transcription updated")
+    app.logger.debug(result)
+
+    request_embeddings(id)
+
+    return "OK"
+```
+
+Next, the `request_embeddings` function is called to generate embeddings for the processed audio. The endpoint returns an `OK` response to inform Hookdeck the webhook has been successfully processed.
+
+## H2: Generate Embedding
+
+The `request_embeddings` function triggers the generation of embeddings for the textual representation of an indexed asset:
+
+```py
+def request_embeddings(id):
+    app.logger.info("Requesting embeddings for %s", id)
+
+    database = Database()
+    collection = database.get_collection()
+
+    asset = collection.find_one({"_id": id})
+
+    if asset is None:
+        raise RuntimeError("Asset not found")
+
+    if asset["status"] != "PROCESSED":
+        raise RuntimeError("Asset has not been processed")
+```
+
+If the asset with the passed `id` is not found, or the status of the asset is not `PROCESSED` (the status that indicates a textual representation has been created), a `RuntimeError` is raised.
+
+### H3: Trigger Embedding Generation with Webhook Callback
+
+Next, the embeddings are generated for the processed asset using the `AsyncEmbeddingsGenerator`:
+
+```py
+    generator = AsyncEmbeddingsGenerator()
+
+    try:
+        response = generator.generate(id, asset["text"])
+    except Exception as e:
+        app.logger.error("Error generating embeddings for %s: %s", id, e)
+        raise
+```
+
+This initializes the `AsyncEmbeddingsGenerator` and calls the `generate` function on the instance, passing the ID of the asset being indexed and the textual representation.
+
+The `AsyncEmbeddingsGenerator` definition in `allthethings/generators.py` follows a similar pattern to the previously used processor:
+
+```py
+import httpx
+from config import Config
+
+
+class AsyncEmbeddingsGenerator:
+    def generate(self, id, text):
+        payload = {
+            "version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305",
+            "input": {"text": text},
+            "webhook": f"{Config.EMBEDDINGS_WEBHOOK_URL}/{id}",
+            "webhook_events_filter": ["completed"],
+        }
+
+        response = httpx.request(
+            "POST",
+            f"{Config.HOOKDECK_REPLICATE_API_QUEUE_URL}/predictions",
+            headers=Config.HOOKDECK_QUEUE_AUTH_HEADERS,
+            json=payload,
+        )
+
+        return response.json()
+```
+
+The `generate` method receives the asset `id` and the `text` that embeddings are to be generated for.
+
+A request `payload` is created containing a `version` that identifies the [replicate/all-mpnet-base-v2](https://replicate.com/replicate/all-mpnet-base-v2) model used to generate the embeddings. The `text` for the embedding is passed within the `input` parameter.
+
+The `webhook` property is set to `Config.EMBEDDINGS_WEBHOOK_URL` with an appended path (`/{id}`) that indicates which asset the callback is for. As before, the `webhook_events_filter=["completed"]` filter informs Replicate to only send a webhook when the prediction has completed.
+
+Since this is an asynchronous call, Hookdeck is again used to queue the Replicate API request via a call to the `HOOKDECK_REPLICATE_API_QUEUE_URL` endpoint with the `/predictions` path.
+
+The method returns the Hookdeck response.
+
+Back in `app.py`, update the database with the status and embedding request ID:
+
+```py
+    collection.update_one(
+        filter={"_id": ObjectId(id)},
+        update={
+            "$set": {
+                "status": "GENERATING_EMBEDDINGS",
+                "generator_response": response,
+            }
+        },
+    )
+```
+
+Update the document in the database with the new status, `GENERATING_EMBEDDINGS`, and the Hookdeck queue response.
+
+The request to asynchronously generate the embeddings has been triggered, and the work offloaded to Replicate. When the result is ready, a webhook will be triggered with the result.
+
+### H3: Handle Embedding Generation Webhook Callback
+
+Once Replicate has generated the embedding, a webhook callback is made to the `/webhooks/embedding/<id>` route in our Flask application. This route receives the webhook payload, verifies it came from Hookdeck, updates the database with the embedding results, and sets the appropriate status.
+
+Here's the route definition:
+
+```py
+@app.route("/webhooks/embedding/<id>", methods=["POST"])
+def webhook_embedding(id):
+    if not verify_webhook(request):
+        app.logger.error("Webhook signature verification failed")
+        return jsonify({"error": "Webhook signature verification failed"}), 401
+
+    payload = request.json
+    app.logger.info("Embedding payload received for id %s", id)
+    app.logger.debug(payload)
+```
+
+This route handles `POST` requests to the `/webhooks/embedding/<id>` endpoint and is passed the `id` path parameter. It verifies the request came from Hookdeck and, if so, retrieves the JSON payload from the request. Otherwise, it returns a `401` response.
+
+Next, it checks for errors:
+
+```py
+    status = (
+        "EMBEDDINGS_ERROR" if "error" in payload and payload["error"] else "SEARCHABLE"
+    )
+```
+
+If an error is present in the payload, the status is set to `EMBEDDINGS_ERROR`; otherwise, it is set to `SEARCHABLE`.
+
+Next, the vector embedding is extracted from the payload and the database is updated with the embedding details and the new status:
+
+```py
+    embedding = payload["output"][0]["embedding"]
+
+    database = Database()
+    collection = database.get_collection()
+
+    result = collection.update_one(
+        filter={"_id": ObjectId(id)},
+        update={
+            "$set": {
+                "status": status,
+                "embedding": embedding,
+                "replicate_embeddings_response": payload,
+            }
+        },
+    )
+```
+
+This finds the document in the database with the matching `id` and updates it with the new status, the embedding, and the entire payload.
+
+Check if the document was found and updated:
+
+```py
+    if result.matched_count == 0:
+        app.logger.error(
+            "No document found for id %s to update embedding", payload["id"]
+        )
+        return jsonify({"error": "No document found to update embedding"}), 404
+
+    return "OK"
+```
+
+If no document is found for the given `id`, an error is logged, and a JSON response with an error message is returned with a `404` status. If the update was successful, return an `OK` to inform Hookdeck the webhook has been processed.
+
+With the vector embedding stored in the `embedding` property, the asset is now searchable with MongoDB thanks to the previously defined vector search index.
+
+## H2: Searching using Atlas Vector Search
+
+Search is user-driven: the user enters a search term and submits a form. The search query is handled and processed, and the results are returned and displayed. Ideally, this is a real-time experience, so these operations are performed synchronously.
+
+Let's walk through each of those steps.
+
+### H3: Handle Search Submission
+
+The user navigates to the `/search` endpoint in their web browser, enters a search term, and submits the form, making a `GET` request to the `/search` endpoint:
+
+```py
+@app.route("/search", methods=["GET"])
+def search():
+    query = request.args.get("query")
+    if query is None:
+        return render_template("search.html", results=[])
+
+    app.logger.info("Query submitted")
+    app.logger.debug(query)
+
+    results = query_vector_search(query)
+
+    results = format_results(results)
+
+    app.logger.debug("Formatted search results: %s", results)
+
+    return render_template("search.html", results=results, query=query)
+```
+
+The `search` function in the Flask application handles `GET` requests to the `/search` endpoint. It retrieves the search `query` submitted by the user from the request's query string via `request.args.get`. If there is no query, the `search.html` template is rendered with no results. Otherwise, a vector search is performed using the `query_vector_search` function, the results are formatted by passing them to the `format_results` function, and the formatted results are rendered using the `search.html` template.
+
+### H3: Generating Search Query Embeddings
+
+The `query_vector_search` function performs a vector search using the query provided by the user: it generates an embedding for the query and retrieves matching documents from the MongoDB collection.
+
+```py
+def query_vector_search(q):
+    generator = SyncEmbeddingsGenerator()
+
+    try:
+        generator_response = generator.generate(q)
+        app.logger.debug(generator_response)
+    except Exception as e:
+        app.logger.error("Error generating embeddings: %s", e)
+        return None
+
+    if generator_response["status"] != "completed":
+        app.logger.debug("Embeddings generation timed out")
+        return None
+
+    query_embedding = generator_response["output"][0]["embedding"]
+```
+
+The function takes the query, `q`, and uses the `SyncEmbeddingsGenerator` to generate the embedding for the search query by calling its `generate` function and passing the query. If the embedding generation fails or times out, `None` is returned.
+
+The `SyncEmbeddingsGenerator` is used to synchronously generate embeddings for the search query. This operation is synchronous because the request is user-driven and requires a direct response. `SyncEmbeddingsGenerator` is defined in `allthethings/generators.py`:
+
+```py
+class SyncEmbeddingsGenerator:
+    def generate(self, text):
+        payload = {
+            "version": "b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305",
+            "input": {"text": text},
+        }
+
+        response = httpx.request(
+            "POST",
+            "https://api.replicate.com/v1/predictions",
+            headers={**Config.REPLICATE_API_AUTH_HEADERS, "Prefer": "wait"},
+            json=payload,
+            timeout=180,
+        )
+
+        return response.json()
+```
+
+The `generate` function receives the `text` to generate an embedding for. A synchronous request is made directly to the Replicate HTTP API, passing the same [replicate/all-mpnet-base-v2](https://replicate.com/replicate/all-mpnet-base-v2) model `version` used in the asynchronous embedding request. The `"Prefer": "wait"` header and `timeout` values are set to enable a long-running synchronous HTTP request. Also, the Replicate API token is included in the headers via `Config.REPLICATE_API_AUTH_HEADERS`.
+
+The response JSON is returned to the calling function.
+
+### H3: Create Vector Search Query
+
+Back in `query_vector_search`, the embedding result is used to construct the vector search query.
+
+```py
+    ...
+
+    vs_query = {
+        "index": "vector_index",
+        "path": "embedding",
+        "queryVector": query_embedding,
+        "numCandidates": 100,
+        "limit": 10,
+    }
+
+    new_search_query = {"$vectorSearch": vs_query}
+
+    app.logger.info("Vector search query created")
+    app.logger.debug(new_search_query)
+```
+
+`vs_query` represents the vector search to be performed. It identifies the `index` to be queried (`vector_index`), the document `path` the query runs against (`embedding`), and the embedding representation of the text query (`"queryVector": query_embedding`). See the [MongoDB vector search docs](https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/#mongodb-pipeline-pipe.-vectorSearch) for more information, including the purpose of the `numCandidates` and `limit` properties.
+
+### H3: Retrieve Vector Search Results
+
+Next, the function defines the projection to specify which fields to include in the search results.
+
+```py
+    project = {
+        "$project": {
+            "score": {"$meta": "vectorSearchScore"},
+            "_id": 0,
+            "url": 1,
+            "content_type": 1,
+            "content_length": 1,
+            "text": 1,
+        }
+    }
+```
+
+The projection includes the vector search score, URL, content type, content length, and text.
+
+The function then performs the aggregation query using the constructed vector search query and projection:
+
+```py
+    database = Database()
+    collection = database.get_collection()
+
+    app.logger.info("Vector search query without post filter")
+    res = list(collection.aggregate([new_search_query, project]))
+
+    app.logger.info("Vector search query run")
+    app.logger.debug(res)
+    return res
+```
+
+Overall, the `query_vector_search` function generates an embedding for the query provided by the user, performs a vector search with it, and retrieves the matching documents from the MongoDB database.
+
+### H3: Format and Display the Vector Search Results
+
+Next, within the `search` route in `app.py`, the results are formatted for rendering:
+
+```py
+    results = format_results(results)
+```
+
+And within `format_results`, also defined in `app.py`:
+
+```py
+def format_results(results):
+    formatted_results = []
+    for _idx, index in enumerate(results):
+        parse_result = urlparse(index["url"])
+        parsed_url = {
+            "netloc": parse_result.netloc,
+            "path": parse_result.path,
+            "params": parse_result.params,
+            "query": parse_result.query,
+            "fragment": parse_result.fragment,
+            "hostname": parse_result.hostname,
+            "last_part": parse_result.path.rstrip("/").split("/")[-1],
+        }
+        index["parsed_url"] = parsed_url
+        formatted_results.append(index)
+
+    return formatted_results
+```
+
+The `format_results` function iterates over the vector search results and returns an array in which each element contains the result along with a `parsed_url` property with information about the indexed asset's URL.
+
+Finally, back in the `GET /search` route, the results are displayed:
+
+```py
+@app.route("/search", methods=["GET"])
+def search():
+    ...
+
+    results = format_results(results)
+
+    return render_template("search.html", results=results, query=query)
+```
+
+This renders the `search.html` template, passing the formatted results and the original query to the template for display.
+
+![Search results](