From b4646eb6d1e51be958c61ff43d8853d4a1cd5eec Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Thu, 29 Jun 2023 17:43:00 -0700 Subject: [PATCH 01/34] Initial checkin. Upstash and backstory works. LLM not on branch yet. --- .gitignore | 5 +++- python/api/chatgpt.py | 13 +++++++++ python/api/upstash.py | 56 ++++++++++++++++++++++++++++++++++++++ python/companion.py | 24 +++++++++++++++++ python/localcompanion.py | 58 ++++++++++++++++++++++++++++++++++++++++ python/requirements.txt | 2 ++ 6 files changed, 157 insertions(+), 1 deletion(-) create mode 100644 python/api/chatgpt.py create mode 100644 python/api/upstash.py create mode 100644 python/companion.py create mode 100644 python/localcompanion.py create mode 100644 python/requirements.txt diff --git a/.gitignore b/.gitignore index d489e57..ce2fb5b 100644 --- a/.gitignore +++ b/.gitignore @@ -35,4 +35,7 @@ yarn-error.log* next-env.d.ts /.env.prod -/fly.toml \ No newline at end of file +/fly.toml + +# python +__pychache__ \ No newline at end of file diff --git a/python/api/chatgpt.py b/python/api/chatgpt.py new file mode 100644 index 0000000..24562dd --- /dev/null +++ b/python/api/chatgpt.py @@ -0,0 +1,13 @@ +# +# API to OpenAI's ChatGPT via LangChain +# + +import os +import json +import openai +import asyncio + +class LlmManager: + + async def post(user_str): + return \ No newline at end of file diff --git a/python/api/upstash.py b/python/api/upstash.py new file mode 100644 index 0000000..66897b4 --- /dev/null +++ b/python/api/upstash.py @@ -0,0 +1,56 @@ +# +# Persistent memory for companions +# + +import os +import json +import time + +from upstash_redis.client import Redis + +class MemoryManager: + instance = None + + def __init__(self, companion_name, user_id, model_name): + self.history = Redis.from_env() + self.user_id = user_id + self.companion_name = companion_name + self.model_name = model_name + + def get_companion_key(self): + return f"{self.model_name}-{self.companion_name}-{self.user_id}" + + async def write_to_history(self, text): + if self.user_id is None: + print("No user id") + return "" + + key = self.get_companion_key() + async with self.history: + result = self.history.zadd(key, {text: int(time.time())}) + + return result + + async def read_latest_history(self): + if self.user_id is None: + print("No user id") + return "" + + key = self.get_companion_key() + async with self.history: + now = int(time.time()*1000) + result = await self.history.zrange(key, 1, now, range_method="byscore") + print(f'Found {len(result)} chat messages in history.') + result = list(reversed(result[-30:])) + recent_chats = "\n".join(result) + return recent_chats + + async def seed_chat_history(self, seed_content, delimiter="\n"): + key = self.get_companion_key() + if self.history.exists(key): + print("User already has chat history") + return + + content = seed_content.split(delimiter) + for index, line in enumerate(content): + self.history.zadd(key, {line: index}) diff --git a/python/companion.py b/python/companion.py new file mode 100644 index 0000000..9de57f3 --- /dev/null +++ b/python/companion.py @@ -0,0 +1,24 @@ +# +# Class that represents a companion +# + +class Companion: + + # Constructor for the class, takes a JSON object as an input + def __init__(self, cdata): + self.name = cdata["name"] + self.title = cdata["title"] + self.imagePath = cdata["imageUrl"] + self.llm_name = cdata["llm"] + + def load_backstory(self, file_path): + # Load backstory + with open(file_path , 'r', encoding='utf-8') as file: + data = file.read() + 
self.preamble, rest = data.split('###ENDPREAMBLE###', 1) + self.seed_chat, _ = rest.split('###ENDSEEDCHAT###', 1) + return len(self.preamble) + len(self.seed_chat) + + def __str__(self): + return f'Companion: {self.name}, {self.title} (using {self.llm_name})' + diff --git a/python/localcompanion.py b/python/localcompanion.py new file mode 100644 index 0000000..8db6c9d --- /dev/null +++ b/python/localcompanion.py @@ -0,0 +1,58 @@ +# +# Compainion-App implemented as a local script, no web server required +# + +import os +import json +import asyncio +from dotenv import load_dotenv +from api.upstash import MemoryManager +from api.chatgpt import LlmManager + +from companion import Companion + +# Location of the data files from the TS implementation +env_file = "../.env.local" +companion_dir = "../companions" +companions_file = "companions.json" + +# This is the Clerk user ID. We don't use Clerk for the local client, but it is needed for the Redis key +user_id = "user_2Rr1oYMS2KUX93esKB5ZAEGDWWi" + +# load environment variables from the JavaScript .env file +config = load_dotenv(env_file) + +def main(): + + # Read list of companions from JSON file + i = 0 + companions = [] + with open(os.path.join(companion_dir, companions_file)) as f: + companion_data = json.load(f) + for c in companion_data: + companion = Companion(c) + print(f' #{i+1}: {companion}') + companions.append(companion) + i += 1 + + # Ask user to pick a companion and load it + print(f'Who do you want to chat with 1-{i}?') + selection = int(input()) + companion = companions[selection-1] + print('') + print(f'Connecting you to {companion.name}...') + + # load the companion's backstory, this should come from the vectorDB + l = companion.load_backstory(os.path.join(companion_dir, f'{companion.name}.txt')) + print(f'Loaded {l} characters of backstory.') + + # Initialize memory, embeddings and llm + companion.memory = MemoryManager(companion.name, user_id, companion.llm_name) + h = asyncio.run(companion.memory.read_latest_history()) + print(f'Loaded {len(h)} characters of chat history.') + + # Initialize LLM + companion.llm = LlmManager() + +if __name__ == "__main__": + main() diff --git a/python/requirements.txt b/python/requirements.txt new file mode 100644 index 0000000..88e4268 --- /dev/null +++ b/python/requirements.txt @@ -0,0 +1,2 @@ +python-dotenv==1.0.0 +upstash-redis==0.12.0 \ No newline at end of file From 45e78e8fd2eeff09b0b9bc1f76ba1d16dedfd224 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 11:09:01 -0700 Subject: [PATCH 02/34] Working on ChatGPT --- .gitignore | 2 +- python/companion.py | 77 +++++++++++++++++++++++++++++++++++++++- python/localcompanion.py | 12 ++++--- 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/.gitignore b/.gitignore index ce2fb5b..4e12e55 100644 --- a/.gitignore +++ b/.gitignore @@ -38,4 +38,4 @@ next-env.d.ts /fly.toml # python -__pychache__ \ No newline at end of file +__pycache__/ \ No newline at end of file diff --git a/python/companion.py b/python/companion.py index 9de57f3..a141e67 100644 --- a/python/companion.py +++ b/python/companion.py @@ -2,8 +2,27 @@ # Class that represents a companion # +import asyncio +from langchain import LLMChain, PromptTemplate + class Companion: + # --- Prompt template --- + + prompt_template_str = """You are ${name} and are currently talking to ${user_name}. + + ${preamble} + + You reply with answers that range from one sentence to one paragraph and with some details. 
${replyLimit} + + Below are relevant details about ${name}'s past: + + ${relevantHistory} + + Below is a relevant conversation history: + + ${recentChatHistory}""" + # Constructor for the class, takes a JSON object as an input def __init__(self, cdata): self.name = cdata["name"] @@ -11,14 +30,70 @@ def __init__(self, cdata): self.imagePath = cdata["imageUrl"] self.llm_name = cdata["llm"] - def load_backstory(self, file_path): + def load_prompt(self, file_path): # Load backstory with open(file_path , 'r', encoding='utf-8') as file: data = file.read() self.preamble, rest = data.split('###ENDPREAMBLE###', 1) self.seed_chat, _ = rest.split('###ENDSEEDCHAT###', 1) + + self.prompt_template = PromptTemplate.from_template(self.prompt_template_str) + return len(self.preamble) + len(self.seed_chat) + + def __str__(self): return f'Companion: {self.name}, {self.title} (using {self.llm_name})' + async def chat(self, user_input, user_name, max_reply_length=0): + + # Read chat history + recent_chat_history = asyncio.run(self.memory.read_latest_history()) + + #client = PineconeClient(api_key=os.getenv('PINECONE_API_KEY'), + # environment=os.getenv('PINECONE_ENVIRONMENT')) + #index_name = os.getenv('PINECONE_INDEX') + #pinecone_index = client.get_index(index_name) + + # TODO: Implement PineconeStore and OpenAIEmbeddings in Python. + # vector_store = PineconeStore.from_existing_index( + # OpenAIEmbeddings(api_key=os.getenv('OPENAI_API_KEY')), + # pinecone_index + #) + + #try: + # similar_docs = vector_store.similarity_search(recent_chat_history, 3, file_name=companion_file_name) + #except Exception as e: + # print(f"WARNING: failed to get vector search results. {str(e)}") + # similar_docs = [] + + similar_docs = [self.backstory] + relevant_history = "\n".join(doc.page_content for doc in similar_docs) + + # Create the prompt and invoke the LLM + reply_limit = f'You reply within {max_reply_length} characters.' if max_reply_length else "" + + name=self.name, user_name=user_name, preamble=self.preamble, replyLimit=reply_limit, + relevantHistory=relevant_history, recentChatHistory=recent_chat_history) + + print("Prompt:") + print(chain_prompt) + + chain = LLMChain(llm=self.llm.model, prompt=self.prompt_template) + + try: + result = await chain.call(relevant_history=relevant_history, recent_chat_history=recent_chat_history) + except Exception as e: + print(str(e)) + result = None + + print("result", result) + + self.memory.write_to_history(f"Human: {user_input}\n") + self.memory.write_to_history(result.text + "\n") + print("chatHistoryRecord", chat_history_record) + + if is_text: + return jsonify(result.text) + return web.StreamResponse(stream) \ No newline at end of file diff --git a/python/localcompanion.py b/python/localcompanion.py index 8db6c9d..44df85b 100644 --- a/python/localcompanion.py +++ b/python/localcompanion.py @@ -42,14 +42,18 @@ def main(): print('') print(f'Connecting you to {companion.name}...') - # load the companion's backstory, this should come from the vectorDB - l = companion.load_backstory(os.path.join(companion_dir, f'{companion.name}.txt')) + # load the companion's backstory, initialize prompts + l = companion.load_prompt(os.path.join(companion_dir, f'{companion.name}.txt')) print(f'Loaded {l} characters of backstory.') - # Initialize memory, embeddings and llm + # Initialize memory. Initialize if empty. 
companion.memory = MemoryManager(companion.name, user_id, companion.llm_name) h = asyncio.run(companion.memory.read_latest_history()) - print(f'Loaded {len(h)} characters of chat history.') + if not h: + print(f'Chat history empty, initializing.') + self.memory.seed_chat_history(self.seed_chat, '\n\n') + else: + print(f'Loaded {len(h)} characters of chat history.') # Initialize LLM companion.llm = LlmManager() From e59f8005d18f4c0cb8f7838576ed7a6a536cac20 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 16:05:01 -0700 Subject: [PATCH 03/34] Basic chatting now works. Wohoo! --- python/README.md | 45 +++++++++++++++++++++++ python/api/chatgpt.py | 10 ++--- python/api/pinecone.py | 17 +++++++++ python/api/upstash.py | 17 +++++---- python/companion.py | 79 ++++++++++++++-------------------------- python/localcompanion.py | 28 +++++++++++--- python/requirements.txt | 5 ++- 7 files changed, 129 insertions(+), 72 deletions(-) create mode 100644 python/README.md create mode 100644 python/api/pinecone.py diff --git a/python/README.md b/python/README.md new file mode 100644 index 0000000..9eab153 --- /dev/null +++ b/python/README.md @@ -0,0 +1,45 @@ +# Python Local Companion + +This is a local python implementation of the CompanionAI stack. It is compatible with +the TypeScript implementation and uses the same config files, data files and databases. +This means if you use a supported LLM you can start a conversation via the TS web app +and continue it via the local python client (or vice versa). + +Specifically: +- Companion information is loaded from the companion directory +- Conversation history is stored in Upstash/Redis +- It uses OpenAI ChatGPT-turbo-3.5 to generate the chat messages + +Right now, Vicuña (the OSS LLM) and Pinecone (for retrieving longer chat history and +backstory), are not supported yet but will be added shortly. + +## Installation + +Make sure you have python 3.X. Install the necessary requirements with: + +``` +$ pip3 install -r requirements.txt +``` + +Next, get the necessary API keys as described in the Readme.md file for the main TS project. +You will need at least Upstash, OpenAI and Pinecone. You do not need Clerk (as we are local). +Add them to the .env.local file in the root directory as described in the top level README. +The python code will reads the API keys from the same .env.local file as the TS app. + +Run the local client: + +``` +$ python3 localcompanion.py +``` + +This should bring up the list of companions, allow you to select a companion, and start chatting. + +## Sharing a companion with the web app + +Right now, if you want to share chat history with the web app, you need to specify the Clerk user +ID as it is used as part of the Upstash Redis key. 
Find it via the Clerk console, add the following +to the .env.local in the root directory: + +``` +CLERK_USER_ID="user_***" +``` diff --git a/python/api/chatgpt.py b/python/api/chatgpt.py index 24562dd..1375888 100644 --- a/python/api/chatgpt.py +++ b/python/api/chatgpt.py @@ -2,12 +2,12 @@ # API to OpenAI's ChatGPT via LangChain # -import os -import json -import openai -import asyncio +from langchain import LLMChain +from langchain.chat_models import ChatOpenAI class LlmManager: - async def post(user_str): + def __init__(self, prompt_template): + self.model = ChatOpenAI(model="gpt-3.5-turbo-16k") + self.chain = LLMChain(llm=self.model, prompt=prompt_template, verbose=True) return \ No newline at end of file diff --git a/python/api/pinecone.py b/python/api/pinecone.py new file mode 100644 index 0000000..ecfa749 --- /dev/null +++ b/python/api/pinecone.py @@ -0,0 +1,17 @@ + #client = PineconeClient(api_key=os.getenv('PINECONE_API_KEY'), + # environment=os.getenv('PINECONE_ENVIRONMENT')) + #index_name = os.getenv('PINECONE_INDEX') + #pinecone_index = client.get_index(index_name) + + # TODO: Implement PineconeStore and OpenAIEmbeddings in Python. + # vector_store = PineconeStore.from_existing_index( + # OpenAIEmbeddings(api_key=os.getenv('OPENAI_API_KEY')), + # pinecone_index + #) + + #try: + # similar_docs = vector_store.similarity_search(recent_chat_history, 3, file_name=companion_file_name) + #except Exception as e: + # print(f"WARNING: failed to get vector search results. {str(e)}") + # similar_docs = [] + # relevant_history = "\n".join(doc.page_content for doc in similar_docs) \ No newline at end of file diff --git a/python/api/upstash.py b/python/api/upstash.py index 66897b4..7807983 100644 --- a/python/api/upstash.py +++ b/python/api/upstash.py @@ -27,7 +27,7 @@ async def write_to_history(self, text): key = self.get_companion_key() async with self.history: - result = self.history.zadd(key, {text: int(time.time())}) + result = await self.history.zadd(key, {text: int(time.time())}) return result @@ -41,16 +41,17 @@ async def read_latest_history(self): now = int(time.time()*1000) result = await self.history.zrange(key, 1, now, range_method="byscore") print(f'Found {len(result)} chat messages in history.') - result = list(reversed(result[-30:])) + result = list(result[-30:]) recent_chats = "\n".join(result) return recent_chats async def seed_chat_history(self, seed_content, delimiter="\n"): key = self.get_companion_key() - if self.history.exists(key): - print("User already has chat history") - return + async with self.history: + if await self.history.exists(key): + print("User already has chat history") + return - content = seed_content.split(delimiter) - for index, line in enumerate(content): - self.history.zadd(key, {line: index}) + content = seed_content.split(delimiter) + for index, line in enumerate(content): + await self.history.zadd(key, {line: index}) diff --git a/python/companion.py b/python/companion.py index a141e67..9838f42 100644 --- a/python/companion.py +++ b/python/companion.py @@ -7,21 +7,25 @@ class Companion: - # --- Prompt template --- + # --- Prompt template ------------------------------------------------------------------------------------ - prompt_template_str = """You are ${name} and are currently talking to ${user_name}. + prompt_template_str = """You are {name} and are currently talking to {user_name}. + {preamble} + Below are relevant details about {name}'s past: + ---START--- + {relevantHistory} + ---END--- + Generate the next chat message to the human. 
It may be between one sentence to one paragraph and with some details. + You may not never generate chat messages from the Human. {replyLimit} - ${preamble} + Below is the recent chat history of your conversation with the human. + ---START--- + {recentChatHistory} - You reply with answers that range from one sentence to one paragraph and with some details. ${replyLimit} + """ - Below are relevant details about ${name}'s past: + # --- Prompt template ------------------------------------------------------------------------------------ - ${relevantHistory} - - Below is a relevant conversation history: - - ${recentChatHistory}""" # Constructor for the class, takes a JSON object as an input def __init__(self, cdata): @@ -35,65 +39,36 @@ def load_prompt(self, file_path): with open(file_path , 'r', encoding='utf-8') as file: data = file.read() self.preamble, rest = data.split('###ENDPREAMBLE###', 1) - self.seed_chat, _ = rest.split('###ENDSEEDCHAT###', 1) + self.seed_chat, self.backstory = rest.split('###ENDSEEDCHAT###', 1) self.prompt_template = PromptTemplate.from_template(self.prompt_template_str) return len(self.preamble) + len(self.seed_chat) - - def __str__(self): return f'Companion: {self.name}, {self.title} (using {self.llm_name})' async def chat(self, user_input, user_name, max_reply_length=0): - # Read chat history - recent_chat_history = asyncio.run(self.memory.read_latest_history()) - - #client = PineconeClient(api_key=os.getenv('PINECONE_API_KEY'), - # environment=os.getenv('PINECONE_ENVIRONMENT')) - #index_name = os.getenv('PINECONE_INDEX') - #pinecone_index = client.get_index(index_name) - - # TODO: Implement PineconeStore and OpenAIEmbeddings in Python. - # vector_store = PineconeStore.from_existing_index( - # OpenAIEmbeddings(api_key=os.getenv('OPENAI_API_KEY')), - # pinecone_index - #) - - #try: - # similar_docs = vector_store.similarity_search(recent_chat_history, 3, file_name=companion_file_name) - #except Exception as e: - # print(f"WARNING: failed to get vector search results. {str(e)}") - # similar_docs = [] + # Add user input to chat history database + await self.memory.write_to_history(f"Human: {user_input}\n") - similar_docs = [self.backstory] - relevant_history = "\n".join(doc.page_content for doc in similar_docs) + # Missing: Use Pinecone - # Create the prompt and invoke the LLM + # Read chat history + recent_chat_history = await self.memory.read_latest_history() + relevant_history = self.backstory reply_limit = f'You reply within {max_reply_length} characters.' 
if max_reply_length else "" - - name=self.name, user_name=user_name, preamble=self.preamble, replyLimit=reply_limit, - relevantHistory=relevant_history, recentChatHistory=recent_chat_history) - - print("Prompt:") - print(chain_prompt) - - chain = LLMChain(llm=self.llm.model, prompt=self.prompt_template) try: - result = await chain.call(relevant_history=relevant_history, recent_chat_history=recent_chat_history) + result = self.llm.chain.run( + name=self.name, user_name=user_name, preamble=self.preamble, replyLimit=reply_limit, + relevantHistory=relevant_history, recentChatHistory=recent_chat_history) except Exception as e: print(str(e)) result = None - print("result", result) - - self.memory.write_to_history(f"Human: {user_input}\n") - self.memory.write_to_history(result.text + "\n") - print("chatHistoryRecord", chat_history_record) + if result: + await self.memory.write_to_history(result + "\n") - if is_text: - return jsonify(result.text) - return web.StreamResponse(stream) \ No newline at end of file + return result \ No newline at end of file diff --git a/python/localcompanion.py b/python/localcompanion.py index 44df85b..5635900 100644 --- a/python/localcompanion.py +++ b/python/localcompanion.py @@ -17,12 +17,17 @@ companions_file = "companions.json" # This is the Clerk user ID. We don't use Clerk for the local client, but it is needed for the Redis key -user_id = "user_2Rr1oYMS2KUX93esKB5ZAEGDWWi" +user_id = "local" +user_name = "Human" # load environment variables from the JavaScript .env file config = load_dotenv(env_file) -def main(): +async def main(): + + # For compatibility with the TS implementation, user needs to specify the Clerk user ID + if os.getenv('CLERK_USER_ID'): + user_id = os.getenv('CLERK_USER_ID') # Read list of companions from JSON file i = 0 @@ -48,15 +53,26 @@ def main(): # Initialize memory. Initialize if empty. companion.memory = MemoryManager(companion.name, user_id, companion.llm_name) - h = asyncio.run(companion.memory.read_latest_history()) + h = await companion.memory.read_latest_history() if not h: print(f'Chat history empty, initializing.') - self.memory.seed_chat_history(self.seed_chat, '\n\n') + await companion.memory.seed_chat_history(companion.seed_chat, '\n\n') else: print(f'Loaded {len(h)} characters of chat history.') # Initialize LLM - companion.llm = LlmManager() + companion.llm = LlmManager(companion.prompt_template) + + # Start chatting + print('') + print(f'You are now chatting with {companion.name}. 
Type "quit" to exit.') + while True: + user_input = input("Human> ") + if user_input == "quit": + break + reply = await companion.chat(user_input, user_name) + print(f'{companion.name}: {reply}') if __name__ == "__main__": - main() + asyncio.run(main()) + diff --git a/python/requirements.txt b/python/requirements.txt index 88e4268..09b9b78 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,2 +1,5 @@ python-dotenv==1.0.0 -upstash-redis==0.12.0 \ No newline at end of file +upstash-redis==0.12.0 +pinecone-client==2.2.2 +openai==0.27.0 +langchain==0.0.219 \ No newline at end of file From 9c1923ba39b9654d39592ac6e02c3ae29a2fedf6 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 16:21:03 -0700 Subject: [PATCH 04/34] Turned off debugging --- python/api/chatgpt.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/api/chatgpt.py b/python/api/chatgpt.py index 1375888..6a902de 100644 --- a/python/api/chatgpt.py +++ b/python/api/chatgpt.py @@ -7,7 +7,9 @@ class LlmManager: + verbose = False + def __init__(self, prompt_template): self.model = ChatOpenAI(model="gpt-3.5-turbo-16k") - self.chain = LLMChain(llm=self.model, prompt=prompt_template, verbose=True) + self.chain = LLMChain(llm=self.model, prompt=prompt_template, verbose=self.verbose) return \ No newline at end of file From aefa1c014bb264aa00b73be10f2828a1fd0e8c25 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 16:30:49 -0700 Subject: [PATCH 05/34] Fixed ordering of the fields that make up the key to enable compatibility. Fixed time in s vs. ms. --- python/api/upstash.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/api/upstash.py b/python/api/upstash.py index 7807983..df1a5b7 100644 --- a/python/api/upstash.py +++ b/python/api/upstash.py @@ -18,7 +18,7 @@ def __init__(self, companion_name, user_id, model_name): self.model_name = model_name def get_companion_key(self): - return f"{self.model_name}-{self.companion_name}-{self.user_id}" + return f"{self.companion_name}-{self.model_name}-{self.user_id}" async def write_to_history(self, text): if self.user_id is None: @@ -27,7 +27,7 @@ async def write_to_history(self, text): key = self.get_companion_key() async with self.history: - result = await self.history.zadd(key, {text: int(time.time())}) + result = await self.history.zadd(key, {text: int(time.time()*1000)}) return result From e73e87e71b0d628f7cbb5ad86af08315f95b7920 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 18:23:52 -0700 Subject: [PATCH 06/34] Cleanup for GUI version --- python/api/pinecone.py | 17 ------------- python/api/upstash.py | 18 +++++++++++-- python/companion.py | 34 ++++++++++++++++++++++--- python/localcompanion.py | 55 +++++++++++++++------------------------- 4 files changed, 67 insertions(+), 57 deletions(-) delete mode 100644 python/api/pinecone.py diff --git a/python/api/pinecone.py b/python/api/pinecone.py deleted file mode 100644 index ecfa749..0000000 --- a/python/api/pinecone.py +++ /dev/null @@ -1,17 +0,0 @@ - #client = PineconeClient(api_key=os.getenv('PINECONE_API_KEY'), - # environment=os.getenv('PINECONE_ENVIRONMENT')) - #index_name = os.getenv('PINECONE_INDEX') - #pinecone_index = client.get_index(index_name) - - # TODO: Implement PineconeStore and OpenAIEmbeddings in Python. 
- # vector_store = PineconeStore.from_existing_index( - # OpenAIEmbeddings(api_key=os.getenv('OPENAI_API_KEY')), - # pinecone_index - #) - - #try: - # similar_docs = vector_store.similarity_search(recent_chat_history, 3, file_name=companion_file_name) - #except Exception as e: - # print(f"WARNING: failed to get vector search results. {str(e)}") - # similar_docs = [] - # relevant_history = "\n".join(doc.page_content for doc in similar_docs) \ No newline at end of file diff --git a/python/api/upstash.py b/python/api/upstash.py index df1a5b7..10a3286 100644 --- a/python/api/upstash.py +++ b/python/api/upstash.py @@ -11,9 +11,9 @@ class MemoryManager: instance = None - def __init__(self, companion_name, user_id, model_name): + def __init__(self, companion_name, model_name): self.history = Redis.from_env() - self.user_id = user_id + self.user_id = None self.companion_name = companion_name self.model_name = model_name @@ -55,3 +55,17 @@ async def seed_chat_history(self, seed_content, delimiter="\n"): content = seed_content.split(delimiter) for index, line in enumerate(content): await self.history.zadd(key, {line: index}) + + # This is a hack to try to discover the Clerk user ID + # It's the last part of the key name in Redis + + async def find_clerk_user_id(self): + async with self.history: + pattern = f"{self.companion_name}-{self.model_name}-*" + result = await self.history.keys(pattern) + if(len(result) > 0): + if len(result) > 1: + print(f'** WARNING: Found {len(result)} potential user chats in Redis that match, using first one.') + print(f'** You may want to specify a specific Clerk user ID in .env.local') + return result[0].split('-')[-1] + return None diff --git a/python/companion.py b/python/companion.py index 9838f42..1799a24 100644 --- a/python/companion.py +++ b/python/companion.py @@ -2,11 +2,25 @@ # Class that represents a companion # -import asyncio -from langchain import LLMChain, PromptTemplate +import os +import json +from langchain import PromptTemplate + +def load_companions(): + companions = [] + with open(os.path.join(Companion.companion_dir, Companion.companions_file)) as f: + companion_data = json.load(f) + for c in companion_data: + companion = Companion(c) + companions.append(companion) + return companions class Companion: + # Configuration + companion_dir = "../companions" + companions_file = "companions.json" + # --- Prompt template ------------------------------------------------------------------------------------ prompt_template_str = """You are {name} and are currently talking to {user_name}. 
@@ -34,7 +48,8 @@ def __init__(self, cdata): self.imagePath = cdata["imageUrl"] self.llm_name = cdata["llm"] - def load_prompt(self, file_path): + async def load(self): + file_path = os.path.join(self.companion_dir, f'{self.name}.txt') # Load backstory with open(file_path , 'r', encoding='utf-8') as file: data = file.read() @@ -43,7 +58,18 @@ def load_prompt(self, file_path): self.prompt_template = PromptTemplate.from_template(self.prompt_template_str) - return len(self.preamble) + len(self.seed_chat) + print(f'Loaded {self.name} with {len(self.backstory)} characters of backstory.') + + # Check if we have a backstory, if not, seed the chat history + h = await self.memory.read_latest_history() + if not h: + print(f'Chat history empty, initializing.') + await self.memory.seed_chat_history(self.seed_chat, '\n\n') + else: + print(f'Loaded {len(h)} characters of chat history.') + + + return def __str__(self): return f'Companion: {self.name}, {self.title} (using {self.llm_name})' diff --git a/python/localcompanion.py b/python/localcompanion.py index 5635900..d4cea46 100644 --- a/python/localcompanion.py +++ b/python/localcompanion.py @@ -3,42 +3,39 @@ # import os -import json import asyncio from dotenv import load_dotenv from api.upstash import MemoryManager from api.chatgpt import LlmManager -from companion import Companion +from companion import load_companions # Location of the data files from the TS implementation env_file = "../.env.local" -companion_dir = "../companions" -companions_file = "companions.json" -# This is the Clerk user ID. We don't use Clerk for the local client, but it is needed for the Redis key -user_id = "local" -user_name = "Human" +# This is the default user ID and name. +def_user_id = "local" +def_user_name = "Human" # load environment variables from the JavaScript .env file config = load_dotenv(env_file) -async def main(): +# Find the Clerk user ID, from environment variable or Redis key name +# or default to "local" - # For compatibility with the TS implementation, user needs to specify the Clerk user ID +async def guess_clerk_user_id(companion): if os.getenv('CLERK_USER_ID'): - user_id = os.getenv('CLERK_USER_ID') + return os.getenv('CLERK_USER_ID') + else: + id = await companion.memory.find_clerk_user_id() + return id or def_user_id + +async def main(): # Read list of companions from JSON file - i = 0 - companions = [] - with open(os.path.join(companion_dir, companions_file)) as f: - companion_data = json.load(f) - for c in companion_data: - companion = Companion(c) - print(f' #{i+1}: {companion}') - companions.append(companion) - i += 1 + companions = load_companions() + for i in range(len(companions)): + print(f' #{i+1}: {companions[i]}') # Ask user to pick a companion and load it print(f'Who do you want to chat with 1-{i}?') @@ -47,20 +44,10 @@ async def main(): print('') print(f'Connecting you to {companion.name}...') - # load the companion's backstory, initialize prompts - l = companion.load_prompt(os.path.join(companion_dir, f'{companion.name}.txt')) - print(f'Loaded {l} characters of backstory.') - - # Initialize memory. Initialize if empty. 
- companion.memory = MemoryManager(companion.name, user_id, companion.llm_name) - h = await companion.memory.read_latest_history() - if not h: - print(f'Chat history empty, initializing.') - await companion.memory.seed_chat_history(companion.seed_chat, '\n\n') - else: - print(f'Loaded {len(h)} characters of chat history.') - - # Initialize LLM + # Initialize the companion + companion.memory = MemoryManager(companion.name, companion.llm_name) + companion.memory.user_id = await guess_clerk_user_id(companion) + await companion.load() companion.llm = LlmManager(companion.prompt_template) # Start chatting @@ -70,7 +57,7 @@ async def main(): user_input = input("Human> ") if user_input == "quit": break - reply = await companion.chat(user_input, user_name) + reply = await companion.chat(user_input, def_user_name) print(f'{companion.name}: {reply}') if __name__ == "__main__": From cd2de6ef08b14f8a8ae644142ca245a0786c31df Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 19:05:18 -0700 Subject: [PATCH 07/34] polishing --- python/README.md | 20 +++++++------ python/companion_app.py | 65 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 9 deletions(-) create mode 100644 python/companion_app.py diff --git a/python/README.md b/python/README.md index 9eab153..25781d6 100644 --- a/python/README.md +++ b/python/README.md @@ -10,8 +10,9 @@ Specifically: - Conversation history is stored in Upstash/Redis - It uses OpenAI ChatGPT-turbo-3.5 to generate the chat messages -Right now, Vicuña (the OSS LLM) and Pinecone (for retrieving longer chat history and -backstory), are not supported yet but will be added shortly. +Right now, Vicuña (the OSS LLM) is not supported. It also doesn't use Pinecone for the +backstory but unless you have a very long (> 4000 word) backstory there should be no +difference. ## Installation @@ -22,23 +23,24 @@ $ pip3 install -r requirements.txt ``` Next, get the necessary API keys as described in the Readme.md file for the main TS project. -You will need at least Upstash, OpenAI and Pinecone. You do not need Clerk (as we are local). +You will need at least Upstash and OpenAI. You do not need Clerk, Pinecone/Supabase. Add them to the .env.local file in the root directory as described in the top level README. -The python code will reads the API keys from the same .env.local file as the TS app. +The python code will read the API keys from the same .env.local file as the TS app. Run the local client: - ``` -$ python3 localcompanion.py +$ python3 companion_app.py ``` This should bring up the list of companions, allow you to select a companion, and start chatting. ## Sharing a companion with the web app -Right now, if you want to share chat history with the web app, you need to specify the Clerk user -ID as it is used as part of the Upstash Redis key. Find it via the Clerk console, add the following -to the .env.local in the root directory: +If you want to chat with the same companion using both the TypeScript web server and the local +app, the local app needs your Clerk User ID. It will try to discover this automatically by looking +for a specific Redis key. If for any reason this doesn't work, you may need to go to the Clerk +console, find your Clerk User ID and add it to the .env.local file. 
It should look something +like this: ``` CLERK_USER_ID="user_***" diff --git a/python/companion_app.py b/python/companion_app.py new file mode 100644 index 0000000..13e8241 --- /dev/null +++ b/python/companion_app.py @@ -0,0 +1,65 @@ +# +# Compainion-App implemented as a local script, no web server required +# + +import os +import asyncio +from dotenv import load_dotenv +from api.upstash import MemoryManager +from api.chatgpt import LlmManager + +from companion import load_companions + +# Location of the data files from the TS implementation +env_file = "../.env.local" + +# This is the default user ID and name. +def_user_id = "local" +def_user_name = "Human" + +# load environment variables from the JavaScript .env file +config = load_dotenv(env_file) + +# Find the Clerk user ID, from environment variable or Redis key name +# or default to "local" + +async def guess_clerk_user_id(companion): + if os.getenv('CLERK_USER_ID'): + return os.getenv('CLERK_USER_ID') + else: + id = await companion.memory.find_clerk_user_id() + return id or def_user_id + +async def main(): + + # Read list of companions from JSON file + companions = load_companions() + for i in range(len(companions)): + print(f' #{i+1}: {companions[i]}') + + # Ask user to pick a companion and load it + print(f'Who do you want to chat with (1-{i+1})?') + selection = int(input()) + companion = companions[selection-1] + print('') + print(f'Connecting you to {companion.name}...') + + # Initialize the companion + companion.memory = MemoryManager(companion.name, companion.llm_name) + companion.memory.user_id = await guess_clerk_user_id(companion) + await companion.load() + companion.llm = LlmManager(companion.prompt_template) + + # Start chatting + print('') + print(f'You are now chatting with {companion.name}. Type "quit" to exit.') + while True: + user_input = input("Human> ") + if user_input == "quit": + break + reply = await companion.chat(user_input, def_user_name) + print(f'{companion.name}: {reply}') + +if __name__ == "__main__": + asyncio.run(main()) + From 41989934986e9608b43914ba14da515fb6e3dbe4 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 19:09:23 -0700 Subject: [PATCH 08/34] Update README.md with image --- python/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/README.md b/python/README.md index 25781d6..aabb424 100644 --- a/python/README.md +++ b/python/README.md @@ -5,6 +5,8 @@ the TypeScript implementation and uses the same config files, data files and dat This means if you use a supported LLM you can start a conversation via the TS web app and continue it via the local python client (or vice versa). 
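The compatibility comes down to both stacks reading and writing the same Upstash Redis sorted set. As a minimal illustrative sketch (not part of these patches — it simply mirrors the `MemoryManager` calls shown above, using the key layout from PATCH 05, with placeholder companion and user values), the shared history can be inspected from Python like this:

```python
# Sketch: dump the chat history that the TS web app and the python client share.
# Assumes UPSTASH_REDIS_REST_URL / UPSTASH_REDIS_REST_TOKEN are set (e.g. via ../.env.local)
# and uses the key layout from MemoryManager.get_companion_key (PATCH 05):
#   "<companion name>-<model name>-<clerk user id>"
import asyncio
import time
from dotenv import load_dotenv
from upstash_redis.client import Redis

load_dotenv("../.env.local")

async def dump_shared_history(companion_name: str, model_name: str, user_id: str) -> None:
    history = Redis.from_env()
    key = f"{companion_name}-{model_name}-{user_id}"
    async with history:
        now = int(time.time() * 1000)  # scores are millisecond timestamps
        lines = await history.zrange(key, 1, now, range_method="byscore")
    for line in lines:
        print(line.strip())

# "Alex", "chatgpt" and the user id below are illustrative placeholders.
asyncio.run(dump_shared_history("Alex", "chatgpt", "user_***"))
```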
+![image](https://github.com/a16z-infra/companion-app/assets/286029/f7382ef9-4948-40f8-acc1-27396b864037) + Specifically: - Companion information is loaded from the companion directory - Conversation history is stored in Upstash/Redis From 9b6a8e29f873c3da6527d86445eb79fc3bc3237e Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 19:18:17 -0700 Subject: [PATCH 09/34] polish --- python/api/upstash.py | 2 +- python/localcompanion.py | 65 ---------------------------------------- 2 files changed, 1 insertion(+), 66 deletions(-) delete mode 100644 python/localcompanion.py diff --git a/python/api/upstash.py b/python/api/upstash.py index 10a3286..fcc5540 100644 --- a/python/api/upstash.py +++ b/python/api/upstash.py @@ -40,7 +40,7 @@ async def read_latest_history(self): async with self.history: now = int(time.time()*1000) result = await self.history.zrange(key, 1, now, range_method="byscore") - print(f'Found {len(result)} chat messages in history.') + #print(f'Found {len(result)} chat messages in history.') result = list(result[-30:]) recent_chats = "\n".join(result) return recent_chats diff --git a/python/localcompanion.py b/python/localcompanion.py deleted file mode 100644 index d4cea46..0000000 --- a/python/localcompanion.py +++ /dev/null @@ -1,65 +0,0 @@ -# -# Compainion-App implemented as a local script, no web server required -# - -import os -import asyncio -from dotenv import load_dotenv -from api.upstash import MemoryManager -from api.chatgpt import LlmManager - -from companion import load_companions - -# Location of the data files from the TS implementation -env_file = "../.env.local" - -# This is the default user ID and name. -def_user_id = "local" -def_user_name = "Human" - -# load environment variables from the JavaScript .env file -config = load_dotenv(env_file) - -# Find the Clerk user ID, from environment variable or Redis key name -# or default to "local" - -async def guess_clerk_user_id(companion): - if os.getenv('CLERK_USER_ID'): - return os.getenv('CLERK_USER_ID') - else: - id = await companion.memory.find_clerk_user_id() - return id or def_user_id - -async def main(): - - # Read list of companions from JSON file - companions = load_companions() - for i in range(len(companions)): - print(f' #{i+1}: {companions[i]}') - - # Ask user to pick a companion and load it - print(f'Who do you want to chat with 1-{i}?') - selection = int(input()) - companion = companions[selection-1] - print('') - print(f'Connecting you to {companion.name}...') - - # Initialize the companion - companion.memory = MemoryManager(companion.name, companion.llm_name) - companion.memory.user_id = await guess_clerk_user_id(companion) - await companion.load() - companion.llm = LlmManager(companion.prompt_template) - - # Start chatting - print('') - print(f'You are now chatting with {companion.name}. 
Type "quit" to exit.') - while True: - user_input = input("Human> ") - if user_input == "quit": - break - reply = await companion.chat(user_input, def_user_name) - print(f'{companion.name}: {reply}') - -if __name__ == "__main__": - asyncio.run(main()) - From 45374077ec8fb39fe5d28b638020bbf70fe962b6 Mon Sep 17 00:00:00 2001 From: Guido Appenzeller Date: Fri, 30 Jun 2023 20:44:04 -0700 Subject: [PATCH 10/34] updated readme --- python/README.md | 71 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 55 insertions(+), 16 deletions(-) diff --git a/python/README.md b/python/README.md index aabb424..74ac2f4 100644 --- a/python/README.md +++ b/python/README.md @@ -1,20 +1,23 @@ # Python Local Companion -This is a local python implementation of the CompanionAI stack. It is compatible with -the TypeScript implementation and uses the same config files, data files and databases. -This means if you use a supported LLM you can start a conversation via the TS web app -and continue it via the local python client (or vice versa). +This is a local python implementation of the CompanionAI stack. It includes: + + 1. A local python client that you can use to chat with a companion without starting a web server + 2. An api layer you can use if you want to read or modify companion data from python + +The python stack is compatible with the TypeScript implementation and uses the same config files, +data files, database schemas and databases. Below an example of the python chat client running +locally from the command line. ![image](https://github.com/a16z-infra/companion-app/assets/286029/f7382ef9-4948-40f8-acc1-27396b864037) -Specifically: -- Companion information is loaded from the companion directory -- Conversation history is stored in Upstash/Redis -- It uses OpenAI ChatGPT-turbo-3.5 to generate the chat messages +When running the python client it will: +- Load companion information from the top level /companions directory. +- Read/write conversation history to Upstash/Redis +- Use OpenAI ChatGPT-turbo-3.5 to generate the chat messages -Right now, Vicuña (the OSS LLM) is not supported. It also doesn't use Pinecone for the -backstory but unless you have a very long (> 4000 word) backstory there should be no -difference. +Right now, Vicuña is not supported. Instead of using Pinecone for the backstory it inserts it directly. +Unless you have a very long (> 5000 word) backstory there should be no noticable difference. ## Installation @@ -36,14 +39,50 @@ $ python3 companion_app.py This should bring up the list of companions, allow you to select a companion, and start chatting. -## Sharing a companion with the web app +## Using the Python stack as an API Layer + +Accessing the companion-ai-stack data via the python API layer is fairly straightforward and below is an example. +After reading the env file with credentials, we load available companions and pick one. Next we attach a +memory manager to the companion. This creates a connection to a serverless Redis instance on Upstash. As we +don't use Clerk for authentication we now need to find the user ID for the user (more on that below). Once the +companion is loaded, we can access companion data. In this case, we print the companion's recent chat history. 
+ +``` +import asyncio +from dotenv import load_dotenv +from api.upstash import MemoryManager +from companion import load_companions +from companion_app import guess_clerk_user_id + +config = load_dotenv("../.env.local") + +async def main(): + companion = load_companions()[1] + companion.memory = MemoryManager(companion.name, companion.llm_name) + companion.memory.user_id = await guess_clerk_user_id(companion) + await companion.load() + + i = 0 + for c in ( await companion.memory.read_latest_history() ).split("\n"): + if len(c.strip()) > 0: + i += 1 + print(f'{i:2} {c.strip()}') + +if __name__ == "__main__": + asyncio.run(main()) +``` + +## Clerk user IDs If you want to chat with the same companion using both the TypeScript web server and the local -app, the local app needs your Clerk User ID. It will try to discover this automatically by looking -for a specific Redis key. If for any reason this doesn't work, you may need to go to the Clerk -console, find your Clerk User ID and add it to the .env.local file. It should look something -like this: +app, the local app needs your Clerk User ID. If you first start using the TypeScript web server +the python stack should discover the correct Clerk User ID automatically by looking +for a specific Redis key. However if you have multiple users using the web server it can't tell +which one is the correct one. If this happens, go to the Clerk console, find your Clerk User ID +and add it to the .env.local file. It should look something like this: ``` CLERK_USER_ID="user_***" ``` + +Once this is done, the python stack will always read/write chat history for this user. \ No newline at end of file From 4cb34b0f8825d8e8a1b82d63c5cba24ac0404244 Mon Sep 17 00:00:00 2001 From: Ted Benson Date: Wed, 12 Jul 2023 21:29:05 +0800 Subject: [PATCH 11/34] Contribute initial multimodal agent support & steamship agent --- .env.local.example | 5 +- .gitignore | 8 ++- README.md | 13 +++- companions/companions.json | 21 +++++-- public/rick.jpeg | Bin 0 -> 6683 bytes src/app/api/steamship/route.ts | 105 +++++++++++++++++++++++++++++++++ src/components/ChatBlock.tsx | 66 +++++++++++++++++++++ src/components/QAModal.tsx | 22 +++++-- src/components/actions.ts | 2 +- 9 files changed, 228 insertions(+), 14 deletions(-) create mode 100644 public/rick.jpeg create mode 100644 src/app/api/steamship/route.ts create mode 100644 src/components/ChatBlock.tsx diff --git a/.env.local.example b/.env.local.example index 6f7e8b6..4f82427 100644 --- a/.env.local.example +++ b/.env.local.example @@ -31,4 +31,7 @@ UPSTASH_REDIS_REST_TOKEN=AZ**** # Twilio related environment variables TWILIO_ACCOUNT_SID=AC*** -TWILIO_AUTH_TOKEN=***** \ No newline at end of file +TWILIO_AUTH_TOKEN=***** + +# Steamship related environment variables +STEAMSHIP_API_KEY=**** \ No newline at end of file diff --git a/.gitignore b/.gitignore index d489e57..f1c6735 100644 --- a/.gitignore +++ b/.gitignore @@ -35,4 +35,10 @@ yarn-error.log* next-env.d.ts /.env.prod -/fly.toml \ No newline at end of file +/fly.toml + +# JetBrains +.idea + +# Yarn Lockfiles (since this project uses NPM) +yarn.lock \ No newline at end of file diff --git a/README.md b/README.md index 174e773..14cf945 100644 --- a/README.md +++ b/README.md @@ -113,8 +113,7 @@ e. **Upstash API key** - Scroll down to "REST API" section and click on ".env". Now you can copy paste both environment variables to your `.env.local` Screen Shot 2023-07-10 at 11 07 21 PM - -e. **Supabase API key** (optional) +f. 
**Supabase API key** (optional) If you prefer to use Supabsae, you will need to uncomment `VECTOR_DB=supabase` and fill out the Supabase credentials in `.env.local`. - Create a Supabase instance [here](https://supabase.com/dashboard/projects); then go to Project Settings -> API @@ -122,6 +121,16 @@ If you prefer to use Supabsae, you will need to uncomment `VECTOR_DB=supabase` a - `SUPABASE_PRIVATE_KEY` is the key starts with `ey` under Project API Keys - Now, you should enable pgvector on Supabase and create a schema. You can do this easily by clicking on "SQL editor" on the left hand side on Supabase UI and then clicking on "+New Query". Copy paste [this code snippet](https://github.com/a16z-infra/ai-getting-started/blob/main/pgvector.sql) in the SQL editor and click "Run". +g. **Steamship API key** + +You can connect Steamship agent instance as if it were an LLM with personality, vector storage, and tools built-in. To do so: + +- Sign in to [Steamship](https://steamship.com/account) +- Copy the API key from your account settings page +- Add it as the `STEAMSHIP_API_KEY` variable + +Next, [build an agent](https://www.steamship.com/learn/agent-guidebook), create your own instance of it, and connect it in `companions.json` using the *Rick* example as a guide. + ### 4. Generate embeddings The `companions/` directory contains the "personalities" of the AIs in .txt files. To generate embeddings and load them into the vector database to draw from during the chat, run the following command: diff --git a/companions/companions.json b/companions/companions.json index b79ebe8..198380c 100644 --- a/companions/companions.json +++ b/companions/companions.json @@ -1,30 +1,43 @@ [ + { + "name": "Rick", + "title": "I can generate voice and pictures.", + "imageUrl": "/rick.jpeg", + "llm": "steamship", + "generateEndpoint": "https://a16z.steamship.run/a16z/rick-b1578149038e664bacae7fc083683565/answer", + "phone": "OPTIONAL_COMPANION_PHONE_NUMBER", + "telegram": "https://t.me/rick_a16z_bot" + }, { "name": "Alex", "title": "I love talking about books and games", "imageUrl": "/alex.png", "llm": "chatgpt", - "phone": "OPTIONAL_COMPANION_PHONE_NUMBER" + "phone": "OPTIONAL_COMPANION_PHONE_NUMBER", + "telegram": "OPTIONAL_TELEGRAM_LINK" }, { "name": "Rosie", "title": "I'm a house robot who became aware", "imageUrl": "/rosie.png", "llm": "vicuna13b", - "phone": "OPTIONAL_COMPANION_PHONE_NUMBER" + "phone": "OPTIONAL_COMPANION_PHONE_NUMBER", + "telegram": "OPTIONAL_TELEGRAM_LINK" }, { "name": "Sebastian", "title": "I'm a travel blogger and a mystery novel writer", "imageUrl": "/sebastian.png", "llm": "chatgpt", - "phone": "OPTIONAL_COMPANION_PHONE_NUMBER" + "phone": "OPTIONAL_COMPANION_PHONE_NUMBER", + "telegram": "OPTIONAL_TELEGRAM_LINK" }, { "name": "Lucky", "title": "I am a space corgi", "imageUrl": "/corgi.png", "llm": "vicuna13b", - "phone": "OPTIONAL_COMPANION_PHONE_NUMBER" + "phone": "OPTIONAL_COMPANION_PHONE_NUMBER", + "telegram": "OPTIONAL_TELEGRAM_LINK" } ] diff --git a/public/rick.jpeg b/public/rick.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..0840ba31e77462a255da6d5cd913ffd704b56082 GIT binary patch literal 6683 zcmZuTby(ET)1RY}yrV-J$)n*2sgpWV8V{6^?gl{+Bn06oDM3Quh(k(1LRv}$q~io6 zB_*V#UcTSo?|t5X-r3pNo!Q-)d3K+jnVXrLMSxOM4WR~rKmY)`ZNSYBAQHGkNJI^R z(ENj78hUzq8X5*LJ%k>@$V^Yqz(~)`%)rbk#Ldhiz{DoRL*N=V*z30cuwatr^ZqW^$|g#5p<|CLH8-gE$9 zBG4V66&J(-;DAB6V9-rBz;x>){y(1nAAoRh@$d-=ZWj@h0M0)g0Ui+vA^ty`KsW#{ z7!SgZFRE-!KqaQ{70%(E{2p##^D?F8V<#as4X5~1pEm`yV-gP{x<<#BxC~PZ_YZDW 
[remainder of the GIT binary patch data for public/rick.jpeg (6683 bytes) omitted]
+ if (text) { internalComponent = {text} } else if (mimeType && url) { if (mimeType.startsWith("audio")) { internalComponent =