diff --git a/docs/source/tutorials/model_client.rst b/docs/source/tutorials/model_client.rst
index 943cc574..438d34d3 100644
--- a/docs/source/tutorials/model_client.rst
+++ b/docs/source/tutorials/model_client.rst
@@ -1,3 +1,15 @@
.. _tutorials-model_client:
ModelClient
@@ -268,6 +280,1238 @@ The output will be:
.. TODO: add optional package introduction here
+OpenAI Embedder - Embedding Processing Example
+-------------------------------------------------
+
+In this example, we are using a collection of embeddings to demonstrate different functionalities such as calculating semantic similarity, finding nearest neighbors, and averaging embeddings. Below is the Python code used to achieve these tasks:
+
+.. code-block:: python
+
+ from typing import List
+ import numpy as np
+ from adalflow.core.types import ModelType, EmbedderOutput
+ from adalflow.components.model_client import OpenAIClient
+ from dataclasses import dataclass
+ from enum import Enum
+ from numpy.linalg import norm
+
+Data Classes
+
+We use two dataclasses to structure the collection and usage data:
+
+- ``EmbeddingCollection``: stores an individual embedding collection and its corresponding index.
+- ``Usage``: keeps track of token usage, such as ``prompt_tokens`` and ``total_tokens``.
+
+.. code-block:: python
+
+ @dataclass
+ class EmbeddingCollection:
+ collection: List[float]
+ cindex: int
+
+
+ @dataclass
+ class Usage:
+ prompt_tokens: int
+ total_tokens: int
+
+The following function, ``get_openai_embedding``, sends a request to the OpenAI API to retrieve embeddings for a given text. It sets the model type to ``EMBEDDER``, prepares the required model-specific parameters, and processes the response:
+
+.. code-block:: python
+
+ openai_client = OpenAIClient()
+
+ def get_openai_embedding(text):
+ # Set model type to EMBEDDER for embedding functionality
+ model_type = ModelType.EMBEDDER
+
+ # Prepare input and model-specific parameters
+ input = text
+ model_kwargs = {
+ "model": "text-embedding-3-small",
+ "dimensions": 8,
+ "encoding_format": "float",
+ }
+
+ # Convert inputs to the required API format
+ api_kwargs = openai_client.convert_inputs_to_api_kwargs(
+ input=input, model_kwargs=model_kwargs, model_type=model_type
+ )
+ print(f"api_kwargs: {api_kwargs}") # Debug output to verify API arguments
+
+ # Call OpenAI API and parse response for embeddings
+ response = openai_client.call(api_kwargs=api_kwargs, model_type=model_type)
+        response_embedder_output = openai_client.parse_embedding_response(response)
+        print(
+            f"response_embedder_output: {response_embedder_output}"
+        )  # Debug output to verify embeddings
+        return response_embedder_output
+
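+Calling it on a single string returns an ``EmbedderOutput`` whose ``data`` field holds the embedding vectors. A quick usage sketch (assuming ``OPENAI_API_KEY`` is set in the environment):
+
+.. code-block:: python
+
+    emb_out = get_openai_embedding("What is the capital of France?")
+    print(emb_out.data[0].embedding)  # an 8-dimensional vector, per the model_kwargs above
+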
+Embedding Processing
+
+The function ``process_embeddings`` takes in a collection of embeddings and provides utilities for calculating similarity, averaging embeddings, and finding nearest neighbors:
+
+- Similarity: measures the cosine similarity between two embeddings (see the formula below).
+- Average Embedding: computes the mean embedding across a set of embeddings.
+- Nearest Neighbors: identifies the top-k nearest neighbors based on cosine similarity.
+
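+For reference, the cosine similarity used below is
+
+.. math::
+
+   \text{sim}(a, b) = \frac{a \cdot b}{\lVert a \rVert \, \lVert b \rVert}
+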
+.. code-block:: python
+
+ def process_embeddings(embeddings_collection):
+ # Extract embedding data for each item in the collection
+ embeddingOutput = [emb.collection for emb in embeddings_collection]
+ embeddingDataList = [each_emb_out.data for each_emb_out in embeddingOutput]
+ embeddingList = [
+ each_item.embedding
+ for each_emb_data in embeddingDataList
+ for each_item in each_emb_data
+ ]
+
+ # Convert to numpy array for easier manipulation and calculations
+ embeddings_array = np.array(embeddingList)
+
+ def calculate_similarity(emb1, emb2):
+ # Compute cosine similarity between two embeddings
+ return np.dot(emb1, emb2) / (norm(emb1) * norm(emb2))
+
+ def get_average_embedding(embeddings_list):
+ # Calculate the mean embedding across a list of embeddings
+ return np.mean(embeddings_list, axis=0)
+
+ def find_nearest_neighbors(
+ query_index: int, embedding_list: List[List[float]], k: int = 5
+ ):
+ # Find top-k most similar embeddings to a query embedding, based on cosine similarity
+ query_embedding = embedding_list[query_index]
+ similarities = [
+ (i, calculate_similarity(query_embedding, emb))
+ for i, emb in enumerate(embedding_list)
+ if i != query_index
+ ]
+ return sorted(similarities, key=lambda x: x[1], reverse=True)[:k]
+
+ # Return dictionary of functions and processed data for further use
+ return {
+ "embeddings_array": embeddings_array,
+ "calculate_similarity": calculate_similarity,
+ "average_embedding": get_average_embedding,
+ "find_nearest_neighbors": find_nearest_neighbors,
+ }
+
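+To see these utilities in isolation, they can be exercised on hand-made embeddings without any API call. This sketch assumes ``Embedding`` is importable from ``adalflow.core.types`` (as its repr inside ``EmbedderOutput`` suggests):
+
+.. code-block:: python
+
+    from adalflow.core.types import Embedding, EmbedderOutput
+
+    # Three 2-d toy vectors, wrapped the same way the API path wraps them
+    fake_outputs = [
+        EmbedderOutput(data=[Embedding(embedding=[1.0, 0.0], index=0)]),
+        EmbedderOutput(data=[Embedding(embedding=[0.0, 1.0], index=0)]),
+        EmbedderOutput(data=[Embedding(embedding=[1.0, 1.0], index=0)]),
+    ]
+    collections = [
+        EmbeddingCollection(collection=out, cindex=i)
+        for i, out in enumerate(fake_outputs)
+    ]
+
+    processor = process_embeddings(collections)
+    print(processor["calculate_similarity"]([1.0, 0.0], [1.0, 1.0]))  # ~0.7071
+    print(processor["find_nearest_neighbors"](0, processor["embeddings_array"], k=2))
+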
+The function ``demonstrate_embeddings_usage`` showcases how to analyze semantic similarities, find nearest neighbors, and calculate average embeddings for sample texts. It selects random texts, compares their similarities, finds nearest neighbors for a specific query, and compares the average embedding of texts containing "Paris" against all texts.
+
+.. code-block:: python
+
+ # Demonstrate embeddings usage with sample data
+ def demonstrate_embeddings_usage(sample_embeddings, input_text_list):
+ # Initialize processor and retrieve embeddings array
+ processor = process_embeddings(sample_embeddings)
+ embeddings = processor["embeddings_array"]
+
+ print("1. Analyzing Semantic Similarities:")
+ print("-" * 50)
+
+ # Select a few random indices for similarity testing
+ num_indices = 5
+ assert len(input_text_list) == len(embeddings)
+ indices = np.random.choice(len(input_text_list), num_indices, replace=False)
+ selected_text = np.array(input_text_list)[indices]
+ selected_embeddings = np.array(embeddings)[indices]
+
+ # Display selected texts and their embeddings
+ print("Selected indices:", indices)
+ print("Selected elements from array1:", selected_text)
+ print("Selected elements from array2:", selected_embeddings)
+
+ # Calculate similarity between each pair of selected texts
+ for i in range(len(selected_text)):
+ for j in range(i + 1, len(selected_text)):
+ similarity = processor["calculate_similarity"](
+ selected_embeddings[i], selected_embeddings[j]
+ )
+ print(f"\nComparing:\n'{selected_text[i]}' \nwith:\n'{selected_text[j]}'")
+ print(f"Similarity score: {similarity:.4f}")
+
+ print("\n2. Finding Nearest Neighbors:")
+ print("-" * 50)
+
+ # Find and display the 3 nearest neighbors for the first text
+ query_idx = 0
+ neighbors = processor["find_nearest_neighbors"](query_idx, embeddings, k=3)
+ print(f"\nQuery text: '{input_text_list[query_idx]}'")
+ print("\nNearest neighbors:")
+
+ for idx, similarity in neighbors:
+ print(f"- '{input_text_list[idx]}' (similarity: {similarity:.4f})")
+
+ print("\n3. Using Average Embeddings:")
+ print("-" * 50)
+
+ # Calculate and compare the average embedding for texts containing "Paris"
+ paris_indices = [i for i, text in enumerate(input_text_list) if "Paris" in text]
+ paris_embeddings = embeddings[paris_indices]
+ avg_paris_embedding = processor["average_embedding"](paris_embeddings)
+
+ print("\nComparing average 'Paris' embedding with all texts:")
+ for i, text in enumerate(input_text_list):
+ similarity = processor["calculate_similarity"](
+ avg_paris_embedding, embeddings[i]
+ )
+ print(f"- '{text}' (similarity: {similarity:.4f})")
+
+
+Running the Model Client
+
+Finally, we run the model client by initializing a set of sample texts, generating their embeddings, and using the embedding processing functions to analyze similarities and neighbors.
+
+.. code-block:: python
+
+ def run_model_client_embedding_usage():
+ # Define a set of sample texts to test embedding and similarity functionalities
+ sample_texts = [
+ "What is the capital of France?",
+ "Paris is the capital of France.",
+ "What is the population of France?",
+ "How big is Paris?",
+ "What is the weather like in Paris?",
+ ]
+
+ # Duplicate each sample text to form an input list with repeated entries (for embedding testing)
+ input_text_list = [text for text in sample_texts for _ in range(2)]
+
+ # Generate embeddings for each text in the input list, and store them in an EmbeddingCollection
+ embeddings_collection = [
+ EmbeddingCollection(collection=get_openai_embedding(text), cindex=i)
+ for i, text in enumerate(input_text_list)
+ ]
+ print(
+ embeddings_collection
+ ) # Debugging output to verify embeddings collection content
+
+ # Demonstrate the usage of embeddings by analyzing similarities, finding neighbors, etc.
+ demonstrate_embeddings_usage(embeddings_collection, input_text_list)
+
+To execute the complete example, simply call the ``run_model_client_embedding_usage()`` function:
+
+.. code-block:: python
+
+ run_model_client_embedding_usage()
+
+
+This will trigger the embedding retrieval and processing functions, and you will see the results printed out, demonstrating how embeddings can be used for similarity analysis, neighbor finding, and averaging.
+
+OpenAI LLM Chat - Multichat Usage
+-------------------------------------------------
+This example demonstrates how to create a multichat system using OpenAI's LLM with AdalFlow, where the assistant's responses depend on the entire conversation history. This allows for a more dynamic and context-aware conversation flow.
+
+.. code-block:: python
+
+ from adalflow.components.model_client import OpenAIClient
+ from adalflow.core.types import ModelType
+ from adalflow.utils import setup_env
+ from typing import List, Dict
+
+ChatConversation Class
+
+Here, we define a ``ChatConversation`` class to manage the conversation history and make API calls to the OpenAI model. The assistant's responses are generated based on the entire conversation history.
+
+.. code-block:: python
+
+ class ChatConversation:
+ def __init__(self):
+ # Initialize the OpenAI client for managing API calls
+ self.openai_client = OpenAIClient()
+ # Initialize an empty conversation history to store chat messages
+ self.conversation_history: str = ""
+ # Model parameters to customize the API call
+ self.model_kwargs = {
+ "model": "gpt-3.5-turbo",
+ "temperature": 0.5, # Controls randomness; 0.5 for balanced responses
+ "max_tokens": 100, # Limits the response length
+ }
+
+        def add_user_message(self, message: str):
+            """Add a user message to the conversation history"""
+            self.conversation_history += (
+                f"<USER> {message} </USER>"  # Wrap the user message in role tags
+            )
+
+        def add_assistant_message(self, message: str):
+            """Add an assistant message to the conversation history"""
+            self.conversation_history += (
+                f"<ASSISTANT> {message} </ASSISTANT>"  # Wrap the assistant message in role tags
+            )
+
+ def get_response(self) -> str:
+ """Get response from the model based on conversation history"""
+ # Convert the conversation history and model parameters into API arguments
+ api_kwargs = self.openai_client.convert_inputs_to_api_kwargs(
+ input=self.conversation_history,
+ model_kwargs=self.model_kwargs,
+ model_type=ModelType.LLM,
+ )
+ print(f"api_kwargs: {api_kwargs}") # Debugging output to verify API parameters
+
+ # Call the API with the generated arguments to get a response
+ response = self.openai_client.call(
+ api_kwargs=api_kwargs, model_type=ModelType.LLM
+ )
+ print("response: ", response) # Debugging output for raw API response
+
+ # Extract and parse the text response from the API output
+ response_text = self.openai_client.parse_chat_completion(response)
+ # Update conversation history with the assistant's response
+ self.add_assistant_message(response_text)
+ return response_text # Return the assistant's response to the caller
+
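+A minimal usage sketch of the class on its own (assuming ``OPENAI_API_KEY`` is available via ``setup_env()``):
+
+.. code-block:: python
+
+    chat = ChatConversation()
+    chat.add_user_message("What is the capital of France?")
+    print(chat.get_response())  # the reply is also appended to conversation_history
+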
+Simulating a Multi-turn Conversation
+
+In the ``check_chat_conversation()`` function, we simulate a multi-turn conversation by iterating over a list of user questions. Each question is added to the conversation history, and the assistant responds based on the accumulated conversation context.
+
+.. code-block:: python
+
+ def check_chat_conversation():
+ # Initialize a new chat conversation
+ chat = ChatConversation()
+
+ # Example list of user questions to simulate a multi-turn conversation
+ questions = [
+ "What is the capital of France?",
+ "What is its population?",
+ "Tell me about its famous landmarks",
+ ]
+
+ # Iterate through each question in the list
+ for question in questions:
+ print(f"\nUser: {question}") # Display the user's question
+ chat.add_user_message(
+ question
+ ) # Add the user question to the conversation history
+
+ response = (
+ chat.get_response()
+ ) # Get assistant's response based on conversation history
+ print(f"Assistant: {response}") # Display the assistant's response
+
+ # Display the full conversation history after all exchanges
+ print("\nFull Conversation History:")
+ print(chat.conversation_history) # Print the accumulated conversation history
+
+Key Points
+
+Each question builds on the previous one, and the assistant responds appropriately because the accumulated conversation history is sent with every request. Run the conversation with:
+
+.. code-block:: python
+
+    check_chat_conversation()
+
+OpenAI LLM Chat - Multichat Usage - Asynchronous
+-------------------------------------------------
+
+This example demonstrates how to create an asynchronous multichat system using OpenAI's LLM with AdalFlow. The asynchronous approach allows handling multiple questions in parallel, making the interaction more efficient when dealing with unrelated queries.
+
+.. code-block:: python
+
+ import asyncio
+ from adalflow.components.model_client import OpenAIClient
+ from adalflow.core.types import ModelType
+ from typing import List
+
+ChatConversationAsync Class
+
+The ``ChatConversationAsync`` class is designed to handle asynchronous API calls to the OpenAI model. It supports concurrent requests, which improves performance when interacting with multiple questions simultaneously.
+
+.. code-block:: python
+
+ class ChatConversationAsync:
+ def __init__(self):
+ # Initialize with an asynchronous OpenAI client
+ self.openai_client = OpenAIClient()
+
+ # Default model parameters for the chat
+ self.model_kwargs = {
+ "model": "gpt-3.5-turbo", # Model used for chat
+ "temperature": 0.5, # Controls randomness in response
+ "max_tokens": 100, # Maximum tokens in the generated response
+ }
+
+ async def get_response(self, message: str) -> str:
+ """Asynchronously get a response from the model for a given user message"""
+
+ # Convert input message and model parameters into the format expected by the API
+ api_kwargs = self.openai_client.convert_inputs_to_api_kwargs(
+ input=message, # User's message input
+ model_kwargs=self.model_kwargs, # Model-specific settings
+ model_type=ModelType.LLM, # Specify the model type as a language model (LLM)
+ )
+ print(f"api_kwargs: {api_kwargs}") # Log the API arguments for debugging
+
+ # Make an asynchronous API call to OpenAI's model
+ response = await self.openai_client.acall(
+ api_kwargs=api_kwargs, # Pass the prepared arguments
+ model_type=ModelType.LLM, # Specify the model type again
+ )
+ print("response: ", response) # Print the raw response from the API
+
+ # Parse the API response to extract the assistant's reply (chat completion)
+ response_text = self.openai_client.parse_chat_completion(response)
+ return response_text # Return the parsed response text
+
+Running Multiple Asynchronous Chat Sessions
+
+In the ``check_chat_conversations_async()`` function, we handle a list of unrelated user questions concurrently. This is done by creating a list of asynchronous tasks and gathering their responses.
+
+.. code-block:: python
+
+ async def check_chat_conversations_async():
+ # Create an instance of ChatConversationAsync to handle asynchronous operations
+ chat = ChatConversationAsync()
+
+ # List of unrelated questions that will be handled in parallel
+ questions = [
+ "What is the capital of France?", # Question 1
+ "Is dog a wild animal?", # Question 2
+ "Tell me about amazon forest", # Question 3
+ ]
+
+ # Create a list of asynchronous tasks, one for each question
+ # Each task calls the get_response method asynchronously for a question
+ tasks = [chat.get_response(question) for question in questions]
+
+ # Gather the results of all asynchronous tasks concurrently
+ responses = await asyncio.gather(*tasks)
+
+ # Print the responses from the assistant along with the respective user questions
+ for question, response in zip(questions, responses):
+ print(f"\nUser: {question}")
+ print(f"Assistant: {response}")
+
+Running the Asynchronous Function
+
+To execute the asynchronous function, you can use the following methods based on your environment:
+
+.. code-block:: python
+
+ # Run the asynchronous function if in a file
+ # asyncio.run(check_chat_conversations_async())
+
+ # in jupyter notebook
+ await check_chat_conversations_async()
+
+This approach allows you to handle multiple independent conversations concurrently, improving the system's performance and responsiveness.
+
+OpenAI LLM Chat - Multichat Usage - Benchmark sync() vs async()
+---------------------------------------------------------------------
+
+This section compares the performance of synchronous (``call()``) vs. asynchronous (``acall()``) API calls to OpenAI's language model, benchmarking them using a sample prompt to determine which approach is more efficient for handling multiple API requests.
+
+.. code-block:: python
+
+ import asyncio
+ import time
+ from adalflow.components.model_client import (
+ OpenAIClient,
+ ) # Assuming OpenAIClient with .call() and .acall() is available
+ from adalflow.core.types import ModelType
+
+Setup for Benchmarking
+
+We initialize the OpenAI client and set up a sample prompt to test both synchronous and asynchronous API calls.
+
+.. code-block:: python
+
+ # Initialize the OpenAI client
+ openai_client = OpenAIClient()
+
+ # Sample prompt for testing
+ prompt = "Tell me a joke."
+
+ model_kwargs = {"model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 100}
+
+Synchronous Benchmarking
+
+The ``benchmark_sync_call`` function runs the synchronous ``.call()`` method multiple times and measures the total time taken for all requests.
+
+.. code-block:: python
+
+ # Synchronous function for benchmarking .call()
+ def benchmark_sync_call(api_kwargs, runs=10):
+ """
+ Benchmark the synchronous .call() method by running it multiple times.
+
+ Parameters:
+ - api_kwargs: The arguments to be passed to the API call
+ - runs: The number of times to run the call (default is 10)
+ """
+ # List to store responses
+ responses = []
+
+ # Record the start time of the benchmark
+ start_time = time.time()
+
+ # Perform synchronous API calls for the specified number of runs
+ responses = [
+ openai_client.call(
+ api_kwargs=api_kwargs, # API arguments
+ model_type=ModelType.LLM, # Model type (e.g., LLM for language models)
+ )
+ for _ in range(runs) # Repeat 'runs' times
+ ]
+
+ # Record the end time after all calls are completed
+ end_time = time.time()
+
+ # Output the results of each synchronous call
+ for i, response in enumerate(responses):
+ print(f"sync call {i + 1} completed: {response}")
+
+ # Print the total time taken for all synchronous calls
+ print(f"\nSynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+Asynchronous Benchmarking
+
+The ``benchmark_async_acall`` function runs the asynchronous ``.acall()`` method multiple times concurrently and measures the total time taken for all requests.
+
+.. code-block:: python
+
+ # Asynchronous function for benchmarking .acall()
+ async def benchmark_async_acall(api_kwargs, runs=10):
+ """
+ Benchmark the asynchronous .acall() method by running it multiple times concurrently.
+
+ Parameters:
+ - api_kwargs: The arguments to be passed to the API call
+ - runs: The number of times to run the asynchronous call (default is 10)
+ """
+ # Record the start time of the benchmark
+ start_time = time.time()
+
+ # Create a list of asynchronous tasks for the specified number of runs
+ tasks = [
+ openai_client.acall(
+ api_kwargs=api_kwargs, # API arguments
+ model_type=ModelType.LLM, # Model type (e.g., LLM for language models)
+ )
+ for _ in range(runs) # Repeat 'runs' times
+ ]
+
+ # Execute all tasks concurrently and wait for them to finish
+ responses = await asyncio.gather(*tasks)
+
+ # Record the end time after all tasks are completed
+ end_time = time.time()
+
+ # Output the results of each asynchronous call
+ for i, response in enumerate(responses):
+ print(f"Async call {i + 1} completed: {response}")
+
+ # Print the total time taken for all asynchronous calls
+ print(f"\nAsynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+.. code-block:: python
+
+ api_kwargs = openai_client.convert_inputs_to_api_kwargs(
+ input=prompt, model_kwargs=model_kwargs, model_type=ModelType.LLM
+ )
+
+ # Run both benchmarks
+ print("Starting synchronous benchmark...\n")
+ benchmark_sync_call(api_kwargs)
+
+    # If running from a script, use: asyncio.run(benchmark_async_acall(api_kwargs))
+    # In a notebook, the coroutine can be awaited directly:
+
+    print("\nStarting asynchronous benchmark...\n")
+    await benchmark_async_acall(api_kwargs)
+
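+To quantify the difference, a small variation that returns the elapsed seconds makes the speedup explicit (a sketch; the original functions above only print their timings):
+
+.. code-block:: python
+
+    def time_sync(runs: int = 10) -> float:
+        start = time.time()
+        for _ in range(runs):
+            openai_client.call(api_kwargs=api_kwargs, model_type=ModelType.LLM)
+        return time.time() - start
+
+    async def time_async(runs: int = 10) -> float:
+        start = time.time()
+        await asyncio.gather(
+            *(
+                openai_client.acall(api_kwargs=api_kwargs, model_type=ModelType.LLM)
+                for _ in range(runs)
+            )
+        )
+        return time.time() - start
+
+    sync_seconds = time_sync()
+    async_seconds = await time_async()  # use asyncio.run(time_async()) in a script
+    print(f"speedup: {sync_seconds / async_seconds:.1f}x")
+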
+OpenAI LLM Chat - Additional Utils
+-------------------------------------------------
+
+This section demonstrates the use of additional utility functions for OpenAI's language model client. The following utility functions are included:
+
+- ``get_first_message_content()``
+- ``get_all_messages_content()``
+- ``get_probabilities()``
+
+These utilities can be used to interact with the OpenAI model in various ways, such as extracting the first message content, retrieving all message content from a multi-chat scenario, and calculating the probabilities of tokens.
+
+Code Setup
+
+First, we import necessary components for utilizing the OpenAI client and the utilities from the ``adalflow`` library.
+
+.. code-block:: python
+
+ from adalflow.components.model_client import OpenAIClient
+ from adalflow.core.types import ModelType
+ from adalflow.utils import setup_env
+ from adalflow.components.model_client.openai_client import (
+ get_first_message_content,
+ get_all_messages_content,
+ get_probabilities,
+ )
+ from adalflow.core import Generator
+
+Function: ``check_openai_additional_utils``
+
+This function demonstrates how to use the OpenAI client along with a custom utility function for generating responses from the model, based on the given query and utility function.
+
+.. code-block:: python
+
+ def check_openai_additional_utils(func, model_kwargs):
+ """
+        This function demonstrates the usage of the OpenAI client and a custom utility function
+        for generating responses from the LLM model for a given query.
+
+ Parameters:
+ - func: A function that will be used to parse the chat completion (for custom parsing).
+ - model_kwargs: The additional model parameters (e.g., temperature, max_tokens) to be used in the model.
+
+ Returns:
+ - output: The generated response from the model based on the query.
+ """
+
+ # Initialize the OpenAI client with a custom chat completion parser
+ openai_client = OpenAIClient(chat_completion_parser=func)
+
+        # Define a sample query (user question)
+        query = "What is the capital of France?"
+
+        # Pass the query to the Generator via prompt_kwargs
+        prompt_kwargs = {
+            "input_str": query,
+        }
+
+ # Initialize the Generator with the OpenAI client and model parameters
+ generator = Generator(model_client=openai_client, model_kwargs=model_kwargs)
+
+ # Execute the generator to get a response for the prompt (using the defined prompt_kwargs)
+ output = generator(prompt_kwargs=prompt_kwargs)
+
+ # Return the generated output (response from the LLM)
+ return output
+
+Function: ``run_utils_functions``
+
+This function runs a series of utility functions using different model configurations for generating responses. It demonstrates how to check OpenAI model outputs using various utility functions.
+
+.. code-block:: python
+
+ def run_utils_functions():
+ """
+ This function runs a series of utility functions using different model
+ configurations for generating responses. It demonstrates how to check
+ OpenAI model outputs using various utility functions.
+ """
+
+ # Define the model arguments for the probability-based function (with logprobs)
+ probability_model_kwargs = {
+ "model": "gpt-3.5-turbo", # Specify the model version
+ "logprobs": True, # Enable logprobs to get probability distributions for tokens
+ "n": 2, # Request 2 different completions for each query
+ }
+
+ # Define general model arguments for most other functions
+ model_kwargs = {
+ "model": "gpt-3.5-turbo", # Specify the model version
+ "temperature": 0.5, # Control the randomness of responses (0 is deterministic)
+ "max_tokens": 100, # Set the maximum number of tokens (words) in the response
+ }
+
+ # List of functions to run with corresponding model arguments
+ func_list = [
+ [
+ get_probabilities,
+ probability_model_kwargs,
+ ], # Function to get probabilities with specific kwargs
+ [
+ get_first_message_content,
+ model_kwargs,
+ ], # Function to get first message content
+ [
+ get_all_messages_content,
+ model_kwargs,
+ ], # Function to get all messages content in multi-chat scenarios
+ ]
+
+        # Loop through each function and its corresponding arguments
+        for func, kwargs in func_list:
+            # Check the function output using the specified arguments
+            result = check_openai_additional_utils(func, kwargs)
+
+            # Print the function and result for debugging purposes
+            print(f"Function: {func.__name__}, Model Args: {kwargs}")
+            print(f"Result: {result}")
+
+Running the Utility Functions
+
+To execute the utility functions, we call the ``run_utils_functions()`` method, which runs the defined functions and prints their results.
+
+.. code-block:: python
+
+ run_utils_functions()
+
+Purpose and Usage
+These utilities (``get_first_message_content``, ``get_all_messages_content``, and ``get_probabilities``) allow users to extract specific information from the OpenAI LLM responses, such as individual message contents in a chat or the probability distribution over tokens.
+
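+For example, passing ``get_first_message_content`` as the parser makes the ``Generator`` return just the first message string in ``output.data`` (a sketch reusing the imports above; the printed reply is illustrative):
+
+.. code-block:: python
+
+    client = OpenAIClient(chat_completion_parser=get_first_message_content)
+    generator = Generator(
+        model_client=client,
+        model_kwargs={"model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 100},
+    )
+    output = generator(prompt_kwargs={"input_str": "What is the capital of France?"})
+    print(output.data)  # e.g. "The capital of France is Paris."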
+
+Groq LLM Chat - Multichat Usage
+-------------------------------------------------
+
+Note: Groq does not provide an embedding endpoint like OpenAI, so only LLM chat functionality is demonstrated here.
+
+The following example demonstrates how to set up a multi-turn conversation with the Groq LLM using the ``GroqAPIClient``.
+
+.. code-block:: python
+
+ from adalflow.components.model_client import GroqAPIClient
+ from adalflow.core.types import ModelType
+ from adalflow.utils import setup_env
+ from typing import List, Dict
+
+ChatConversation Class
+
+This class handles the conversation flow by interacting with the Groq model, keeping track of the conversation history, and generating responses.
+
+.. code-block:: python
+
+ class ChatConversation:
+ def __init__(self):
+ """
+ Initialize a new ChatConversation object.
+ - GroqAPIClient is used to interact with the Groq model.
+ - conversation_history keeps track of the conversation between the user and assistant.
+ - model_kwargs contains the model parameters like temperature and max tokens.
+ """
+ self.groq_client = (
+ GroqAPIClient()
+ ) # Initialize GroqAPIClient for model interaction
+ self.conversation_history: str = (
+ "" # Initialize conversation history as an empty string
+ )
+ self.model_kwargs = {
+ "model": "llama3-8b-8192", # Specify the model to use
+ "temperature": 0.5, # Set the temperature for response variability
+ "max_tokens": 100, # Limit the number of tokens in the response
+ }
+
+ def add_user_message(self, message: str):
+ """
+ Add a user message to the conversation history in the required format.
+ The message is wrapped with tags for better processing by the assistant.
+ """
+            self.conversation_history += (
+                f"<USER> {message} </USER>"  # Append tagged user message to history
+            )
+
+ def add_assistant_message(self, message: str):
+ """
+ Add an assistant message to the conversation history in the required format.
+ The message is wrapped with tags for better processing.
+ """
+            self.conversation_history += (
+                f"<ASSISTANT> {message} </ASSISTANT>"  # Append tagged assistant message to history
+            )
+
+ def get_response(self) -> str:
+ """
+ Generate a response from the assistant based on the conversation history.
+ - Converts the conversation history and model kwargs into the format required by the Groq API.
+ - Calls the API to get the response.
+ - Parses and adds the assistant's reply to the conversation history.
+ """
+ # Prepare the request for the Groq API, converting the inputs into the correct format
+ api_kwargs = self.groq_client.convert_inputs_to_api_kwargs(
+ input=self.conversation_history, # Use the conversation history as input
+ model_kwargs=self.model_kwargs, # Include model-specific parameters
+ model_type=ModelType.LLM, # Specify the model type (Large Language Model)
+ )
+ print(f"api_kwargs: {api_kwargs}") # Log the API request parameters
+
+ # Call the Groq model API to get the response
+ response = self.groq_client.call(
+ api_kwargs=api_kwargs,
+ model_type=ModelType.LLM, # Specify the model type again for clarity
+ )
+ print("response: ", response) # Log the API response
+
+ # Parse the response to extract the assistant's reply
+ response_text = self.groq_client.parse_chat_completion(response)
+
+ # Add the assistant's message to the conversation history
+ self.add_assistant_message(response_text)
+
+ # Return the assistant's response text
+ return response_text
+
+Example Multi-Turn Conversation
+
+The following function simulates a multi-turn conversation, where the user asks a series of questions and the assistant responds. It demonstrates how user inputs are processed, and responses are generated while maintaining the conversation history.
+
+.. code-block:: python
+
+ def check_chat_conversation():
+ """
+ This function simulates a multi-turn conversation between a user and an assistant.
+ It demonstrates how user inputs are processed, and the assistant generates responses,
+ while maintaining the conversation history for each query.
+ """
+ # Initialize the ChatConversation object
+ chat = ChatConversation() # This creates an instance of the ChatConversation class
+
+ # Define a list of user questions for a multi-turn conversation
+ questions = [
+ "What is the capital of France?", # First user question
+ "What is its population?", # Second user question
+ "Tell me about its famous landmarks", # Third user question
+ ]
+
+ # Loop through each question and get the assistant's response
+ for question in questions:
+ # Print the current question from the user
+ print(f"\nUser: {question}")
+
+ # Add the user's message to the conversation history
+ chat.add_user_message(question)
+
+ # Get the assistant's response based on the conversation history
+ response = chat.get_response()
+
+ # Print the assistant's response
+ print(f"Assistant: {response}")
+
+ # After the conversation, print the full conversation history
+ print("\nFull Conversation History:")
+ print(
+ chat.conversation_history
+ ) # This will print all messages (user and assistant) in the conversation history
+
+Run the following to exercise the Groq client's multichat ability:
+
+.. code-block:: python
+
+ check_chat_conversation()
+
+Groq LLM Chat - Multichat Usage - Asynchronous
+-------------------------------------------------
+
+This example demonstrates how to perform multi-turn conversations with the Groq LLM using asynchronous calls for each query. It uses Python's ``asyncio`` to handle multiple independent requests concurrently.
+
+.. code-block:: python
+
+ import asyncio
+ from adalflow.components.model_client import GroqAPIClient
+ from adalflow.core.types import ModelType
+ from typing import List
+
+ChatConversation Class
+
+This class allows you to interact asynchronously with the Groq model. The ``get_response`` method fetches responses from the model for a single user input asynchronously.
+
+.. code-block:: python
+
+ class ChatConversation:
+ def __init__(self):
+ # Using an asynchronous client for communication with GroqAPI
+ self.groq_client = GroqAPIClient() # Create an instance of GroqAPIClient
+ # Model configuration parameters (e.g., Llama model with 8b parameters and 8192 context length)
+ self.model_kwargs = {
+ "model": "llama3-8b-8192", # Llama model with specific size
+ "temperature": 0.5, # Degree of randomness in the model's responses
+ "max_tokens": 100, # Maximum number of tokens in the response
+ }
+
+ async def get_response(self, message: str) -> str:
+ """Get response from the model for a single message asynchronously"""
+
+ # Convert the user input message to the appropriate format for the Groq API
+ api_kwargs = self.groq_client.convert_inputs_to_api_kwargs(
+ input=message, # User's input message
+ model_kwargs=self.model_kwargs, # Model parameters
+ model_type=ModelType.LLM, # Model type for large language models (LLM)
+ )
+ print(f"api_kwargs: {api_kwargs}") # Print the API arguments for debugging
+
+ # Asynchronously call the Groq API with the provided API arguments
+ response = await self.groq_client.acall(
+ api_kwargs=api_kwargs, # Pass the API arguments
+ model_type=ModelType.LLM, # Specify the model type
+ )
+ print("response: ", response) # Print the API response for debugging
+
+ # Parse the response to extract the assistant's reply from the API response
+ response_text = self.groq_client.parse_chat_completion(response)
+ return response_text # Return the assistant's response text
+
+Example Asynchronous Multi-Turn Conversation
+
+The following function demonstrates how multiple independent questions are handled asynchronously. Each question is processed concurrently, and their responses are gathered using ``asyncio.gather()``.
+
+.. code-block:: python
+
+ async def check_chat_conversations():
+ # Create an instance of ChatConversation
+ chat = ChatConversation()
+
+ # List of unrelated questions for independent async calls
+ questions = [
+ "What is the capital of France?",
+            "Is dog a wild animal?",
+ "Tell me about amazon forest",
+ ]
+
+ # Run each question as an independent asynchronous task
+ tasks = [chat.get_response(question) for question in questions]
+ # Gather all the responses concurrently
+ responses = await asyncio.gather(*tasks)
+
+ # Display each response alongside the question
+ for question, response in zip(questions, responses):
+ print(f"\nUser: {question}")
+ print(f"Assistant: {response}")
+
+To execute the function, run the following:
+
+.. code-block:: python
+
+ # Run the asynchronous function if in a file
+ # asyncio.run(check_chat_conversations())
+
+ await check_chat_conversations()
+
+Groq LLM Chat - Multichat Usage - Benchmark sync() vs async()
+-----------------------------------------------------------------
+
+This example demonstrates how to benchmark the synchronous ``.call()`` method versus the asynchronous ``.acall()`` method for making API calls using Groq. The benchmark compares the time taken to execute multiple API requests synchronously and asynchronously.
+
+
+.. code-block:: python
+
+ import asyncio
+ import time
+ from adalflow.components.model_client import (
+ GroqAPIClient,
+ ) # Assuming GroqAPI with .call() and .acall() is available
+ from adalflow.core.types import ModelType
+
+Initialization
+
+The following code initializes the Groq client and sets up the sample prompt and model parameters for testing.
+
+.. code-block:: python
+
+ # Initialize the Groq client
+ groq_client = GroqAPIClient()
+
+ # Sample prompt for testing
+ prompt = "Tell me a joke."
+
+ model_kwargs = {"model": "llama3-8b-8192", "temperature": 0.5, "max_tokens": 100}
+
+Benchmarking Synchronous ``.call()`` Method
+
+This function benchmarks the synchronous ``.call()`` method by calling the Groq API synchronously multiple times.
+
+.. code-block:: python
+
+ # Synchronous function for benchmarking .call()
+ def benchmark_sync_call(api_kwargs, runs=10):
+ # List to store responses from each synchronous call
+ responses = []
+
+ # Record the start time for benchmarking
+ start_time = time.time()
+
+ # Perform synchronous API calls in a loop
+ responses = [
+ groq_client.call( # Calling the API synchronously
+ api_kwargs=api_kwargs, # Passing the API arguments
+ model_type=ModelType.LLM, # Defining the model type
+ )
+ for _ in range(runs) # Repeat the call 'runs' times
+ ]
+
+ # Record the end time after all calls are completed
+ end_time = time.time()
+
+ # Print out the response from each synchronous call
+ for i, response in enumerate(responses):
+ print(f"sync call {i + 1} completed: {response}")
+
+ # Print the total time taken for the synchronous benchmark
+ print(f"\nSynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+Benchmarking Asynchronous ``.acall()`` Method
+
+This asynchronous function benchmarks the ``.acall()`` method by calling the Groq API asynchronously multiple times, using ``asyncio.gather()`` to execute tasks concurrently.
+
+.. code-block:: python
+
+ # Asynchronous function for benchmarking .acall()
+ async def benchmark_async_acall(api_kwargs, runs=10):
+ # Record the start time for benchmarking
+ start_time = time.time()
+
+ # Create a list of tasks for asynchronous API calls
+ tasks = [
+ groq_client.acall( # Calling the API asynchronously
+ api_kwargs=api_kwargs, # Passing the API arguments
+ model_type=ModelType.LLM, # Defining the model type
+ )
+ for _ in range(runs) # Repeat the call 'runs' times
+ ]
+
+ # Await the completion of all tasks concurrently
+ responses = await asyncio.gather(
+ *tasks
+ ) # Gather all the responses from asynchronous calls
+
+ # Record the end time after all asynchronous calls are completed
+ end_time = time.time()
+
+ # Print out the response from each asynchronous call
+ for i, response in enumerate(responses):
+ print(f"Async call {i + 1} completed: {response}")
+
+ # Print the total time taken for the asynchronous benchmark
+ print(f"\nAsynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+Running the Benchmarks
+
+The following code sets up the API arguments and runs both the synchronous and asynchronous benchmarks.
+
+.. code-block:: python
+
+ api_kwargs = groq_client.convert_inputs_to_api_kwargs(
+ input=prompt, model_kwargs=model_kwargs, model_type=ModelType.LLM
+ )
+
+ # Run both benchmarks
+ print("Starting synchronous benchmark...\n")
+ benchmark_sync_call(api_kwargs)
+
+ print("\nStarting asynchronous benchmark...\n")
+ await benchmark_async_acall(api_kwargs)
+
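+If you are running this from a plain Python script rather than a notebook, a small guard keeps both benchmarks on one entry point (a sketch, not part of the original tutorial):
+
+.. code-block:: python
+
+    if __name__ == "__main__":
+        print("Starting synchronous benchmark...\n")
+        benchmark_sync_call(api_kwargs)
+
+        print("\nStarting asynchronous benchmark...\n")
+        asyncio.run(benchmark_async_acall(api_kwargs))
+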
+Building a Custom Model Client
+-------------------------------------------------
+
+Building a Synchronous API Call
+
+Note: This example uses the OpenAI API to illustrate how to build a custom
+model client in AdalFlow. Although an OpenAI client already ships with the
+AdalFlow repo, the code below is useful starter code for anyone who wants
+to build a custom model client.
+
+.. code-block:: python
+
+    # Building a simple custom third-party model client and using it
+    # convert_inputs_to_api_kwargs() is modified to follow the OpenAI prompt format,
+    # and the appropriate OpenAI API call is made in call()
+
+ import openai
+ from adalflow.core.model_client import ModelClient
+ from adalflow.core.types import ModelType, GeneratorOutput, EmbedderOutput
+ from openai.types import (
+ CreateEmbeddingResponse,
+ )
+ from adalflow.components.model_client.utils import parse_embedding_response
+
+This class defines the custom model client. The constructor initializes the client by calling the parent class's initializer (``ModelClient``), which is essential for the setup of the AdalFlow framework.
+
+.. code-block:: python
+
+ class SimpleCustomModelClient(ModelClient):
+ # Initialize the custom model client
+ def __init__(self):
+ # Call the parent class's initializer
+ super().__init__()
+ pass # Placeholder for any initialization logic if needed in the future
+
+ # Method to convert input into API parameters for different model types (LLM or Embedder)
+ def convert_inputs_to_api_kwargs(
+ self, input=None, model_kwargs={}, model_type=ModelType.UNDEFINED
+ ):
+ """
+ Convert the inputs into API arguments based on the model type.
+
+ Args:
+ input (str): The input text to be processed.
+ model_kwargs (dict): Additional model parameters like temperature, max_tokens, etc.
+ model_type (ModelType): The type of model to use (LLM or Embedder).
+
+ Returns:
+ dict: API arguments formatted for the specified model type.
+ """
+ if (
+ model_type == ModelType.LLM
+ ): # If the model type is a large language model (LLM)
+ return {
+ "model": model_kwargs[
+ "model"
+ ], # Set the model to use (e.g., GPT-3, GPT-4)
+ "messages": input, # Provide the input as the message
+ "temperature": model_kwargs[
+ "temperature"
+ ], # Set the temperature (creativity of the response)
+ "max_tokens": model_kwargs[
+ "max_tokens"
+ ], # Max tokens to generate in the response
+ }
+ elif model_type == ModelType.EMBEDDER: # If the model type is an embedder
+ return {
+ "model": model_kwargs["model"], # Model name for embedding
+ "input": [input], # Provide the input in a list format for embedding
+ }
+ else:
+ # Raise an error if the model type is unsupported
+ raise ValueError(f"model_type {model_type} is not supported")
+
+ # Method to make the actual API call to OpenAI for either completions (LLM) or embeddings
+ def call(self, api_kwargs={}, model_type=ModelType.UNDEFINED):
+ """
+ Call the appropriate OpenAI API method based on the model type (LLM or Embedder).
+
+ Args:
+ api_kwargs (dict): Arguments to be passed to the API call.
+ model_type (ModelType): The type of model (LLM or Embedder).
+
+ Returns:
+ Response: The API response from OpenAI.
+ """
+ if model_type == ModelType.LLM: # If the model type is LLM (e.g., GPT-3, GPT-4)
+ return openai.chat.completions.create(
+ **api_kwargs
+ ) # Call the chat API for completion
+ elif model_type == ModelType.EMBEDDER: # If the model type is Embedder
+ return openai.embeddings.create(**api_kwargs) # Call the embedding API
+ else:
+ # Raise an error if an invalid model type is passed
+ raise ValueError(f"Unsupported model type: {model_type}")
+
+ # Method to parse the response from a chat completion API call
+ def parse_chat_completion(self, completion):
+ """
+ Parse the response from a chat completion API call into a custom output format.
+
+ Args:
+ completion: The completion response from the OpenAI API.
+
+ Returns:
+ GeneratorOutput: A custom data structure containing the parsed response.
+ """
+            # Note: GeneratorOutput is an AdalFlow dataclass that contains the parsed completion data
+ return GeneratorOutput(
+ data=completion, # Store the raw completion data
+ error=None, # No error in this case
+ raw_response=str(completion), # Store the raw response as a string
+ )
+
+ # Method to parse the response from an embedding API call
+ def parse_embedding_response(
+ self, response: CreateEmbeddingResponse
+ ) -> EmbedderOutput:
+ """
+ Parse the response from an embedding API call into a custom output format.
+
+ Args:
+ response (CreateEmbeddingResponse): The response from the embedding API.
+
+ Returns:
+ EmbedderOutput: A custom data structure containing the parsed embedding response.
+ """
+ try:
+ # Attempt to parse the embedding response using a helper function
+ return parse_embedding_response(response)
+ except Exception as e:
+ # If parsing fails, return an error message with the raw response
+ return EmbedderOutput(data=[], error=str(e), raw_response=response)
+
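+Before making any network calls, you can sanity-check the kwargs conversion offline; for the LLM branch it should mirror the OpenAI chat payload (a quick sketch using the class above):
+
+.. code-block:: python
+
+    client = SimpleCustomModelClient()
+    kwargs = client.convert_inputs_to_api_kwargs(
+        input=[{"role": "user", "content": "Hi"}],
+        model_kwargs={"model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 100},
+        model_type=ModelType.LLM,
+    )
+    print(kwargs)
+    # {'model': 'gpt-3.5-turbo', 'messages': [{'role': 'user', 'content': 'Hi'}],
+    #  'temperature': 0.5, 'max_tokens': 100}
+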
+In the block below, the custom model client is instantiated, and a query is defined for processing by both an LLM (like GPT-3.5) and an Embedder model. The API arguments are converted, and the ``call()`` method is used to fetch responses. Finally, both types of responses (LLM and Embedder) are parsed and printed.
+
+.. code-block:: python
+
+ def build_custom_model_client():
+ # Instantiate the custom model client (SimpleCustomModelClient)
+ custom_client = SimpleCustomModelClient()
+
+ # Define the query for the model to process
+ query = "What is the capital of France?"
+
+ # Set the model type for a Large Language Model (LLM)
+ model_type = ModelType.LLM
+
+ # Prepare the message prompt as expected by the OpenAI chat API.
+ # This format is suitable for GPT-like models (e.g., gpt-3.5-turbo).
+ message_prompt = [
+ {
+ "role": "user", # Define the user role in the conversation
+ "content": [
+ {
+ "type": "text", # Specify that the input is a text type
+ "text": query, # The actual query to be processed by the model
+ }
+ ],
+ }
+ ]
+
+ # Print message indicating the usage of the LLM model type
+ print("ModelType LLM")
+
+ # Define additional model parameters like model name, temperature, and max tokens for LLM
+ model_kwargs = {"model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 100}
+
+ # Convert the input message and model kwargs into the required API parameters
+ api_kwargs = custom_client.convert_inputs_to_api_kwargs(
+ input=message_prompt, model_kwargs=model_kwargs, model_type=model_type
+ )
+
+ # Print the API arguments that will be passed to the call method
+ print(f"api_kwargs: {api_kwargs}")
+
+ # Call the LLM model using the prepared API arguments
+ result = custom_client.call(api_kwargs, ModelType.LLM)
+
+ # Print the result of the LLM model call (response from OpenAI)
+ print(result)
+
+ # Parse the chat completion response and output a more structured result
+ response_text = custom_client.parse_chat_completion(result)
+
+ # Print the structured response from the chat completion
+ print(f"response_text: {response_text}")
+
+ # Switch to using the Embedder model type
+ print("ModelType EMBEDDER")
+
+ # Define model-specific parameters for the embedding model
+ model_kwargs = {
+ "model": "text-embedding-3-small",
+ "dimensions": 8,
+ "encoding_format": "float",
+ }
+
+ # Convert the input query for the embedder model
+ api_kwargs = custom_client.convert_inputs_to_api_kwargs(
+ input=query, model_kwargs=model_kwargs, model_type=ModelType.EMBEDDER
+ )
+
+ # Print the API arguments that will be passed to the embedder model
+ print(f"embedder api_kwargs: {api_kwargs}")
+
+ # Call the Embedder model using the prepared API arguments
+ result = custom_client.call(api_kwargs, ModelType.EMBEDDER)
+
+ # Print the result of the Embedder model call (embedding response)
+ print(result)
+
+ # Parse the embedding response and output a more structured result
+ response_text = custom_client.parse_embedding_response(result)
+
+ # Print the structured response from the embedding model
+ print(f"response_text: {response_text}")
+
+This is the function call that triggers the execution of the custom model client, processing the defined query and displaying results for both LLM and Embedder.
+
+.. code-block:: python
+
+ build_custom_model_client()
.. admonition:: API reference
:class: highlight
diff --git a/notebooks/tutorials/adalflow_modelclient.ipynb b/notebooks/tutorials/adalflow_modelclient.ipynb
new file mode 100644
index 00000000..1674c69a
--- /dev/null
+++ b/notebooks/tutorials/adalflow_modelclient.ipynb
@@ -0,0 +1,2063 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "gHK6HFngl6iP"
+ },
+ "source": [
+ "# 🤗 Welcome to AdalFlow!\n",
+ "## The PyTorch library to auto-optimize any LLM task pipelines\n",
+ "\n",
+ "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help! ⭐ Star us on Github ⭐\n",
+ "\n",
+ "\n",
+ "# Quick Links\n",
+ "\n",
+ "Github repo: https://github.com/SylphAI-Inc/AdalFlow\n",
+ "\n",
+ "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n",
+ "\n",
+ "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n",
+ "\n",
+ "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n",
+ "\n",
+ "# Author\n",
+ "This notebook was created by community contributor [Ajith](https://github.com/ajithvcoder/).\n",
+ "\n",
+ "# Outline\n",
+ "\n",
+ "This is a quick introduction of what AdalFlow is capable of. We will cover:\n",
+ "\n",
+    "* How to use the model client with sync and async calls\n",
+    "* How to develop a custom model client using AdalFlow\n",
+ "\n",
+ "**Next: Try our [auto-optimization](https://colab.research.google.com/drive/1n3mHUWekTEYHiBdYBTw43TKlPN41A9za?usp=sharing)**\n",
+ "\n",
+ "\n",
+ "# Installation\n",
+ "\n",
+    "1. Use `pip` to install the `adalflow` Python package. We will need `openai`, `groq`, and `faiss` (CPU version) from the extra packages.\n",
+ "\n",
+ " ```bash\n",
+ " pip install adalflow[openai,groq,faiss-cpu]\n",
+ " ```\n",
+ "2. Setup `openai` and `groq` API key in the environment variables"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "nqe-vxB1BCux"
+ },
+ "source": [
+ "### Install adalflow"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {
+ "id": "ZaaevxNH9JMQ"
+ },
+ "outputs": [],
+ "source": [
+ "# Install adalflow with necessary dependencies\n",
+ "from IPython.display import clear_output\n",
+ "\n",
+ "!pip install -U adalflow[openai,groq,faiss-cpu]\n",
+ "\n",
+ "clear_output()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "NGE70aZ8BLuf"
+ },
+ "source": [
+ "### Set Environment Variables\n",
+ "\n",
+    "Note: Enter your API keys in the cell below"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "j2xmGr_99YDq",
+ "outputId": "5f4ef3fe-3c20-481b-e4f6-93c14af1fd32"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Writing .env\n"
+ ]
+ }
+ ],
+ "source": [
+ "%%writefile .env\n",
+ "\n",
+ "OPENAI_API_KEY=\"PASTE-OPENAI_API_KEY_HERE\"\n",
+ "GROQ_API_KEY=\"PASTE-GROQ_API_KEY-HERE\""
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {
+ "id": "etSUh9KNjmdy"
+ },
+ "outputs": [],
+ "source": [
+ "from adalflow.utils import setup_env\n",
+ "\n",
+ "# Load environment variables - Make sure to have OPENAI_API_KEY in .env file and .env is present in current folder\n",
+ "setup_env(\".env\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "ZxBkm77uBZpl"
+ },
+ "source": [
+    "### Basic Vanilla Usage Example - model_client() - LLM Chat"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {
+ "id": "wOAiKg899Z2u"
+ },
+ "outputs": [],
+ "source": [
+ "from adalflow.components.model_client import OpenAIClient\n",
+ "from adalflow.core.types import ModelType"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "jv5124_27ioY",
+ "outputId": "8e593b49-4705-49c0-9501-58cee43831d1"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'User: What is the capital of France?\\n'}]}\n",
+ "response_text: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=16, total_tokens=23), raw_response='The capital of France is Paris.', metadata=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Initialize the OpenAI client for API interactions\n",
+ "openai_client = OpenAIClient()\n",
+ "query = \"What is the capital of France?\"\n",
+ "\n",
+ "# Set the model type to Large Language Model (LLM)\n",
+ "model_type = ModelType.LLM\n",
+ "\n",
+ "# Construct the prompt by formatting the user's query\n",
+ "prompt = f\"User: {query}\\n\"\n",
+ "\n",
+ "# Configure model parameters:\n",
+ "# - model: Specifies GPT-3.5-turbo as the model to use\n",
+ "# - temperature: Controls randomness (0.5 = balanced between deterministic and creative)\n",
+ "# - max_tokens: Limits the response length to 100 tokens\n",
+ "model_kwargs = {\"model\": \"gpt-3.5-turbo\", \"temperature\": 0.5, \"max_tokens\": 100}\n",
+ "\n",
+ "# Convert the inputs into the format required by OpenAI's API\n",
+ "api_kwargs = openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=prompt, model_kwargs=model_kwargs, model_type=model_type\n",
+ ")\n",
+ "print(f\"api_kwargs: {api_kwargs}\")\n",
+ "\n",
+ "\n",
+ "response = openai_client.call(api_kwargs=api_kwargs, model_type=model_type)\n",
+ "\n",
+ "# Extract the text from the chat completion response\n",
+ "response_text = openai_client.parse_chat_completion(response)\n",
+ "print(f\"response_text: {response_text}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "MBW5viOG9hM8"
+ },
+ "source": [
+    "### Basic Vanilla Usage Example - model_client() - Embedding"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "YA4pAIek9ewc",
+ "outputId": "442d9708-966d-498a-de06-6a4833ba93ac"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the capital of France?', 'What is the capital of France?']}\n",
+      "response_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=0), Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=1)], model='text-embedding-3-small', usage=Usage(prompt_tokens=14, total_tokens=14), error=None, raw_response=None, input=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "openai_client = OpenAIClient()\n",
+ "query = \"What is the capital of France?\"\n",
+ "\n",
+ "# Specify the model type to be used, setting it to EMBEDDER for embedding functionality\n",
+ "model_type = ModelType.EMBEDDER\n",
+ "\n",
+ "# Create a batch of inputs by duplicating the query; useful for testing batch embedding capabilities\n",
+ "input = [query] * 2\n",
+ "\n",
+ "# Set the model's parameters:\n",
+ "# - \"text-embedding-3-small\" is the model being used\n",
+ "# - \"dimensions\" defines the dimensionality of the embeddings\n",
+ "# - \"encoding_format\" specifies the data format for the embeddings\n",
+ "model_kwargs = {\n",
+ " \"model\": \"text-embedding-3-small\",\n",
+ " \"dimensions\": 8,\n",
+ " \"encoding_format\": \"float\",\n",
+ "}\n",
+ "\n",
+ "# Convert the inputs and model parameters to the format expected by the API using OpenAI client's helper method\n",
+ "api_kwargs = openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=input, model_kwargs=model_kwargs, model_type=model_type\n",
+ ")\n",
+ "print(f\"api_kwargs: {api_kwargs}\")\n",
+ "\n",
+ "response = openai_client.call(api_kwargs=api_kwargs, model_type=model_type)\n",
+ "\n",
+ "# Parse the embedding response to extract the embeddings in a usable format\n",
+    "response_embedder_output = openai_client.parse_embedding_response(response)\n",
+    "print(f\"response_embedder_output: {response_embedder_output}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "k42xTszF34Lx"
+ },
+ "source": [
+    "### AdalFlow - model_client() - **OpenAI model** Embedding Usage (ModelType.EMBEDDER)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "metadata": {
+ "id": "NPT6NmaKiKJ9"
+ },
+ "outputs": [],
+ "source": [
+ "from typing import List\n",
+ "import numpy as np\n",
+ "from adalflow.core.types import ModelType, EmbedderOutput\n",
+ "from adalflow.components.model_client import OpenAIClient\n",
+ "from dataclasses import dataclass\n",
+ "from enum import Enum\n",
+ "from numpy.linalg import norm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "metadata": {
+ "id": "Uwtb2sejiN6z"
+ },
+ "outputs": [],
+ "source": [
+ "@dataclass\n",
+ "class EmbeddingCollection:\n",
+ " collection: List[float]\n",
+ " cindex: int\n",
+ "\n",
+ "\n",
+ "@dataclass\n",
+ "class Usage:\n",
+ " prompt_tokens: int\n",
+ " total_tokens: int"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "metadata": {
+ "id": "qyzVv9qviUB1"
+ },
+ "outputs": [],
+ "source": [
+ "openai_client = OpenAIClient()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "metadata": {
+ "id": "ufpyMmmZicVe"
+ },
+ "outputs": [],
+ "source": [
+ "def get_openai_embedding(text):\n",
+ " # Set model type to EMBEDDER for embedding functionality\n",
+ " model_type = ModelType.EMBEDDER\n",
+ "\n",
+ " # Prepare input and model-specific parameters\n",
+ " input = text\n",
+ " model_kwargs = {\n",
+ " \"model\": \"text-embedding-3-small\",\n",
+ " \"dimensions\": 8,\n",
+ " \"encoding_format\": \"float\",\n",
+ " }\n",
+ "\n",
+ " # Convert inputs to the required API format\n",
+ " api_kwargs = openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=input, model_kwargs=model_kwargs, model_type=model_type\n",
+ " )\n",
+ " print(f\"api_kwargs: {api_kwargs}\") # Debug output to verify API arguments\n",
+ "\n",
+ " # Call OpenAI API and parse response for embeddings\n",
+ " response = openai_client.call(api_kwargs=api_kwargs, model_type=model_type)\n",
+ " reponse_embedder_output = openai_client.parse_embedding_response(response)\n",
+ " print(\n",
+ " f\"reponse_embedder_output: {reponse_embedder_output}\"\n",
+ " ) # Debug output to verify embeddings\n",
+ " return reponse_embedder_output\n",
+ "\n",
+ "\n",
+ "def process_embeddings(embeddings_collection):\n",
+ " # Extract embedding data for each item in the collection\n",
+ " embeddingOutput = [emb.collection for emb in embeddings_collection]\n",
+ " embeddingDataList = [each_emb_out.data for each_emb_out in embeddingOutput]\n",
+ " embeddingList = [\n",
+ " each_item.embedding\n",
+ " for each_emb_data in embeddingDataList\n",
+ " for each_item in each_emb_data\n",
+ " ]\n",
+ "\n",
+ " # Convert to numpy array for easier manipulation and calculations\n",
+ " embeddings_array = np.array(embeddingList)\n",
+ "\n",
+ " def calculate_similarity(emb1, emb2):\n",
+ " # Compute cosine similarity between two embeddings\n",
+ " return np.dot(emb1, emb2) / (norm(emb1) * norm(emb2))\n",
+ "\n",
+ " def get_average_embedding(embeddings_list):\n",
+ " # Calculate the mean embedding across a list of embeddings\n",
+ " return np.mean(embeddings_list, axis=0)\n",
+ "\n",
+ " def find_nearest_neighbors(\n",
+ " query_index: int, embedding_list: List[List[float]], k: int = 5\n",
+ " ):\n",
+ " # Find top-k most similar embeddings to a query embedding, based on cosine similarity\n",
+ " query_embedding = embedding_list[query_index]\n",
+ " similarities = [\n",
+ " (i, calculate_similarity(query_embedding, emb))\n",
+ " for i, emb in enumerate(embedding_list)\n",
+ " if i != query_index\n",
+ " ]\n",
+ " return sorted(similarities, key=lambda x: x[1], reverse=True)[:k]\n",
+ "\n",
+ " # Return dictionary of functions and processed data for further use\n",
+ " return {\n",
+ " \"embeddings_array\": embeddings_array,\n",
+ " \"calculate_similarity\": calculate_similarity,\n",
+ " \"average_embedding\": get_average_embedding,\n",
+ " \"find_nearest_neighbors\": find_nearest_neighbors,\n",
+ " }\n",
+ "\n",
+ "\n",
+ "# Demonstrate embeddings usage with sample data\n",
+ "def demonstrate_embeddings_usage(sample_embeddings, input_text_list):\n",
+ " # Initialize processor and retrieve embeddings array\n",
+ " processor = process_embeddings(sample_embeddings)\n",
+ " embeddings = processor[\"embeddings_array\"]\n",
+ "\n",
+ " print(\"1. Analyzing Semantic Similarities:\")\n",
+ " print(\"-\" * 50)\n",
+ "\n",
+ " # Select a few random indices for similarity testing\n",
+ " num_indices = 5\n",
+ " assert len(input_text_list) == len(embeddings)\n",
+ " indices = np.random.choice(len(input_text_list), num_indices, replace=False)\n",
+ " selected_text = np.array(input_text_list)[indices]\n",
+ " selected_embeddings = np.array(embeddings)[indices]\n",
+ "\n",
+ " # Display selected texts and their embeddings\n",
+ " print(\"Selected indices:\", indices)\n",
+ " print(\"Selected elements from array1:\", selected_text)\n",
+ " print(\"Selected elements from array2:\", selected_embeddings)\n",
+ "\n",
+ " # Calculate similarity between each pair of selected texts\n",
+ " for i in range(len(selected_text)):\n",
+ " for j in range(i + 1, len(selected_text)):\n",
+ " similarity = processor[\"calculate_similarity\"](\n",
+ " selected_embeddings[i], selected_embeddings[j]\n",
+ " )\n",
+ " print(f\"\\nComparing:\\n'{selected_text[i]}' \\nwith:\\n'{selected_text[j]}'\")\n",
+ " print(f\"Similarity score: {similarity:.4f}\")\n",
+ "\n",
+ " print(\"\\n2. Finding Nearest Neighbors:\")\n",
+ " print(\"-\" * 50)\n",
+ "\n",
+ " # Find and display the 3 nearest neighbors for the first text\n",
+ " query_idx = 0\n",
+ " neighbors = processor[\"find_nearest_neighbors\"](query_idx, embeddings, k=3)\n",
+ " print(f\"\\nQuery text: '{input_text_list[query_idx]}'\")\n",
+ " print(\"\\nNearest neighbors:\")\n",
+ "\n",
+ " for idx, similarity in neighbors:\n",
+ " print(f\"- '{input_text_list[idx]}' (similarity: {similarity:.4f})\")\n",
+ "\n",
+ " print(\"\\n3. Using Average Embeddings:\")\n",
+ " print(\"-\" * 50)\n",
+ "\n",
+ " # Calculate and compare the average embedding for texts containing \"Paris\"\n",
+ " paris_indices = [i for i, text in enumerate(input_text_list) if \"Paris\" in text]\n",
+ " paris_embeddings = embeddings[paris_indices]\n",
+ " avg_paris_embedding = processor[\"average_embedding\"](paris_embeddings)\n",
+ "\n",
+ " print(\"\\nComparing average 'Paris' embedding with all texts:\")\n",
+ " for i, text in enumerate(input_text_list):\n",
+ " similarity = processor[\"calculate_similarity\"](\n",
+ " avg_paris_embedding, embeddings[i]\n",
+ " )\n",
+ " print(f\"- '{text}' (similarity: {similarity:.4f})\")"
+ ]
+ },
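+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "To make the cosine-similarity step concrete, here is a minimal, self-contained sketch on toy 3-dimensional vectors (the values are illustrative, not real embeddings):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import numpy as np\n",
+ "from numpy.linalg import norm\n",
+ "\n",
+ "# Toy vectors standing in for embeddings (illustrative values only)\n",
+ "a = np.array([1.0, 0.0, 1.0])\n",
+ "b = np.array([1.0, 0.5, 0.9])\n",
+ "\n",
+ "# Same formula as calculate_similarity above: dot product scaled by the norms\n",
+ "print(f\"cosine similarity: {np.dot(a, b) / (norm(a) * norm(b)):.4f}\")"
+ ]
+ },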
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "metadata": {
+ "id": "EuvZkL2kifvF"
+ },
+ "outputs": [],
+ "source": [
+ "def run_model_client_embedding_usage():\n",
+ " # Define a set of sample texts to test embedding and similarity functionalities\n",
+ " sample_texts = [\n",
+ " \"What is the capital of France?\",\n",
+ " \"Paris is the capital of France.\",\n",
+ " \"What is the population of France?\",\n",
+ " \"How big is Paris?\",\n",
+ " \"What is the weather like in Paris?\",\n",
+ " ]\n",
+ "\n",
+ " # Duplicate each sample text to form an input list with repeated entries (for embedding testing)\n",
+ " input_text_list = [text for text in sample_texts for _ in range(2)]\n",
+ "\n",
+ " # Generate embeddings for each text in the input list, and store them in an EmbeddingCollection\n",
+ " embeddings_collection = [\n",
+ " EmbeddingCollection(collection=get_openai_embedding(text), cindex=i)\n",
+ " for i, text in enumerate(input_text_list)\n",
+ " ]\n",
+ " print(\n",
+ " embeddings_collection\n",
+ " ) # Debugging output to verify embeddings collection content\n",
+ "\n",
+ " # Demonstrate the usage of embeddings by analyzing similarities, finding neighbors, etc.\n",
+ " demonstrate_embeddings_usage(embeddings_collection, input_text_list)"
+ ]
+ },
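+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "`run_model_client_embedding_usage` issues one API call per text. Since `convert_inputs_to_api_kwargs` also accepts a list of inputs (as in the batch example near the start of this notebook), the same embeddings could be fetched in a single call. A minimal sketch with a hypothetical `get_openai_embeddings_batched` helper, reusing the client and model settings defined above:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def get_openai_embeddings_batched(texts: List[str]) -> EmbedderOutput:\n",
+ "    # One EMBEDDER call for the whole list instead of one call per text\n",
+ "    model_kwargs = {\n",
+ "        \"model\": \"text-embedding-3-small\",\n",
+ "        \"dimensions\": 8,\n",
+ "        \"encoding_format\": \"float\",\n",
+ "    }\n",
+ "    api_kwargs = openai_client.convert_inputs_to_api_kwargs(\n",
+ "        input=texts, model_kwargs=model_kwargs, model_type=ModelType.EMBEDDER\n",
+ "    )\n",
+ "    response = openai_client.call(api_kwargs=api_kwargs, model_type=ModelType.EMBEDDER)\n",
+ "    return openai_client.parse_embedding_response(response)"
+ ]
+ },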
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "M9EpJd_7izCf",
+ "outputId": "ed1d938c-da36-4d1d-8cdf-5449047af403"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the capital of France?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the capital of France?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['Paris is the capital of France.']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.32851914, 0.31952682, -0.22016178, -0.34650376, -0.31563017, 0.49667537, -0.3447053, 0.395362], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['Paris is the capital of France.']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.32851914, 0.31952682, -0.22016178, -0.34650376, -0.31563017, 0.49667537, -0.3447053, 0.395362], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the population of France?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.69818175, 0.33034775, 0.48146424, -0.041622937, -0.38599142, 0.13369127, -0.011028064, 0.05374008], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the population of France?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.69818175, 0.33034775, 0.48146424, -0.041622937, -0.38599142, 0.13369127, -0.011028064, 0.05374008], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['How big is Paris?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.13988405, -0.35962427, 0.14219283, 0.0681765, -0.51662034, -0.116185255, -0.44545603, -0.58941436], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=5, total_tokens=5), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['How big is Paris?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[0.13988405, -0.35962427, 0.14219283, 0.0681765, -0.51662034, -0.116185255, -0.44545603, -0.58941436], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=5, total_tokens=5), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the weather like in Paris?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[-0.16997108, -0.14322221, -0.6407088, -0.16881266, -0.08045719, -0.20030048, -0.021483332, -0.6815693], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=8, total_tokens=8), error=None, raw_response=None, input=None)\n",
+ "api_kwargs: {'model': 'text-embedding-3-small', 'dimensions': 8, 'encoding_format': 'float', 'input': ['What is the weather like in Paris?']}\n",
+ "reponse_embedder_output: EmbedderOutput(data=[Embedding(embedding=[-0.16997108, -0.14322221, -0.6407088, -0.16881266, -0.08045719, -0.20030048, -0.021483332, -0.6815693], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=8, total_tokens=8), error=None, raw_response=None, input=None)\n",
+ "[EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=0), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.63402575, 0.24025092, 0.42818537, 0.37026355, -0.3518905, -0.041650757, -0.21627253, 0.21798527], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=1), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.32851914, 0.31952682, -0.22016178, -0.34650376, -0.31563017, 0.49667537, -0.3447053, 0.395362], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=2), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.32851914, 0.31952682, -0.22016178, -0.34650376, -0.31563017, 0.49667537, -0.3447053, 0.395362], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=3), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.69818175, 0.33034775, 0.48146424, -0.041622937, -0.38599142, 0.13369127, -0.011028064, 0.05374008], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=4), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.69818175, 0.33034775, 0.48146424, -0.041622937, -0.38599142, 0.13369127, -0.011028064, 0.05374008], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None), cindex=5), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.13988405, -0.35962427, 0.14219283, 0.0681765, -0.51662034, -0.116185255, -0.44545603, -0.58941436], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=5, total_tokens=5), error=None, raw_response=None, input=None), cindex=6), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[0.13988405, -0.35962427, 0.14219283, 0.0681765, -0.51662034, -0.116185255, -0.44545603, -0.58941436], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=5, total_tokens=5), error=None, raw_response=None, input=None), cindex=7), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[-0.16997108, -0.14322221, -0.6407088, -0.16881266, -0.08045719, -0.20030048, -0.021483332, -0.6815693], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=8, total_tokens=8), error=None, raw_response=None, input=None), cindex=8), EmbeddingCollection(collection=EmbedderOutput(data=[Embedding(embedding=[-0.16997108, -0.14322221, -0.6407088, -0.16881266, -0.08045719, -0.20030048, -0.021483332, -0.6815693], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=8, total_tokens=8), error=None, raw_response=None, input=None), cindex=9)]\n",
+ "1. Analyzing Semantic Similarities:\n",
+ "--------------------------------------------------\n",
+ "Selected indices: [5 6 4 2 0]\n",
+ "Selected elements from array1: ['What is the population of France?' 'How big is Paris?'\n",
+ " 'What is the population of France?' 'Paris is the capital of France.'\n",
+ " 'What is the capital of France?']\n",
+ "Selected elements from array2: [[ 0.69818175 0.33034775 0.48146424 -0.04162294 -0.38599142 0.13369127\n",
+ " -0.01102806 0.05374008]\n",
+ " [ 0.13988405 -0.35962427 0.14219283 0.0681765 -0.51662034 -0.11618526\n",
+ " -0.44545603 -0.58941436]\n",
+ " [ 0.69818175 0.33034775 0.48146424 -0.04162294 -0.38599142 0.13369127\n",
+ " -0.01102806 0.05374008]\n",
+ " [ 0.32851914 0.31952682 -0.22016178 -0.34650376 -0.31563017 0.49667537\n",
+ " -0.3447053 0.395362 ]\n",
+ " [ 0.63402575 0.24025092 0.42818537 0.37026355 -0.3518905 -0.04165076\n",
+ " -0.21627253 0.21798527]]\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'How big is Paris?'\n",
+ "Similarity score: 0.2016\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'What is the population of France?'\n",
+ "Similarity score: 1.0000\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'Paris is the capital of France.'\n",
+ "Similarity score: 0.4566\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'What is the capital of France?'\n",
+ "Similarity score: 0.8571\n",
+ "\n",
+ "Comparing:\n",
+ "'How big is Paris?' \n",
+ "with:\n",
+ "'What is the population of France?'\n",
+ "Similarity score: 0.2016\n",
+ "\n",
+ "Comparing:\n",
+ "'How big is Paris?' \n",
+ "with:\n",
+ "'Paris is the capital of France.'\n",
+ "Similarity score: -0.0980\n",
+ "\n",
+ "Comparing:\n",
+ "'How big is Paris?' \n",
+ "with:\n",
+ "'What is the capital of France?'\n",
+ "Similarity score: 0.2429\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'Paris is the capital of France.'\n",
+ "Similarity score: 0.4566\n",
+ "\n",
+ "Comparing:\n",
+ "'What is the population of France?' \n",
+ "with:\n",
+ "'What is the capital of France?'\n",
+ "Similarity score: 0.8571\n",
+ "\n",
+ "Comparing:\n",
+ "'Paris is the capital of France.' \n",
+ "with:\n",
+ "'What is the capital of France?'\n",
+ "Similarity score: 0.3136\n",
+ "\n",
+ "2. Finding Nearest Neighbors:\n",
+ "--------------------------------------------------\n",
+ "\n",
+ "Query text: 'What is the capital of France?'\n",
+ "\n",
+ "Nearest neighbors:\n",
+ "- 'What is the capital of France?' (similarity: 1.0000)\n",
+ "- 'What is the population of France?' (similarity: 0.8571)\n",
+ "- 'What is the population of France?' (similarity: 0.8571)\n",
+ "\n",
+ "3. Using Average Embeddings:\n",
+ "--------------------------------------------------\n",
+ "\n",
+ "Comparing average 'Paris' embedding with all texts:\n",
+ "- 'What is the capital of France?' (similarity: -0.0168)\n",
+ "- 'What is the capital of France?' (similarity: -0.0168)\n",
+ "- 'Paris is the capital of France.' (similarity: 0.3752)\n",
+ "- 'Paris is the capital of France.' (similarity: 0.3752)\n",
+ "- 'What is the population of France?' (similarity: 0.0897)\n",
+ "- 'What is the population of France?' (similarity: 0.0897)\n",
+ "- 'How big is Paris?' (similarity: 0.7366)\n",
+ "- 'How big is Paris?' (similarity: 0.7366)\n",
+ "- 'What is the weather like in Paris?' (similarity: 0.6574)\n",
+ "- 'What is the weather like in Paris?' (similarity: 0.6574)\n"
+ ]
+ }
+ ],
+ "source": [
+ "run_model_client_embedding_usage()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "kOKN7jZAkZz0"
+ },
+ "source": [
+ "### Adalflow - model_client() - **OpenAI model** LLM Multichat Usage (ModelType.LLM)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 13,
+ "metadata": {
+ "id": "VIQaK1dmkHiJ"
+ },
+ "outputs": [],
+ "source": [
+ "from adalflow.components.model_client import OpenAIClient\n",
+ "from adalflow.core.types import ModelType\n",
+ "from adalflow.utils import setup_env\n",
+ "from typing import List, Dict"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {
+ "id": "BRRfPHh4kHY9"
+ },
+ "outputs": [],
+ "source": [
+ "class ChatConversation:\n",
+ " def __init__(self):\n",
+ " # Initialize the OpenAI client for managing API calls\n",
+ " self.openai_client = OpenAIClient()\n",
+ " # Initialize an empty conversation history to store chat messages\n",
+ " self.conversation_history: str = \"\"\n",
+ " # Model parameters to customize the API call\n",
+ " self.model_kwargs = {\n",
+ " \"model\": \"gpt-3.5-turbo\",\n",
+ " \"temperature\": 0.5, # Controls randomness; 0.5 for balanced responses\n",
+ " \"max_tokens\": 100, # Limits the response length\n",
+ " }\n",
+ "\n",
+ " def add_user_message(self, message: str):\n",
+ " \"\"\"Add a user message to the conversation history\"\"\"\n",
+ " self.conversation_history += (\n",
+ " f\" {message} \" # Format for user message\n",
+ " )\n",
+ "\n",
+ " def add_assistant_message(self, message: str):\n",
+ " \"\"\"Add an assistant message to the conversation history\"\"\"\n",
+ " self.conversation_history += (\n",
+ " f\" {message} \" # Format for assistant message\n",
+ " )\n",
+ "\n",
+ " def get_response(self) -> str:\n",
+ " \"\"\"Get response from the model based on conversation history\"\"\"\n",
+ " # Convert the conversation history and model parameters into API arguments\n",
+ " api_kwargs = self.openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=self.conversation_history,\n",
+ " model_kwargs=self.model_kwargs,\n",
+ " model_type=ModelType.LLM,\n",
+ " )\n",
+ " print(f\"api_kwargs: {api_kwargs}\") # Debugging output to verify API parameters\n",
+ "\n",
+ " # Call the API with the generated arguments to get a response\n",
+ " response = self.openai_client.call(\n",
+ " api_kwargs=api_kwargs, model_type=ModelType.LLM\n",
+ " )\n",
+ " print(\"response: \", response) # Debugging output for raw API response\n",
+ "\n",
+ " # Extract and parse the text response from the API output\n",
+ " response_text = self.openai_client.parse_chat_completion(response)\n",
+ " # Update conversation history with the assistant's response\n",
+ " self.add_assistant_message(response_text)\n",
+ " return response_text # Return the assistant's response to the caller"
+ ]
+ },
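+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note that `parse_chat_completion` returns a `GeneratorOutput` object rather than a plain string, which is why the conversation history printed below contains `GeneratorOutput(...)` entries. A minimal, hypothetical helper for keeping only the text, assuming the `raw_response` field shown in those outputs:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "def extract_text(generator_output) -> str:\n",
+ "    # Hypothetical helper: GeneratorOutput carries the model's text in\n",
+ "    # `raw_response` (visible in the printed outputs below); fall back to\n",
+ "    # str() if the field is absent\n",
+ "    return getattr(generator_output, \"raw_response\", None) or str(generator_output)"
+ ]
+ },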
+ {
+ "cell_type": "code",
+ "execution_count": 15,
+ "metadata": {
+ "id": "9HWtlBnZkHLU"
+ },
+ "outputs": [],
+ "source": [
+ "def check_chat_conversation():\n",
+ " # Initialize a new chat conversation\n",
+ " chat = ChatConversation()\n",
+ "\n",
+ " # Example list of user questions to simulate a multi-turn conversation\n",
+ " questions = [\n",
+ " \"What is the capital of France?\",\n",
+ " \"What is its population?\",\n",
+ " \"Tell me about its famous landmarks\",\n",
+ " ]\n",
+ "\n",
+ " # Iterate through each question in the list\n",
+ " for question in questions:\n",
+ " print(f\"\\nUser: {question}\") # Display the user's question\n",
+ " chat.add_user_message(\n",
+ " question\n",
+ " ) # Add the user question to the conversation history\n",
+ "\n",
+ " response = (\n",
+ " chat.get_response()\n",
+ " ) # Get assistant's response based on conversation history\n",
+ " print(f\"Assistant: {response}\") # Display the assistant's response\n",
+ "\n",
+ " # Display the full conversation history after all exchanges\n",
+ " print(\"\\nFull Conversation History:\")\n",
+ " print(chat.conversation_history) # Print the accumulated conversation history"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "mLFopVh0lJJh",
+ "outputId": "eb6d555e-1562-4c0c-ad94-57044a853eb4"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "User: What is the capital of France?\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': ' What is the capital of France? '}]}\n",
+ "response: ChatCompletion(id='chatcmpl-ASHotWDnw55BOd5d3zWzs0ucxztJr', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305047, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=20, total_tokens=27, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=20, total_tokens=27), raw_response='The capital of France is Paris.', metadata=None)\n",
+ "\n",
+ "User: What is its population?\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': \" What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=20, total_tokens=27), raw_response='The capital of France is Paris.', metadata=None) What is its population? \"}]}\n",
+ "response: ChatCompletion(id='chatcmpl-ASHouXidu63zZHiV9uFZ1rH5SFNYj', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The population of Paris, the capital of France, is approximately 2.2 million people.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305048, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=19, prompt_tokens=82, total_tokens=101, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=19, prompt_tokens=82, total_tokens=101), raw_response='The population of Paris, the capital of France, is approximately 2.2 million people.', metadata=None)\n",
+ "\n",
+ "User: Tell me about its famous landmarks\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': \" What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=20, total_tokens=27), raw_response='The capital of France is Paris.', metadata=None) What is its population? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=19, prompt_tokens=82, total_tokens=101), raw_response='The population of Paris, the capital of France, is approximately 2.2 million people.', metadata=None) Tell me about its famous landmarks \"}]}\n",
+ "response: ChatCompletion(id='chatcmpl-ASHovLb6YpzmwJ8Noc90GdeLpvvrW', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Paris, the capital of France, is known for its iconic landmarks such as the Eiffel Tower, Notre-Dame Cathedral, Louvre Museum, Champs-Élysées, and Arc de Triomphe. These landmarks attract millions of tourists from around the world each year and are symbols of the city's rich history and cultural heritage.\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305049, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=69, prompt_tokens=157, total_tokens=226, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=69, prompt_tokens=157, total_tokens=226), raw_response=\"Paris, the capital of France, is known for its iconic landmarks such as the Eiffel Tower, Notre-Dame Cathedral, Louvre Museum, Champs-Élysées, and Arc de Triomphe. These landmarks attract millions of tourists from around the world each year and are symbols of the city's rich history and cultural heritage.\", metadata=None)\n",
+ "\n",
+ "Full Conversation History:\n",
+ " What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=20, total_tokens=27), raw_response='The capital of France is Paris.', metadata=None) What is its population? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=19, prompt_tokens=82, total_tokens=101), raw_response='The population of Paris, the capital of France, is approximately 2.2 million people.', metadata=None) Tell me about its famous landmarks GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=69, prompt_tokens=157, total_tokens=226), raw_response=\"Paris, the capital of France, is known for its iconic landmarks such as the Eiffel Tower, Notre-Dame Cathedral, Louvre Museum, Champs-Élysées, and Arc de Triomphe. These landmarks attract millions of tourists from around the world each year and are symbols of the city's rich history and cultural heritage.\", metadata=None) \n"
+ ]
+ }
+ ],
+ "source": [
+ "# You can observe that each question is depended on previous question and the chat responds in apt manner\n",
+ "check_chat_conversation()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BhD8AJWyldK-"
+ },
+ "source": [
+ "### Adalflow - model_client() - **OpenAI model** LLM Multichat Usage (ModelType.LLM) - asynchronous (async())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "metadata": {
+ "id": "AUjjiCnulcF8"
+ },
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "from adalflow.components.model_client import OpenAIClient\n",
+ "from adalflow.core.types import ModelType\n",
+ "from typing import List"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "metadata": {
+ "id": "Z5i9_q3vlo3C"
+ },
+ "outputs": [],
+ "source": [
+ "class ChatConversationAsync:\n",
+ " def __init__(self):\n",
+ " # Initialize with an asynchronous OpenAI client\n",
+ " self.openai_client = OpenAIClient()\n",
+ "\n",
+ " # Default model parameters for the chat\n",
+ " self.model_kwargs = {\n",
+ " \"model\": \"gpt-3.5-turbo\", # Model used for chat\n",
+ " \"temperature\": 0.5, # Controls randomness in response\n",
+ " \"max_tokens\": 100, # Maximum tokens in the generated response\n",
+ " }\n",
+ "\n",
+ " async def get_response(self, message: str) -> str:\n",
+ " \"\"\"Asynchronously get a response from the model for a given user message\"\"\"\n",
+ "\n",
+ " # Convert input message and model parameters into the format expected by the API\n",
+ " api_kwargs = self.openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=message, # User's message input\n",
+ " model_kwargs=self.model_kwargs, # Model-specific settings\n",
+ " model_type=ModelType.LLM, # Specify the model type as a language model (LLM)\n",
+ " )\n",
+ " print(f\"api_kwargs: {api_kwargs}\") # Log the API arguments for debugging\n",
+ "\n",
+ " # Make an asynchronous API call to OpenAI's model\n",
+ " response = await self.openai_client.acall(\n",
+ " api_kwargs=api_kwargs, # Pass the prepared arguments\n",
+ " model_type=ModelType.LLM, # Specify the model type again\n",
+ " )\n",
+ " print(\"response: \", response) # Print the raw response from the API\n",
+ "\n",
+ " # Parse the API response to extract the assistant's reply (chat completion)\n",
+ " response_text = self.openai_client.parse_chat_completion(response)\n",
+ " return response_text # Return the parsed response text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "metadata": {
+ "id": "nNVmwsO2lrWX"
+ },
+ "outputs": [],
+ "source": [
+ "async def check_chat_conversations_async():\n",
+ " # Create an instance of ChatConversationAsync to handle asynchronous operations\n",
+ " chat = ChatConversationAsync()\n",
+ "\n",
+ " # List of unrelated questions that will be handled in parallel\n",
+ " questions = [\n",
+ " \"What is the capital of France?\", # Question 1\n",
+ " \"Is dog a wild animal?\", # Question 2\n",
+ " \"Tell me about amazon forest\", # Question 3\n",
+ " ]\n",
+ "\n",
+ " # Create a list of asynchronous tasks, one for each question\n",
+ " # Each task calls the get_response method asynchronously for a question\n",
+ " tasks = [chat.get_response(question) for question in questions]\n",
+ "\n",
+ " # Gather the results of all asynchronous tasks concurrently\n",
+ " responses = await asyncio.gather(*tasks)\n",
+ "\n",
+ " # Print the responses from the assistant along with the respective user questions\n",
+ " for question, response in zip(questions, responses):\n",
+ " print(f\"\\nUser: {question}\")\n",
+ " print(f\"Assistant: {response}\")"
+ ]
+ },
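+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Note: `asyncio.gather` returns results in the same order as the tasks passed to it, so `zip(questions, responses)` pairs each question with its own answer even though the API calls complete concurrently."
+ ]
+ },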
+ {
+ "cell_type": "code",
+ "execution_count": 20,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "T8hdUjjeltVo",
+ "outputId": "53dc0385-afb6-4268-c3cc-dde9385b565e"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'What is the capital of France?'}]}\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'Is dog a wild animal?'}]}\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'Tell me about amazon forest'}]}\n",
+ "response: ChatCompletion(id='chatcmpl-ASHqEOWoBOIiulzd0aoXeyKKb9npb', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305130, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=14, total_tokens=21, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "response: ChatCompletion(id='chatcmpl-ASHqE6WAmS4wnRMdD20PdjsdyOcuP', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Dogs were originally domesticated from wolves thousands of years ago, and they are now considered domestic animals. While they share some characteristics with wild animals, such as hunting instincts and pack behavior, dogs have been bred and trained by humans for companionship and various roles, such as working dogs, service animals, and pets. So, in general, dogs are not considered wild animals.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305130, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=76, prompt_tokens=13, total_tokens=89, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "response: ChatCompletion(id='chatcmpl-ASHqEexoH4z6uZsDkoRwwmyFuoSyZ', choices=[Choice(finish_reason='length', index=0, logprobs=None, message=ChatCompletionMessage(content='The Amazon Rainforest, also known as the Amazon Jungle, is a vast and dense tropical rainforest that covers much of the Amazon Basin in South America. It is the largest rainforest in the world, spanning over 5.5 million square kilometers (2.1 million square miles) across nine countries, including Brazil, Peru, Colombia, and Venezuela.\\n\\nThe Amazon Rainforest is home to an incredibly diverse array of plant and animal species, many of which are found nowhere else on Earth. It', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305130, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=12, total_tokens=112, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "\n",
+ "User: What is the capital of France?\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=14, total_tokens=21), raw_response='The capital of France is Paris.', metadata=None)\n",
+ "\n",
+ "User: Is dog a wild animal?\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=76, prompt_tokens=13, total_tokens=89), raw_response='Dogs were originally domesticated from wolves thousands of years ago, and they are now considered domestic animals. While they share some characteristics with wild animals, such as hunting instincts and pack behavior, dogs have been bred and trained by humans for companionship and various roles, such as working dogs, service animals, and pets. So, in general, dogs are not considered wild animals.', metadata=None)\n",
+ "\n",
+ "User: Tell me about amazon forest\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=12, total_tokens=112), raw_response='The Amazon Rainforest, also known as the Amazon Jungle, is a vast and dense tropical rainforest that covers much of the Amazon Basin in South America. It is the largest rainforest in the world, spanning over 5.5 million square kilometers (2.1 million square miles) across nine countries, including Brazil, Peru, Colombia, and Venezuela.\\n\\nThe Amazon Rainforest is home to an incredibly diverse array of plant and animal species, many of which are found nowhere else on Earth. It', metadata=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Run the asynchronous function if in a file\n",
+ "# asyncio.run(check_chat_conversations_async())\n",
+ "\n",
+ "# in jupyter notebook\n",
+ "await check_chat_conversations_async()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "_VvhvKmimfIt"
+ },
+ "source": [
+ "### Adalflow - model_client() - **OpenAI model** LLM Multichat Usage (ModelType.LLM) - Benchmark sync() vs async()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "metadata": {
+ "id": "tMT0BsaRmt6z"
+ },
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "import time\n",
+ "from adalflow.components.model_client import (\n",
+ " OpenAIClient,\n",
+ ") # Assuming OpenAIClient with .call() and .acall() is available\n",
+ "from adalflow.core.types import ModelType"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {
+ "id": "QrzYgdsEm9sz"
+ },
+ "outputs": [],
+ "source": [
+ "# Initialize the OpenAI client\n",
+ "openai_client = OpenAIClient()\n",
+ "\n",
+ "# Sample prompt for testing\n",
+ "prompt = \"Tell me a joke.\"\n",
+ "\n",
+ "model_kwargs = {\"model\": \"gpt-3.5-turbo\", \"temperature\": 0.5, \"max_tokens\": 100}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {
+ "id": "amdQNSmvnB-L"
+ },
+ "outputs": [],
+ "source": [
+ "# Synchronous function for benchmarking .call()\n",
+ "def benchmark_sync_call(api_kwargs, runs=10):\n",
+ " \"\"\"\n",
+ " Benchmark the synchronous .call() method by running it multiple times.\n",
+ "\n",
+ " Parameters:\n",
+ " - api_kwargs: The arguments to be passed to the API call\n",
+ " - runs: The number of times to run the call (default is 10)\n",
+ " \"\"\"\n",
+ " # List to store responses\n",
+ " responses = []\n",
+ "\n",
+ " # Record the start time of the benchmark\n",
+ " start_time = time.time()\n",
+ "\n",
+ " # Perform synchronous API calls for the specified number of runs\n",
+ " responses = [\n",
+ " openai_client.call(\n",
+ " api_kwargs=api_kwargs, # API arguments\n",
+ " model_type=ModelType.LLM, # Model type (e.g., LLM for language models)\n",
+ " )\n",
+ " for _ in range(runs) # Repeat 'runs' times\n",
+ " ]\n",
+ "\n",
+ " # Record the end time after all calls are completed\n",
+ " end_time = time.time()\n",
+ "\n",
+ " # Output the results of each synchronous call\n",
+ " for i, response in enumerate(responses):\n",
+ " print(f\"sync call {i + 1} completed: {response}\")\n",
+ "\n",
+ " # Print the total time taken for all synchronous calls\n",
+ " print(f\"\\nSynchronous benchmark completed in {end_time - start_time:.2f} seconds\")\n",
+ "\n",
+ "\n",
+ "# Asynchronous function for benchmarking .acall()\n",
+ "async def benchmark_async_acall(api_kwargs, runs=10):\n",
+ " \"\"\"\n",
+ " Benchmark the asynchronous .acall() method by running it multiple times concurrently.\n",
+ "\n",
+ " Parameters:\n",
+ " - api_kwargs: The arguments to be passed to the API call\n",
+ " - runs: The number of times to run the asynchronous call (default is 10)\n",
+ " \"\"\"\n",
+ " # Record the start time of the benchmark\n",
+ " start_time = time.time()\n",
+ "\n",
+ " # Create a list of asynchronous tasks for the specified number of runs\n",
+ " tasks = [\n",
+ " openai_client.acall(\n",
+ " api_kwargs=api_kwargs, # API arguments\n",
+ " model_type=ModelType.LLM, # Model type (e.g., LLM for language models)\n",
+ " )\n",
+ " for _ in range(runs) # Repeat 'runs' times\n",
+ " ]\n",
+ "\n",
+ " # Execute all tasks concurrently and wait for them to finish\n",
+ " responses = await asyncio.gather(*tasks)\n",
+ "\n",
+ " # Record the end time after all tasks are completed\n",
+ " end_time = time.time()\n",
+ "\n",
+ " # Output the results of each asynchronous call\n",
+ " for i, response in enumerate(responses):\n",
+ " print(f\"Async call {i + 1} completed: {response}\")\n",
+ "\n",
+ " # Print the total time taken for all asynchronous calls\n",
+ " print(f\"\\nAsynchronous benchmark completed in {end_time - start_time:.2f} seconds\")"
+ ]
+ },
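+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Because `.call()` runs the requests sequentially while `.acall()` issues them concurrently via `asyncio.gather`, the asynchronous benchmark should complete many times faster for these I/O-bound API calls (compare the timings printed below)."
+ ]
+ },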
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "FNmiGG_bnD7Q",
+ "outputId": "242071e3-18e1-44aa-c99a-8fac71fbb84c"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Starting synchronous benchmark...\n",
+ "\n",
+ "sync call 1 completed: ChatCompletion(id='chatcmpl-ASHqYcxCVNAnLlsrnRvxh5cRrQOwf', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two-tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305150, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 2 completed: ChatCompletion(id='chatcmpl-ASHqZz3G3jqGlHtKjoO9mbYjjS1Af', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305151, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 3 completed: ChatCompletion(id='chatcmpl-ASHqZJmWUUDSrVElavHZOCuvSNQ8q', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305151, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 4 completed: ChatCompletion(id='chatcmpl-ASHqdMDEfY4pVAAom6RbmvnsBYdc1', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two-tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305155, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 5 completed: ChatCompletion(id='chatcmpl-ASHqdrGYZofAuDFGyAPq7mPsub78v', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305155, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=13, prompt_tokens=12, total_tokens=25, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 6 completed: ChatCompletion(id='chatcmpl-ASHqerqL9a6ev9YRNaSzy3mskQOmY', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305156, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 7 completed: ChatCompletion(id='chatcmpl-ASHqeQq3iPrHepIld9SSg2Pzsxc4N', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305156, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=16, prompt_tokens=12, total_tokens=28, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 8 completed: ChatCompletion(id='chatcmpl-ASHqfD6yeMEwZ6StOT8Ncd00R3No1', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305157, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=16, prompt_tokens=12, total_tokens=28, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 9 completed: ChatCompletion(id='chatcmpl-ASHqgl57WJILsEu4PUj59MayzYnZr', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why don't scientists trust atoms? \\n\\nBecause they make up everything!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305158, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=14, prompt_tokens=12, total_tokens=26, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "sync call 10 completed: ChatCompletion(id='chatcmpl-ASHqgoVKX9legJ43xcSkLR4kgRxTn', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why was the math book sad?\\n\\nBecause it had too many problems.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305158, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=14, prompt_tokens=12, total_tokens=26, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "\n",
+ "Synchronous benchmark completed in 8.77 seconds\n",
+ "\n",
+ "Starting asynchronous benchmark...\n",
+ "\n",
+ "Async call 1 completed: ChatCompletion(id='chatcmpl-ASHqhKMKo8PMbdyiYpNHBQW4oU3J7', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=16, prompt_tokens=12, total_tokens=28, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 2 completed: ChatCompletion(id='chatcmpl-ASHqhWwUpBXRbgKstjV6qei7FzgfG', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=13, prompt_tokens=12, total_tokens=25, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 3 completed: ChatCompletion(id='chatcmpl-ASHqhgTqUmUh5FW2nwTyRLagoKMQ5', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 4 completed: ChatCompletion(id='chatcmpl-ASHqhYzaxguL4P2MDG1AakTiGMIyg', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 5 completed: ChatCompletion(id='chatcmpl-ASHqhsdbfpywUP4KBhqPvUNOcOm1x', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 6 completed: ChatCompletion(id='chatcmpl-ASHqhqdaOKxe5zjf4vpKZAFbH8x5n', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself? Because it was two tired!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=16, prompt_tokens=12, total_tokens=28, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 7 completed: ChatCompletion(id='chatcmpl-ASHqhrXadr2Tf62QM4SAXjLg8iSql', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the golfer bring two pairs of pants? In case he got a hole in one!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=20, prompt_tokens=12, total_tokens=32, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 8 completed: ChatCompletion(id='chatcmpl-ASHqh8mQiGew9qwCOY5UgUilx2SYL', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 9 completed: ChatCompletion(id='chatcmpl-ASHqhNWUNOnSj9LLE7utDW0wz7USX', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why don't scientists trust atoms?\\n\\nBecause they make up everything!\", refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=13, prompt_tokens=12, total_tokens=25, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "Async call 10 completed: ChatCompletion(id='chatcmpl-ASHqhX5u0K2xFoFxyhebnOI9WsT0l', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='Why did the scarecrow win an award? Because he was outstanding in his field!', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305159, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=17, prompt_tokens=12, total_tokens=29, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "\n",
+ "Asynchronous benchmark completed in 0.75 seconds\n"
+ ]
+ }
+ ],
+ "source": [
+ "api_kwargs = openai_client.convert_inputs_to_api_kwargs(\n",
+ " input=prompt, model_kwargs=model_kwargs, model_type=ModelType.LLM\n",
+ ")\n",
+ "\n",
+ "# Run both benchmarks\n",
+ "print(\"Starting synchronous benchmark...\\n\")\n",
+ "benchmark_sync_call(api_kwargs)\n",
+ "\n",
+ "print(\"\\nStarting asynchronous benchmark...\\n\")\n",
+ "await benchmark_async_acall(api_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "QtbUd2K-nPaL"
+ },
+ "source": [
+ "### Adalflow - model_client() - **OpenAI model** LLM Multichat Usage (ModelType.LLM) - Additional Utils -\n",
+ "- get_first_message_content()\n",
+ "- get_all_messages_content()\n",
+ "- get_probabilities()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "metadata": {
+ "id": "ghyzD7tynO4A"
+ },
+ "outputs": [],
+ "source": [
+ "from adalflow.components.model_client import OpenAIClient\n",
+ "from adalflow.core.types import ModelType\n",
+ "from adalflow.utils import setup_env\n",
+ "from adalflow.components.model_client.openai_client import (\n",
+ " get_first_message_content,\n",
+ " get_all_messages_content,\n",
+ " get_probabilities,\n",
+ ")\n",
+ "from adalflow.core import Generator"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "metadata": {
+ "id": "QAaOFTZVn4Yx"
+ },
+ "outputs": [],
+ "source": [
+ "def check_openai_additional_utils(func, model_kwargs):\n",
+ " \"\"\"\n",
+ " This function demonstrates the usage of the OpenAI client and a custom utility function\n",
+ " for generating responses from the LLM model, based on the given query in openai client.\n",
+ "\n",
+ " Parameters:\n",
+ " - func: A function that will be used to parse the chat completion (for custom parsing).\n",
+ " - model_kwargs: The additional model parameters (e.g., temperature, max_tokens) to be used in the model.\n",
+ "\n",
+ " Returns:\n",
+ " - output: The generated response from the model based on the query.\n",
+ " \"\"\"\n",
+ "\n",
+ " # Initialize the OpenAI client with a custom chat completion parser\n",
+ " openai_client = OpenAIClient(chat_completion_parser=func)\n",
+ "\n",
+ " # Define a sample query (user question)\n",
+ " query = \"What is the capital of France?\"\n",
+ "\n",
+ " # Set the model type to LLM (Large Language Model)\n",
+ " model_type = ModelType.LLM\n",
+ "\n",
+ " # Create the prompt by formatting the user query as a conversation\n",
+ " prompt = f\"User: {query}\\n\"\n",
+ "\n",
+ " # Define any additional parameters needed for the model (e.g., the input string)\n",
+ " prompt_kwargs = {\n",
+ " \"input_str\": \"What is the capital of France?\",\n",
+ " }\n",
+ "\n",
+ " # Initialize the Generator with the OpenAI client and model parameters\n",
+ " generator = Generator(model_client=openai_client, model_kwargs=model_kwargs)\n",
+ "\n",
+ " # Execute the generator to get a response for the prompt (using the defined prompt_kwargs)\n",
+ " output = generator(prompt_kwargs=prompt_kwargs)\n",
+ "\n",
+ " # Return the generated output (response from the LLM)\n",
+ " return output"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 27,
+ "metadata": {
+ "id": "hNnBpFjkoXil"
+ },
+ "outputs": [],
+ "source": [
+ "def run_utils_functions():\n",
+ " \"\"\"\n",
+ " This function runs a series of utility functions using different model\n",
+ " configurations for generating responses. It demonstrates how to check\n",
+ " OpenAI model outputs using various utility functions.\n",
+ " \"\"\"\n",
+ "\n",
+ " # Define the model arguments for the probability-based function (with logprobs)\n",
+ " probability_model_kwargs = {\n",
+ " \"model\": \"gpt-3.5-turbo\", # Specify the model version\n",
+ " \"logprobs\": True, # Enable logprobs to get probability distributions for tokens\n",
+ " \"n\": 2, # Request 2 different completions for each query\n",
+ " }\n",
+ "\n",
+ " # Define general model arguments for most other functions\n",
+ " model_kwargs = {\n",
+ " \"model\": \"gpt-3.5-turbo\", # Specify the model version\n",
+ " \"temperature\": 0.5, # Control the randomness of responses (0 is deterministic)\n",
+ " \"max_tokens\": 100, # Set the maximum number of tokens (words) in the response\n",
+ " }\n",
+ "\n",
+ " # List of functions to run with corresponding model arguments\n",
+ " func_list = [\n",
+ " [\n",
+ " get_probabilities,\n",
+ " probability_model_kwargs,\n",
+ " ], # Function to get probabilities with specific kwargs\n",
+ " [\n",
+ " get_first_message_content,\n",
+ " model_kwargs,\n",
+ " ], # Function to get first message content\n",
+ " [\n",
+ " get_all_messages_content,\n",
+ " model_kwargs,\n",
+ " ], # Function to get all messages content in multi-chat scenarios\n",
+ " ]\n",
+ "\n",
+ " # Loop through each function and its corresponding arguments\n",
+ " for each_func in func_list:\n",
+ " # Check the function output using the specified arguments\n",
+ " result = check_openai_additional_utils(each_func[0], each_func[1])\n",
+ "\n",
+ " # Print the function and result for debugging purposes\n",
+ " print(f\"Function: {each_func[0].__name__}, Model Args: {each_func[1]}\")\n",
+ " print(f\"Result: {result}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 28,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "mU6kFzslo6qr",
+ "outputId": "29e6b00e-99d3-4189-d161-3c79806fd19d"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-7.076218e-05, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-1.9361265e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00020163313, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-1.2664457e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-6.704273e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=0.0, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-2.1769476e-05, top_logprobs=[])]\n",
+ "[ChatCompletionTokenLogprob(token='The', bytes=[84, 104, 101], logprob=-7.076218e-05, top_logprobs=[]), ChatCompletionTokenLogprob(token=' capital', bytes=[32, 99, 97, 112, 105, 116, 97, 108], logprob=-1.9361265e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' of', bytes=[32, 111, 102], logprob=-0.00020163313, top_logprobs=[]), ChatCompletionTokenLogprob(token=' France', bytes=[32, 70, 114, 97, 110, 99, 101], logprob=-1.2664457e-06, top_logprobs=[]), ChatCompletionTokenLogprob(token=' is', bytes=[32, 105, 115], logprob=-6.704273e-07, top_logprobs=[]), ChatCompletionTokenLogprob(token=' Paris', bytes=[32, 80, 97, 114, 105, 115], logprob=0.0, top_logprobs=[]), ChatCompletionTokenLogprob(token='.', bytes=[46], logprob=-2.1769476e-05, top_logprobs=[])]\n",
+ "Function: get_probabilities, Model Args: {'model': 'gpt-3.5-turbo', 'logprobs': True, 'n': 2}\n",
+ "Result: GeneratorOutput(id=None, data=[[TokenLogProb(token='The', logprob=-7.076218e-05), TokenLogProb(token=' capital', logprob=-1.9361265e-07), TokenLogProb(token=' of', logprob=-0.00020163313), TokenLogProb(token=' France', logprob=-1.2664457e-06), TokenLogProb(token=' is', logprob=-6.704273e-07), TokenLogProb(token=' Paris', logprob=0.0), TokenLogProb(token='.', logprob=-2.1769476e-05)], [TokenLogProb(token='The', logprob=-7.076218e-05), TokenLogProb(token=' capital', logprob=-1.9361265e-07), TokenLogProb(token=' of', logprob=-0.00020163313), TokenLogProb(token=' France', logprob=-1.2664457e-06), TokenLogProb(token=' is', logprob=-6.704273e-07), TokenLogProb(token=' Paris', logprob=0.0), TokenLogProb(token='.', logprob=-2.1769476e-05)]], error=None, usage=CompletionUsage(completion_tokens=14, prompt_tokens=48, total_tokens=62), raw_response=[[TokenLogProb(token='The', logprob=-7.076218e-05), TokenLogProb(token=' capital', logprob=-1.9361265e-07), TokenLogProb(token=' of', logprob=-0.00020163313), TokenLogProb(token=' France', logprob=-1.2664457e-06), TokenLogProb(token=' is', logprob=-6.704273e-07), TokenLogProb(token=' Paris', logprob=0.0), TokenLogProb(token='.', logprob=-2.1769476e-05)], [TokenLogProb(token='The', logprob=-7.076218e-05), TokenLogProb(token=' capital', logprob=-1.9361265e-07), TokenLogProb(token=' of', logprob=-0.00020163313), TokenLogProb(token=' France', logprob=-1.2664457e-06), TokenLogProb(token=' is', logprob=-6.704273e-07), TokenLogProb(token=' Paris', logprob=0.0), TokenLogProb(token='.', logprob=-2.1769476e-05)]], metadata=None)\n",
+ "Function: get_first_message_content, Model Args: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100}\n",
+ "Result: GeneratorOutput(id=None, data='The capital of France is Paris.', error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=48, total_tokens=55), raw_response='The capital of France is Paris.', metadata=None)\n",
+ "Function: get_all_messages_content, Model Args: {'model': 'gpt-3.5-turbo', 'temperature': 0.5, 'max_tokens': 100}\n",
+ "Result: GeneratorOutput(id=None, data=['The capital of France is Paris.'], error=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=48, total_tokens=55), raw_response=['The capital of France is Paris.'], metadata=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "run_utils_functions()"
+ ]
+ },
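+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The three parsers can also be used without a `Generator`. A minimal sketch (assuming `setup_env()` has already loaded a valid `OPENAI_API_KEY`): pass a parser to `OpenAIClient` and apply it via `parse_chat_completion()`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Minimal sketch: use a parser directly on a raw ChatCompletion,\n",
+ "# without wrapping the client in a Generator.\n",
+ "client = OpenAIClient(chat_completion_parser=get_first_message_content)\n",
+ "\n",
+ "api_kwargs = client.convert_inputs_to_api_kwargs(\n",
+ "    input=\"User: What is the capital of France?\\n\",\n",
+ "    model_kwargs={\"model\": \"gpt-3.5-turbo\"},\n",
+ "    model_type=ModelType.LLM,\n",
+ ")\n",
+ "completion = client.call(api_kwargs=api_kwargs, model_type=ModelType.LLM)\n",
+ "\n",
+ "# parse_chat_completion applies the configured parser to the completion\n",
+ "print(client.parse_chat_completion(completion))"
+ ]
+ },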
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "mkvEEtXLrDZm"
+ },
+ "source": [
+ "### Adalflow - model_client() - **Groq model** LLM Multichat Usage (ModelType.LLM)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "metadata": {
+ "id": "SFHuk3RErCvP"
+ },
+ "outputs": [],
+ "source": [
+ "from adalflow.components.model_client import GroqAPIClient\n",
+ "from adalflow.core.types import ModelType\n",
+ "from adalflow.utils import setup_env\n",
+ "from typing import List, Dict"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "metadata": {
+ "id": "cN4hsbLdrS7k"
+ },
+ "outputs": [],
+ "source": [
+ "class ChatConversation:\n",
+ " def __init__(self):\n",
+ " \"\"\"\n",
+ " Initialize a new ChatConversation object.\n",
+ " - GroqAPIClient is used to interact with the Groq model.\n",
+ " - conversation_history keeps track of the conversation between the user and assistant.\n",
+ " - model_kwargs contains the model parameters like temperature and max tokens.\n",
+ " \"\"\"\n",
+ " self.groq_client = (\n",
+ " GroqAPIClient()\n",
+ " ) # Initialize GroqAPIClient for model interaction\n",
+ " self.conversation_history: str = (\n",
+ " \"\" # Initialize conversation history as an empty string\n",
+ " )\n",
+ " self.model_kwargs = {\n",
+ " \"model\": \"llama3-8b-8192\", # Specify the model to use\n",
+ " \"temperature\": 0.5, # Set the temperature for response variability\n",
+ " \"max_tokens\": 100, # Limit the number of tokens in the response\n",
+ " }\n",
+ "\n",
+ " def add_user_message(self, message: str):\n",
+ " \"\"\"\n",
+ " Add a user message to the conversation history in the required format.\n",
+ " The message is wrapped with tags for better processing by the assistant.\n",
+ " \"\"\"\n",
+ " self.conversation_history += (\n",
+ " f\" {message} \" # Append user message to history\n",
+ " )\n",
+ "\n",
+ " def add_assistant_message(self, message: str):\n",
+ " \"\"\"\n",
+ " Add an assistant message to the conversation history in the required format.\n",
+ " The message is wrapped with tags for better processing.\n",
+ " \"\"\"\n",
+ " self.conversation_history += (\n",
+ " f\" {message} \" # Append assistant message to history\n",
+ " )\n",
+ "\n",
+ " def get_response(self) -> str:\n",
+ " \"\"\"\n",
+ " Generate a response from the assistant based on the conversation history.\n",
+ " - Converts the conversation history and model kwargs into the format required by the Groq API.\n",
+ " - Calls the API to get the response.\n",
+ " - Parses and adds the assistant's reply to the conversation history.\n",
+ " \"\"\"\n",
+ " # Prepare the request for the Groq API, converting the inputs into the correct format\n",
+ " api_kwargs = self.groq_client.convert_inputs_to_api_kwargs(\n",
+ " input=self.conversation_history, # Use the conversation history as input\n",
+ " model_kwargs=self.model_kwargs, # Include model-specific parameters\n",
+ " model_type=ModelType.LLM, # Specify the model type (Large Language Model)\n",
+ " )\n",
+ " print(f\"api_kwargs: {api_kwargs}\") # Log the API request parameters\n",
+ "\n",
+ " # Call the Groq model API to get the response\n",
+ " response = self.groq_client.call(\n",
+ " api_kwargs=api_kwargs,\n",
+ " model_type=ModelType.LLM, # Specify the model type again for clarity\n",
+ " )\n",
+ " print(\"response: \", response) # Log the API response\n",
+ "\n",
+ " # Parse the response to extract the assistant's reply\n",
+ " response_text = self.groq_client.parse_chat_completion(response)\n",
+ "\n",
+ " # Add the assistant's message to the conversation history\n",
+ " self.add_assistant_message(response_text)\n",
+ "\n",
+ " # Return the assistant's response text\n",
+ " return response_text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "metadata": {
+ "id": "pvqsFTEsrV2M"
+ },
+ "outputs": [],
+ "source": [
+ "def check_chat_conversation():\n",
+ " \"\"\"\n",
+ " This function simulates a multi-turn conversation between a user and an assistant.\n",
+ " It demonstrates how user inputs are processed, and the assistant generates responses,\n",
+ " while maintaining the conversation history for each query.\n",
+ " \"\"\"\n",
+ " # Initialize the ChatConversation object\n",
+ " chat = ChatConversation() # This creates an instance of the ChatConversation class\n",
+ "\n",
+ " # Define a list of user questions for a multi-turn conversation\n",
+ " questions = [\n",
+ " \"What is the capital of France?\", # First user question\n",
+ " \"What is its population?\", # Second user question\n",
+ " \"Tell me about its famous landmarks\", # Third user question\n",
+ " ]\n",
+ "\n",
+ " # Loop through each question and get the assistant's response\n",
+ " for question in questions:\n",
+ " # Print the current question from the user\n",
+ " print(f\"\\nUser: {question}\")\n",
+ "\n",
+ " # Add the user's message to the conversation history\n",
+ " chat.add_user_message(question)\n",
+ "\n",
+ " # Get the assistant's response based on the conversation history\n",
+ " response = chat.get_response()\n",
+ "\n",
+ " # Print the assistant's response\n",
+ " print(f\"Assistant: {response}\")\n",
+ "\n",
+ " # After the conversation, print the full conversation history\n",
+ " print(\"\\nFull Conversation History:\")\n",
+ " print(\n",
+ " chat.conversation_history\n",
+ " ) # This will print all messages (user and assistant) in the conversation history"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "dBNWQn_arXcE",
+ "outputId": "743e5d80-8a6b-4b0f-cff2-af11f0df051d"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "User: What is the capital of France?\n",
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': ' What is the capital of France? '}]}\n",
+ "response: ChatCompletion(id='chatcmpl-c68fccb5-ed2b-4745-be81-acbac792387f', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', role='assistant', function_call=None, tool_calls=None))], created=1731305352, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=8, prompt_tokens=23, total_tokens=31, completion_time=0.006666667, prompt_time=0.003034232, queue_time=0.010475318, total_time=0.009700899), x_groq={'id': 'req_01jccxebfgf5qbnaea72y9atrm'})\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=23, total_tokens=31), raw_response='The capital of France is Paris.', metadata=None)\n",
+ "\n",
+ "User: What is its population?\n",
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': \" What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=23, total_tokens=31), raw_response='The capital of France is Paris.', metadata=None) What is its population? \"}]}\n",
+ "response: ChatCompletion(id='chatcmpl-e6ff7c1e-437c-49d9-bef7-5c6834d3e169', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The population of Paris, the capital of France, is approximately 2.1 million people within its city limits. However, the metropolitan area of Paris, which includes the surrounding suburbs, has a population of over 12.2 million people, making it one of the most populous metropolitan areas in Europe.', role='assistant', function_call=None, tool_calls=None))], created=1731305352, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=62, prompt_tokens=85, total_tokens=147, completion_time=0.051666667, prompt_time=0.003680399, queue_time=0.009721731, total_time=0.055347066), x_groq={'id': 'req_01jccxebk7ejstbdxzerdj643q'})\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=62, prompt_tokens=85, total_tokens=147), raw_response='The population of Paris, the capital of France, is approximately 2.1 million people within its city limits. However, the metropolitan area of Paris, which includes the surrounding suburbs, has a population of over 12.2 million people, making it one of the most populous metropolitan areas in Europe.', metadata=None)\n",
+ "\n",
+ "User: Tell me about its famous landmarks\n",
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': \" What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=23, total_tokens=31), raw_response='The capital of France is Paris.', metadata=None) What is its population? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=62, prompt_tokens=85, total_tokens=147), raw_response='The population of Paris, the capital of France, is approximately 2.1 million people within its city limits. However, the metropolitan area of Paris, which includes the surrounding suburbs, has a population of over 12.2 million people, making it one of the most populous metropolitan areas in Europe.', metadata=None) Tell me about its famous landmarks \"}]}\n",
+ "response: ChatCompletion(id='chatcmpl-6d202bb8-d1fc-471e-a7cd-9dd63fe4f9b8', choices=[Choice(finish_reason='length', index=0, logprobs=None, message=ChatCompletionMessage(content=\"GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=50, prompt_tokens=74, total_tokens=124), raw_response='Paris, the capital of France, is famous for its stunning architecture, art museums, and iconic landmarks. Some of the most famous landmarks in Paris include:\\n\\n* The Eiffel Tower: Built for the 1889 World\\\\'s Fair, the Eiffel Tower is an iron lattice tower that stands 324 meters tall and is\", role='assistant', function_call=None, tool_calls=None))], created=1731305352, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=100, prompt_tokens=202, total_tokens=302, completion_time=0.083333333, prompt_time=0.008920166, queue_time=0.006389374, total_time=0.092253499), x_groq={'id': 'req_01jccxebrfemjb5ag1a66d6jxc'})\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=202, total_tokens=302), raw_response=\"GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=50, prompt_tokens=74, total_tokens=124), raw_response='Paris, the capital of France, is famous for its stunning architecture, art museums, and iconic landmarks. Some of the most famous landmarks in Paris include:\\n\\n* The Eiffel Tower: Built for the 1889 World\\\\'s Fair, the Eiffel Tower is an iron lattice tower that stands 324 meters tall and is\", metadata=None)\n",
+ "\n",
+ "Full Conversation History:\n",
+ " What is the capital of France? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=23, total_tokens=31), raw_response='The capital of France is Paris.', metadata=None) What is its population? GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=62, prompt_tokens=85, total_tokens=147), raw_response='The population of Paris, the capital of France, is approximately 2.1 million people within its city limits. However, the metropolitan area of Paris, which includes the surrounding suburbs, has a population of over 12.2 million people, making it one of the most populous metropolitan areas in Europe.', metadata=None) Tell me about its famous landmarks GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=202, total_tokens=302), raw_response=\"GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=50, prompt_tokens=74, total_tokens=124), raw_response='Paris, the capital of France, is famous for its stunning architecture, art museums, and iconic landmarks. Some of the most famous landmarks in Paris include:\\n\\n* The Eiffel Tower: Built for the 1889 World\\\\'s Fair, the Eiffel Tower is an iron lattice tower that stands 324 meters tall and is\", metadata=None) \n"
+ ]
+ }
+ ],
+ "source": [
+ "check_chat_conversation()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "EhF6taMXniS7"
+ },
+ "source": [
+ "### Adalflow - model_client() - **Groq model** LLM Multichat Usage (ModelType.LLM) - asynchronous (async())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "metadata": {
+ "id": "6pqSxmL_s11g"
+ },
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "from adalflow.components.model_client import GroqAPIClient\n",
+ "from adalflow.core.types import ModelType\n",
+ "from typing import List"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {
+ "id": "40LRTSyOr884"
+ },
+ "outputs": [],
+ "source": [
+ "class ChatConversation:\n",
+ " def __init__(self):\n",
+ " # Using an asynchronous client for communication with GroqAPI\n",
+ " self.groq_client = GroqAPIClient() # Create an instance of GroqAPIClient\n",
+ " # Model configuration parameters (e.g., Llama model with 8b parameters and 8192 context length)\n",
+ " self.model_kwargs = {\n",
+ " \"model\": \"llama3-8b-8192\", # Llama model with specific size\n",
+ " \"temperature\": 0.5, # Degree of randomness in the model's responses\n",
+ " \"max_tokens\": 100, # Maximum number of tokens in the response\n",
+ " }\n",
+ "\n",
+ " async def get_response(self, message: str) -> str:\n",
+ " \"\"\"Get response from the model for a single message asynchronously\"\"\"\n",
+ "\n",
+ " # Convert the user input message to the appropriate format for the Groq API\n",
+ " api_kwargs = self.groq_client.convert_inputs_to_api_kwargs(\n",
+ " input=message, # User's input message\n",
+ " model_kwargs=self.model_kwargs, # Model parameters\n",
+ " model_type=ModelType.LLM, # Model type for large language models (LLM)\n",
+ " )\n",
+ " print(f\"api_kwargs: {api_kwargs}\") # Print the API arguments for debugging\n",
+ "\n",
+ " # Asynchronously call the Groq API with the provided API arguments\n",
+ " response = await self.groq_client.acall(\n",
+ " api_kwargs=api_kwargs, # Pass the API arguments\n",
+ " model_type=ModelType.LLM, # Specify the model type\n",
+ " )\n",
+ " print(\"response: \", response) # Print the API response for debugging\n",
+ "\n",
+ " # Parse the response to extract the assistant's reply from the API response\n",
+ " response_text = self.groq_client.parse_chat_completion(response)\n",
+ " return response_text # Return the assistant's response text"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "metadata": {
+ "id": "Y-n1ksBSsC-J"
+ },
+ "outputs": [],
+ "source": [
+ "async def check_chat_conversations():\n",
+ " # Create an instance of ChatConversation\n",
+ " chat = ChatConversation()\n",
+ "\n",
+ " # List of unrelated questions for independent async calls\n",
+ " questions = [\n",
+ " \"What is the capital of France?\",\n",
+ " \"Is dog a wild animal ?\",\n",
+ " \"Tell me about amazon forest\",\n",
+ " ]\n",
+ "\n",
+ " # Run each question as an independent asynchronous task\n",
+ " tasks = [chat.get_response(question) for question in questions]\n",
+ " # Gather all the responses concurrently\n",
+ " responses = await asyncio.gather(*tasks)\n",
+ "\n",
+ " # Display each response alongside the question\n",
+ " for question, response in zip(questions, responses):\n",
+ " print(f\"\\nUser: {question}\")\n",
+ " print(f\"Assistant: {response}\")"
+ ]
+ },
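+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The cell below awaits the coroutine directly, which works because Jupyter already runs an event loop. In a plain Python script there is no running loop, so you would drive it with `asyncio.run()` instead; a minimal sketch (shown commented out because `asyncio.run()` cannot be called inside the notebook's running loop):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "\n",
+ "# Script equivalent of `await check_chat_conversations()`;\n",
+ "# asyncio.run() creates and manages the event loop for you:\n",
+ "#\n",
+ "# if __name__ == \"__main__\":\n",
+ "#     asyncio.run(check_chat_conversations())"
+ ]
+ },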
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "kvqOTUknsKMI",
+ "outputId": "df47682f-db10-4439-98fc-7cd0c8486776"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'What is the capital of France?'}]}\n",
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'Is dog a wild animal ?'}]}\n",
+ "api_kwargs: {'model': 'llama3-8b-8192', 'temperature': 0.5, 'max_tokens': 100, 'messages': [{'role': 'system', 'content': 'Tell me about amazon forest'}]}\n",
+ "response: ChatCompletion(id='chatcmpl-d2fb086a-5d23-409e-b060-4c00578611fe', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', role='assistant', function_call=None, tool_calls=None))], created=1731305379, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=8, prompt_tokens=17, total_tokens=25, completion_time=0.006666667, prompt_time=0.003519913, queue_time=0.010127806000000001, total_time=0.01018658), x_groq={'id': 'req_01jccxf5szf5sas99m0xhrz2g8'})\n",
+ "response: ChatCompletion(id='chatcmpl-37af21d1-dd36-4ee4-a4f3-6cce914b25dd', choices=[Choice(finish_reason='length', index=0, logprobs=None, message=ChatCompletionMessage(content='The answer to this question is a bit nuanced.\\n\\nDomesticated dogs (Canis lupus familiaris) are not considered wild animals in the classical sense. They have been selectively bred by humans for thousands of years, which has led to significant changes in their behavior, physiology, and genetics. As a result, domesticated dogs have adapted to living alongside humans and have lost many of the characteristics that define wild animals.\\n\\nHowever, there are some feral dog populations that have descended from domesticated dogs', role='assistant', function_call=None, tool_calls=None))], created=1731305379, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=100, prompt_tokens=16, total_tokens=116, completion_time=0.083333333, prompt_time=0.005273133, queue_time=0.007805676, total_time=0.088606466), x_groq={'id': 'req_01jccxf5t0epbv6dxgj28hvjpt'})\n",
+ "response: ChatCompletion(id='chatcmpl-85a584e8-5647-4112-84ec-bc770f16b091', choices=[Choice(finish_reason='length', index=0, logprobs=None, message=ChatCompletionMessage(content='The Amazon rainforest, also known as Amazonia, is the largest tropical rainforest in the world, covering an area of over 5.5 million square kilometers (2.1 million square miles) across nine countries in South America, including Brazil, Peru, Colombia, Venezuela, Ecuador, Bolivia, Guyana, Suriname, and French Guiana.\\n\\nHere are some fascinating facts about the Amazon rainforest:\\n\\n1. Biodiversity hotspots: The Amazon rainforest is home to an estimated', role='assistant', function_call=None, tool_calls=None))], created=1731305379, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=100, prompt_tokens=15, total_tokens=115, completion_time=0.086005899, prompt_time=0.000504017, queue_time=0.014784051999999999, total_time=0.086509916), x_groq={'id': 'req_01jccxf5ste18rkg69qqmfrjnk'})\n",
+ "\n",
+ "User: What is the capital of France?\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=8, prompt_tokens=17, total_tokens=25), raw_response='The capital of France is Paris.', metadata=None)\n",
+ "\n",
+ "User: Is dog a wild animal ?\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=16, total_tokens=116), raw_response='The answer to this question is a bit nuanced.\\n\\nDomesticated dogs (Canis lupus familiaris) are not considered wild animals in the classical sense. They have been selectively bred by humans for thousands of years, which has led to significant changes in their behavior, physiology, and genetics. As a result, domesticated dogs have adapted to living alongside humans and have lost many of the characteristics that define wild animals.\\n\\nHowever, there are some feral dog populations that have descended from domesticated dogs', metadata=None)\n",
+ "\n",
+ "User: Tell me about amazon forest\n",
+ "Assistant: GeneratorOutput(id=None, data=None, error=None, usage=CompletionUsage(completion_tokens=100, prompt_tokens=15, total_tokens=115), raw_response='The Amazon rainforest, also known as Amazonia, is the largest tropical rainforest in the world, covering an area of over 5.5 million square kilometers (2.1 million square miles) across nine countries in South America, including Brazil, Peru, Colombia, Venezuela, Ecuador, Bolivia, Guyana, Suriname, and French Guiana.\\n\\nHere are some fascinating facts about the Amazon rainforest:\\n\\n1. Biodiversity hotspots: The Amazon rainforest is home to an estimated', metadata=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "await check_chat_conversations()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "wHO2_4L7sOnL"
+ },
+ "source": [
+ "### Adalflow - model_client() - **Groq model** LLM Multichat Usage (ModelType.LLM) - Benchmark sync() vs async()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "metadata": {
+ "id": "4yGh8iy8sON1"
+ },
+ "outputs": [],
+ "source": [
+ "import asyncio\n",
+ "import time\n",
+ "from adalflow.components.model_client import (\n",
+ " GroqAPIClient,\n",
+ ") # Assuming GroqAPI with .call() and .acall() is available\n",
+ "from adalflow.core.types import ModelType"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "metadata": {
+ "id": "DkMkbt7DtDQv"
+ },
+ "outputs": [],
+ "source": [
+ "# Initialize the Groq client\n",
+ "groq_client = GroqAPIClient()\n",
+ "\n",
+ "# Sample prompt for testing\n",
+ "prompt = \"Tell me a joke.\"\n",
+ "\n",
+ "model_kwargs = {\"model\": \"llama3-8b-8192\", \"temperature\": 0.5, \"max_tokens\": 100}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "metadata": {
+ "id": "IUEJ6KM7tDQw"
+ },
+ "outputs": [],
+ "source": [
+ "# Synchronous function for benchmarking .call()\n",
+ "def benchmark_sync_call(api_kwargs, runs=10):\n",
+ " # List to store responses from each synchronous call\n",
+ " responses = []\n",
+ "\n",
+ " # Record the start time for benchmarking\n",
+ " start_time = time.time()\n",
+ "\n",
+ " # Perform synchronous API calls in a loop\n",
+ " responses = [\n",
+ " groq_client.call( # Calling the API synchronously\n",
+ " api_kwargs=api_kwargs, # Passing the API arguments\n",
+ " model_type=ModelType.LLM, # Defining the model type\n",
+ " )\n",
+ " for _ in range(runs) # Repeat the call 'runs' times\n",
+ " ]\n",
+ "\n",
+ " # Record the end time after all calls are completed\n",
+ " end_time = time.time()\n",
+ "\n",
+ " # Print out the response from each synchronous call\n",
+ " for i, response in enumerate(responses):\n",
+ " print(f\"sync call {i + 1} completed: {response}\")\n",
+ "\n",
+ " # Print the total time taken for the synchronous benchmark\n",
+ " print(f\"\\nSynchronous benchmark completed in {end_time - start_time:.2f} seconds\")\n",
+ "\n",
+ "\n",
+ "# Asynchronous function for benchmarking .acall()\n",
+ "async def benchmark_async_acall(api_kwargs, runs=10):\n",
+ " # Record the start time for benchmarking\n",
+ " start_time = time.time()\n",
+ "\n",
+ " # Create a list of tasks for asynchronous API calls\n",
+ " tasks = [\n",
+ " groq_client.acall( # Calling the API asynchronously\n",
+ " api_kwargs=api_kwargs, # Passing the API arguments\n",
+ " model_type=ModelType.LLM, # Defining the model type\n",
+ " )\n",
+ " for _ in range(runs) # Repeat the call 'runs' times\n",
+ " ]\n",
+ "\n",
+ " # Await the completion of all tasks concurrently\n",
+ " responses = await asyncio.gather(\n",
+ " *tasks\n",
+ " ) # Gather all the responses from asynchronous calls\n",
+ "\n",
+ " # Record the end time after all asynchronous calls are completed\n",
+ " end_time = time.time()\n",
+ "\n",
+ " # Print out the response from each asynchronous call\n",
+ " for i, response in enumerate(responses):\n",
+ " print(f\"Async call {i + 1} completed: {response}\")\n",
+ "\n",
+ " # Print the total time taken for the asynchronous benchmark\n",
+ " print(f\"\\nAsynchronous benchmark completed in {end_time - start_time:.2f} seconds\")"
+ ]
+ },
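+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "If provider rate limits become an issue at higher `runs`, the asynchronous variant can be throttled with a semaphore. A sketch (the `max_concurrency` parameter is hypothetical and not used in the benchmark below):"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "async def benchmark_async_acall_limited(api_kwargs, runs=10, max_concurrency=5):\n",
+ "    # Limit the number of in-flight requests to stay under rate limits\n",
+ "    sem = asyncio.Semaphore(max_concurrency)\n",
+ "\n",
+ "    async def one_call():\n",
+ "        async with sem:\n",
+ "            return await groq_client.acall(\n",
+ "                api_kwargs=api_kwargs, model_type=ModelType.LLM\n",
+ "            )\n",
+ "\n",
+ "    # Schedule all calls; at most max_concurrency run at any moment\n",
+ "    return await asyncio.gather(*[one_call() for _ in range(runs)])"
+ ]
+ },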
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "idOjFAo8tDQw",
+ "outputId": "cb790957-8960-4e58-a7de-39dfd0dd3504"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Starting synchronous benchmark...\n",
+ "\n",
+ "sync call 1 completed: ChatCompletion(id='chatcmpl-a6bc4231-b712-4014-a87d-0e9368f5d8f4', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000141559, queue_time=0.01454033, total_time=0.015141559), x_groq={'id': 'req_01jccxfkx7epcsynkkex05e6v6'})\n",
+ "sync call 2 completed: ChatCompletion(id='chatcmpl-00586f1c-f6fb-4650-a549-ff24d462c6bf', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000141569, queue_time=0.013657111000000001, total_time=0.015141569), x_groq={'id': 'req_01jccxfm15fs0vyr85remr47wm'})\n",
+ "sync call 3 completed: ChatCompletion(id='chatcmpl-a5fe8868-ca01-445e-89ba-d5791da524fa', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000138579, queue_time=0.014364931000000001, total_time=0.015138579), x_groq={'id': 'req_01jccxfm4ye4z89hff0f8d0yas'})\n",
+ "sync call 4 completed: ChatCompletion(id='chatcmpl-7ae04f5f-79c0-49b4-9f08-decc05393809', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002182427, queue_time=0.011133002, total_time=0.017182427), x_groq={'id': 'req_01jccxfm8wf4pacws56qqkbcrg'})\n",
+ "sync call 5 completed: ChatCompletion(id='chatcmpl-4023328d-0e1b-4127-b124-06b1d2ec4c86', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000136529, queue_time=0.013371651, total_time=0.015136529), x_groq={'id': 'req_01jccxfmcpfs0twwzvvvrf8g5s'})\n",
+ "sync call 6 completed: ChatCompletion(id='chatcmpl-9713209a-bbad-491b-8eec-7f9ba3faf0c0', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002243946, queue_time=0.011401844, total_time=0.017243946), x_groq={'id': 'req_01jccxfmgcf85vtdzmt7mwfk8x'})\n",
+ "sync call 7 completed: ChatCompletion(id='chatcmpl-1bf326d8-68f8-4117-801e-4146d0085114', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Here's one:\\n\\nWhy couldn't the bicycle stand up by itself?\\n\\n(Wait for it...)\\n\\nBecause it was two-tired!\\n\\nHope that made you laugh!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=33, prompt_tokens=15, total_tokens=48, completion_time=0.0275, prompt_time=0.002932829, queue_time=0.011706590000000001, total_time=0.030432829), x_groq={'id': 'req_01jccxfmm7e4ztjp9fn0kkbjx0'})\n",
+ "sync call 8 completed: ChatCompletion(id='chatcmpl-1d5ecb3b-c923-4c36-a89b-ad086ee677e6', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305394, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002190067, queue_time=0.011227092999999999, total_time=0.017190067), x_groq={'id': 'req_01jccxfmrgfdpbjbb07248341m'})\n",
+ "sync call 9 completed: ChatCompletion(id='chatcmpl-d5f1ff90-9100-472b-aad0-2e18e67a1871', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000136839, queue_time=0.014356821, total_time=0.015136839), x_groq={'id': 'req_01jccxfmw9f4p9qvktvtp0g557'})\n",
+ "sync call 10 completed: ChatCompletion(id='chatcmpl-c19f72d7-a2c4-48e3-848f-bef6a514a842', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002243396, queue_time=0.011192144, total_time=0.017243396), x_groq={'id': 'req_01jccxfn06f85td634z5vyhzrt'})\n",
+ "\n",
+ "Synchronous benchmark completed in 1.42 seconds\n",
+ "\n",
+ "Starting asynchronous benchmark...\n",
+ "\n",
+ "Async call 1 completed: ChatCompletion(id='chatcmpl-06c89067-a76f-484a-87ba-159f6b36564a', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002921139, queue_time=0.015738821, total_time=0.017921139), x_groq={'id': 'req_01jccxfn9cejvbpr29s0k0nkhr'})\n",
+ "Async call 2 completed: ChatCompletion(id='chatcmpl-2a5e8ccf-8058-4a77-a60a-5f7b86c71fb9', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.004858751, queue_time=0.010167037, total_time=0.019858751), x_groq={'id': 'req_01jccxfn9dfrxvvexkv623ezng'})\n",
+ "Async call 3 completed: ChatCompletion(id='chatcmpl-54d3f2e8-5603-4d2f-8396-b72a2716da2a', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002244876, queue_time=0.012254712000000001, total_time=0.017244876), x_groq={'id': 'req_01jccxfn8ye8tasgfq5hzjrzyd'})\n",
+ "Async call 4 completed: ChatCompletion(id='chatcmpl-a4fd586f-1ec8-423b-af69-b0300b940d11', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.002188018, queue_time=0.01165656, total_time=0.017188018), x_groq={'id': 'req_01jccxfn96e0b9swhyd96cs7mg'})\n",
+ "Async call 5 completed: ChatCompletion(id='chatcmpl-4d63c669-7242-4f31-be2d-b31eb0870245', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Here's one:\\n\\nWhy couldn't the bicycle stand up by itself?\\n\\n(wait for it...)\\n\\nBecause it was two-tired!\\n\\nHope that made you laugh!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=32, prompt_tokens=15, total_tokens=47, completion_time=0.026666667, prompt_time=0.002829583, queue_time=0.011314187, total_time=0.02949625), x_groq={'id': 'req_01jccxfn9ee4zrdjw0n9jktjkt'})\n",
+ "Async call 6 completed: ChatCompletion(id='chatcmpl-5c30e90c-135b-49dc-8f8e-966fdb391dc7', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_179b0f92c9', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.000155179, queue_time=0.014245601, total_time=0.015155179), x_groq={'id': 'req_01jccxfna4f5vv7b66gyk9zwam'})\n",
+ "Async call 7 completed: ChatCompletion(id='chatcmpl-eda5d2dc-82e3-40ca-a544-c770726bc8d0', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.004997677, queue_time=0.008474321, total_time=0.019997677), x_groq={'id': 'req_01jccxfn9ff8ar78qnbtnqryec'})\n",
+ "Async call 8 completed: ChatCompletion(id='chatcmpl-1132c5ca-1ba2-49ae-94ee-359c3049d4d1', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.014361024, queue_time=0.0029951239999999983, total_time=0.029361024), x_groq={'id': 'req_01jccxfndfe1b8hre70xfj9cde'})\n",
+ "Async call 9 completed: ChatCompletion(id='chatcmpl-44ea61fb-a1a8-4b70-a5b0-96d793041a48', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Why couldn't the bicycle stand up by itself?\\n\\nBecause it was two-tired!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_a97cfe35ae', usage=CompletionUsage(completion_tokens=18, prompt_tokens=15, total_tokens=33, completion_time=0.015, prompt_time=0.004858171, queue_time=0.010396207000000001, total_time=0.019858171), x_groq={'id': 'req_01jccxfn9gfh49k150pw1gsysz'})\n",
+ "Async call 10 completed: ChatCompletion(id='chatcmpl-2ac98624-8d3f-41f8-abef-5f8b5aebf7ab', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content=\"Here's one:\\n\\nWhy couldn't the bicycle stand up by itself?\\n\\n(Wait for it...)\\n\\nBecause it was two-tired!\\n\\nHope that made you laugh!\", role='assistant', function_call=None, tool_calls=None))], created=1731305395, model='llama3-8b-8192', object='chat.completion', system_fingerprint='fp_6a6771ae9c', usage=CompletionUsage(completion_tokens=33, prompt_tokens=15, total_tokens=48, completion_time=0.0275, prompt_time=0.002554123, queue_time=0.010962996, total_time=0.030054123), x_groq={'id': 'req_01jccxfn9de0b97f7wj7kvsznw'})\n",
+ "\n",
+ "Asynchronous benchmark completed in 0.44 seconds\n"
+ ]
+ }
+ ],
+ "source": [
+ "api_kwargs = groq_client.convert_inputs_to_api_kwargs(\n",
+ " input=prompt, model_kwargs=model_kwargs, model_type=ModelType.LLM\n",
+ ")\n",
+ "\n",
+ "# Run both benchmarks\n",
+ "print(\"Starting synchronous benchmark...\\n\")\n",
+ "benchmark_sync_call(api_kwargs)\n",
+ "\n",
+ "print(\"\\nStarting asynchronous benchmark...\\n\")\n",
+ "await benchmark_async_acall(api_kwargs)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "bcfOfW5wteYr"
+ },
+ "source": [
+ "### Adalflow - model_client() - **Custom Model** client building (ModelType.LLM) and (ModelType.EMBEDDER) - Synchronous\n",
+ "Note: I am using openai api as a example to build custom model client in adalflow. Even though its already there in adalflow repo below code will definitly be a starter code whom ever wants to build a custom model client"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "metadata": {
+ "id": "kOeBbL31tmLz"
+ },
+ "outputs": [],
+ "source": [
+ "# Building simple custom third party model client and using it\n",
+ "# I have modified convert_inputs_to_api_kwargs() to make sure it follows the prompt of openai and i have used appropiate\n",
+ "# openai api call in __call__()\n",
+ "\n",
+ "import openai\n",
+ "from adalflow.core.model_client import ModelClient\n",
+ "from adalflow.core.types import ModelType, GeneratorOutput, EmbedderOutput\n",
+ "from openai.types import (\n",
+ " CreateEmbeddingResponse,\n",
+ ")\n",
+ "from adalflow.components.model_client.utils import parse_embedding_response"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "metadata": {
+ "id": "7GhkATzXuBdQ"
+ },
+ "outputs": [],
+ "source": [
+ "class SimpleCustomModelClient(ModelClient):\n",
+ " # Initialize the custom model client\n",
+ " def __init__(self):\n",
+ " # Call the parent class's initializer\n",
+ " super().__init__()\n",
+ " pass # Placeholder for any initialization logic if needed in the future\n",
+ "\n",
+ " # Method to convert input into API parameters for different model types (LLM or Embedder)\n",
+ " def convert_inputs_to_api_kwargs(\n",
+ " self, input=None, model_kwargs={}, model_type=ModelType.UNDEFINED\n",
+ " ):\n",
+ " \"\"\"\n",
+ " Convert the inputs into API arguments based on the model type.\n",
+ "\n",
+ " Args:\n",
+ " input (str): The input text to be processed.\n",
+ " model_kwargs (dict): Additional model parameters like temperature, max_tokens, etc.\n",
+ " model_type (ModelType): The type of model to use (LLM or Embedder).\n",
+ "\n",
+ " Returns:\n",
+ " dict: API arguments formatted for the specified model type.\n",
+ " \"\"\"\n",
+ " if (\n",
+ " model_type == ModelType.LLM\n",
+ " ): # If the model type is a large language model (LLM)\n",
+ " return {\n",
+ " \"model\": model_kwargs[\n",
+ " \"model\"\n",
+ " ], # Set the model to use (e.g., GPT-3, GPT-4)\n",
+ " \"messages\": input, # Provide the input as the message\n",
+ " \"temperature\": model_kwargs[\n",
+ " \"temperature\"\n",
+ " ], # Set the temperature (creativity of the response)\n",
+ " \"max_tokens\": model_kwargs[\n",
+ " \"max_tokens\"\n",
+ " ], # Max tokens to generate in the response\n",
+ " }\n",
+ " elif model_type == ModelType.EMBEDDER: # If the model type is an embedder\n",
+ " return {\n",
+ " \"model\": model_kwargs[\"model\"], # Model name for embedding\n",
+ " \"input\": [input], # Provide the input in a list format for embedding\n",
+ " }\n",
+ " else:\n",
+ " # Raise an error if the model type is unsupported\n",
+ " raise ValueError(f\"model_type {model_type} is not supported\")\n",
+ "\n",
+ " # Method to make the actual API call to OpenAI for either completions (LLM) or embeddings\n",
+ " def call(self, api_kwargs={}, model_type=ModelType.UNDEFINED):\n",
+ " \"\"\"\n",
+ " Call the appropriate OpenAI API method based on the model type (LLM or Embedder).\n",
+ "\n",
+ " Args:\n",
+ " api_kwargs (dict): Arguments to be passed to the API call.\n",
+ " model_type (ModelType): The type of model (LLM or Embedder).\n",
+ "\n",
+ " Returns:\n",
+ " Response: The API response from OpenAI.\n",
+ " \"\"\"\n",
+ " if model_type == ModelType.LLM: # If the model type is LLM (e.g., GPT-3, GPT-4)\n",
+ " return openai.chat.completions.create(\n",
+ " **api_kwargs\n",
+ " ) # Call the chat API for completion\n",
+ " elif model_type == ModelType.EMBEDDER: # If the model type is Embedder\n",
+ " return openai.embeddings.create(**api_kwargs) # Call the embedding API\n",
+ " else:\n",
+ " # Raise an error if an invalid model type is passed\n",
+ " raise ValueError(f\"Unsupported model type: {model_type}\")\n",
+ "\n",
+ " # Method to parse the response from a chat completion API call\n",
+ " def parse_chat_completion(self, completion):\n",
+ " \"\"\"\n",
+ " Parse the response from a chat completion API call into a custom output format.\n",
+ "\n",
+ " Args:\n",
+ " completion: The completion response from the OpenAI API.\n",
+ "\n",
+ " Returns:\n",
+ " GeneratorOutput: A custom data structure containing the parsed response.\n",
+ " \"\"\"\n",
+ " # Note: GeneratorOutput is a adalflow dataclass that contains the parsed completion data\n",
+ " return GeneratorOutput(\n",
+ " data=completion, # Store the raw completion data\n",
+ " error=None, # No error in this case\n",
+ " raw_response=str(completion), # Store the raw response as a string\n",
+ " )\n",
+ "\n",
+ " # Method to parse the response from an embedding API call\n",
+ " def parse_embedding_response(\n",
+ " self, response: CreateEmbeddingResponse\n",
+ " ) -> EmbedderOutput:\n",
+ " \"\"\"\n",
+ " Parse the response from an embedding API call into a custom output format.\n",
+ "\n",
+ " Args:\n",
+ " response (CreateEmbeddingResponse): The response from the embedding API.\n",
+ "\n",
+ " Returns:\n",
+ " EmbedderOutput: A custom data structure containing the parsed embedding response.\n",
+ " \"\"\"\n",
+ " try:\n",
+ " # Attempt to parse the embedding response using a helper function\n",
+ " return parse_embedding_response(response)\n",
+ " except Exception as e:\n",
+ " # If parsing fails, return an error message with the raw response\n",
+ " return EmbedderOutput(data=[], error=str(e), raw_response=response)"
+ ]
+ },
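+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The client above is synchronous. As a sketch of an asynchronous counterpart (an assumption for illustration, not part of the class above), an `acall()` could be added using the `openai` SDK's `AsyncOpenAI` client:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from openai import AsyncOpenAI\n",
+ "\n",
+ "# Hypothetical async companion to SimpleCustomModelClient.call();\n",
+ "# AsyncOpenAI reads OPENAI_API_KEY from the environment.\n",
+ "async_openai = AsyncOpenAI()\n",
+ "\n",
+ "\n",
+ "async def acall(api_kwargs={}, model_type=ModelType.UNDEFINED):\n",
+ "    if model_type == ModelType.LLM:\n",
+ "        return await async_openai.chat.completions.create(**api_kwargs)\n",
+ "    elif model_type == ModelType.EMBEDDER:\n",
+ "        return await async_openai.embeddings.create(**api_kwargs)\n",
+ "    raise ValueError(f\"Unsupported model type: {model_type}\")"
+ ]
+ },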
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "metadata": {
+ "id": "W0p7jVaeuE66"
+ },
+ "outputs": [],
+ "source": [
+ "def build_custom_model_client():\n",
+ " # Instantiate the custom model client (SimpleCustomModelClient)\n",
+ " custom_client = SimpleCustomModelClient()\n",
+ "\n",
+ " # Define the query for the model to process\n",
+ " query = \"What is the capital of France?\"\n",
+ "\n",
+ " # Set the model type for a Large Language Model (LLM)\n",
+ " model_type = ModelType.LLM\n",
+ "\n",
+ " # Prepare the message prompt as expected by the OpenAI chat API.\n",
+ " # This format is suitable for GPT-like models (e.g., gpt-3.5-turbo).\n",
+ " message_prompt = [\n",
+ " {\n",
+ " \"role\": \"user\", # Define the user role in the conversation\n",
+ " \"content\": [\n",
+ " {\n",
+ " \"type\": \"text\", # Specify that the input is a text type\n",
+ " \"text\": query, # The actual query to be processed by the model\n",
+ " }\n",
+ " ],\n",
+ " }\n",
+ " ]\n",
+ "\n",
+ " # Print message indicating the usage of the LLM model type\n",
+ " print(\"ModelType LLM\")\n",
+ "\n",
+ " # Define additional model parameters like model name, temperature, and max tokens for LLM\n",
+ " model_kwargs = {\"model\": \"gpt-3.5-turbo\", \"temperature\": 0.5, \"max_tokens\": 100}\n",
+ "\n",
+ " # Convert the input message and model kwargs into the required API parameters\n",
+ " api_kwargs = custom_client.convert_inputs_to_api_kwargs(\n",
+ " input=message_prompt, model_kwargs=model_kwargs, model_type=model_type\n",
+ " )\n",
+ "\n",
+ " # Print the API arguments that will be passed to the call method\n",
+ " print(f\"api_kwargs: {api_kwargs}\")\n",
+ "\n",
+ " # Call the LLM model using the prepared API arguments\n",
+ " result = custom_client.call(api_kwargs, ModelType.LLM)\n",
+ "\n",
+ " # Print the result of the LLM model call (response from OpenAI)\n",
+ " print(result)\n",
+ "\n",
+ " # Parse the chat completion response and output a more structured result\n",
+ " response_text = custom_client.parse_chat_completion(result)\n",
+ "\n",
+ " # Print the structured response from the chat completion\n",
+ " print(f\"response_text: {response_text}\")\n",
+ "\n",
+ " # Switch to using the Embedder model type\n",
+ " print(\"ModelType EMBEDDER\")\n",
+ "\n",
+ " # Define model-specific parameters for the embedding model\n",
+ " model_kwargs = {\n",
+ " \"model\": \"text-embedding-3-small\",\n",
+ " \"dimensions\": 8,\n",
+ " \"encoding_format\": \"float\",\n",
+ " }\n",
+ "\n",
+ " # Convert the input query for the embedder model\n",
+ " api_kwargs = custom_client.convert_inputs_to_api_kwargs(\n",
+ " input=query, model_kwargs=model_kwargs, model_type=ModelType.EMBEDDER\n",
+ " )\n",
+ "\n",
+ " # Print the API arguments that will be passed to the embedder model\n",
+ " print(f\"embedder api_kwargs: {api_kwargs}\")\n",
+ "\n",
+ " # Call the Embedder model using the prepared API arguments\n",
+ " result = custom_client.call(api_kwargs, ModelType.EMBEDDER)\n",
+ "\n",
+ " # Print the result of the Embedder model call (embedding response)\n",
+ " print(result)\n",
+ "\n",
+ " # Parse the embedding response and output a more structured result\n",
+ " response_text = custom_client.parse_embedding_response(result)\n",
+ "\n",
+ " # Print the structured response from the embedding model\n",
+ " print(f\"response_text: {response_text}\")"
+ ]
+ },
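+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "The driver above prints whole dataclass reprs, which are verbose. As a convenience sketch (not part of the original tutorial), the next cell shows how to pull just the answer string out of the `GeneratorOutput` returned by `parse_chat_completion`: its `data` field holds the raw `ChatCompletion`, so the text lives at `choices[0].message.content`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from adalflow.core.types import GeneratorOutput\n",
+ "\n",
+ "\n",
+ "def extract_answer(output: GeneratorOutput) -> str:\n",
+ "    # parse_chat_completion stores the raw ChatCompletion in output.data,\n",
+ "    # so the assistant's text sits on the first choice's message\n",
+ "    return output.data.choices[0].message.content\n",
+ "\n",
+ "\n",
+ "# e.g. extract_answer(response_text) -> 'The capital of France is Paris.'"
+ ]
+ },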
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "RkVCvbWruKs4",
+ "outputId": "ffa02fa3-7570-4bf1-9880-0288d358f815"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "ModelType LLM\n",
+ "api_kwargs: {'model': 'gpt-3.5-turbo', 'messages': [{'role': 'user', 'content': [{'type': 'text', 'text': 'What is the capital of France?'}]}], 'temperature': 0.5, 'max_tokens': 100}\n",
+ "ChatCompletion(id='chatcmpl-ASHw0PEDqdMlIAIZwr8w2t4L3C9u2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305488, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=14, total_tokens=21, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\n",
+ "response_text: GeneratorOutput(id=None, data=ChatCompletion(id='chatcmpl-ASHw0PEDqdMlIAIZwr8w2t4L3C9u2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305488, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=14, total_tokens=21, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0))), error=None, usage=None, raw_response=\"ChatCompletion(id='chatcmpl-ASHw0PEDqdMlIAIZwr8w2t4L3C9u2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='The capital of France is Paris.', refusal=None, role='assistant', audio=None, function_call=None, tool_calls=None))], created=1731305488, model='gpt-3.5-turbo-0125', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=7, prompt_tokens=14, total_tokens=21, completion_tokens_details=CompletionTokensDetails(audio_tokens=0, reasoning_tokens=0, accepted_prediction_tokens=0, rejected_prediction_tokens=0), prompt_tokens_details=PromptTokensDetails(audio_tokens=0, cached_tokens=0)))\", metadata=None)\n",
+ "ModelType EMBEDDER\n",
+ "embedder api_kwargs: {'model': 'text-embedding-3-small', 'input': ['What is the capital of France?']}\n",
+ "CreateEmbeddingResponse(data=[Embedding(embedding=[0.04169800877571106, 0.0158005952835083, 0.028160491958260536, 0.024351144209504128, -0.023142803460359573, -0.002739247865974903, -0.014223608188331127, 0.01433624979108572, 0.010834109038114548, -0.010199218057096004, 0.006942841224372387, -0.024043940007686615, -0.06164587661623955, -0.01508378330618143, -0.014233848080039024, 0.023163283243775368, -0.006625395733863115, 0.019446099177002907, 0.07241854071617126, -0.024392105638980865, 0.003002932295203209, -0.010091695934534073, -0.04100167378783226, 0.011970768682658672, 0.06209644302725792, 0.0070964437909424305, -0.04554831609129906, -0.007347328122705221, 0.00364038348197937, 0.03942468762397766, 0.04214857518672943, -0.0251498781144619, -0.0019558740314096212, 0.04309067130088806, -0.024535467848181725, -0.03995717689394951, -0.03764289617538452, -0.039342764765024185, 0.021320052444934845, 0.029676036909222603, -0.003136054612696171, -0.01302550733089447, 0.00684555945917964, 0.013230310752987862, -0.027320796623826027, -0.030679574236273766, -0.009221280924975872, -0.039936695247888565, -0.03360826522111893, 0.02748463861644268, 0.03883075714111328, 0.004044870380312204, 0.03252280876040459, 0.03262520954012871, -0.016814373433589935, 0.004218953661620617, 0.024678830057382584, 0.009641128592193127, 0.04665425419807434, 0.015544591471552849, 0.036127351224422455, -0.010265778750181198, 0.026358218863606453, 0.0043085552752017975, 0.0005580897559411824, 0.0354514978826046, -0.0039322287775576115, 0.03788866102695465, 0.05906534940004349, 0.04612176492810249, -0.011059393174946308, 0.016312604770064354, -0.00918543990701437, 0.004631120711565018, -0.006594675127416849, -0.018145596608519554, -0.003968069329857826, -0.0059649040922522545, -0.03207223862409592, -0.031867437064647675, -0.036168310791254044, 0.0010604985291138291, -0.01807391457259655, -0.008606869727373123, 0.008248464204370975, -0.044647179543972015, 0.026767827570438385, 0.03383354842662811, -0.022917520254850388, -0.04767827317118645, 0.0033997392747551203, 0.011141314171254635, -0.025928132236003876, 0.027115993201732635, -0.010388661175966263, 0.01921057514846325, 0.03549245744943619, 0.0011750605190172791, -0.06819958984851837, 0.000605450535658747, 0.019323216751217842, -0.023982498794794083, -0.031109662726521492, 0.026972630992531776, 0.02560044638812542, 0.040182460099458694, 0.015862036496400833, -0.004974166862666607, 0.003153975121676922, -0.03852355107665062, -0.025661887601017952, 0.011212995275855064, 0.0033536585979163647, 0.02431018464267254, -0.04812883958220482, -0.029102588072419167, -0.023859616369009018, -0.02416682057082653, 0.02902066521346569, -0.02574380859732628, 0.033157698810100555, 0.052511636167764664, -0.04718674346804619, 0.010337459854781628, 0.010752187110483646, -0.013424874283373356, -0.0027725284453481436, -0.002777648391202092, 0.03491900861263275, -0.03870787471532822, 0.01074194721877575, -0.02752560004591942, 0.024535467848181725, 0.033055298030376434, 0.031232545152306557, 0.01897505111992359, 0.026952149346470833, -0.016937255859375, -0.018544962629675865, 0.010782907716929913, 0.007931018248200417, 0.013189350254833698, 0.021668218076229095, 0.003315257839858532, -0.02668590471148491, -0.01458201464265585, -0.04143176227807999, 0.040530625730752945, 0.01154068112373352, -0.042312417179346085, 0.040428224951028824, -0.02312232367694378, -0.0038989479653537273, 0.01604636013507843, -0.0056525785475969315, -0.036721281707286835, -0.008970396593213081, 
0.019824985414743423, 0.0059649040922522545, 0.04341835901141167, -0.03878979757428169, 0.04927574098110199, -0.03719232976436615, -0.006026345305144787, 0.012257494032382965, 0.03287097439169884, -0.03643455356359482, -0.02140197344124317, 0.00695820152759552, -0.005381213966757059, -0.02461738884449005, 0.004137032199651003, 0.054354868829250336, 0.021156208589673042, 0.03006516396999359, -0.024392105638980865, -0.04943958297371864, 0.0406944714486599, 0.0003852867230307311, -0.01936417818069458, -0.028344813734292984, -0.02803760953247547, 0.011735244654119015, 0.013045987114310265, 0.061277229338884354, 0.029532674700021744, -0.011284676380455494, -0.025477563962340355, -0.014428411610424519, 0.012564699165523052, 0.03582014515995979, -0.02020387165248394, 0.06160491332411766, -0.008207502774894238, -0.043950848281383514, 0.0198147464543581, 0.03352634608745575, 0.01265686098486185, 0.012267733924090862, -0.007997579872608185, -0.020490597933530807, 0.02193446271121502, -0.00551945623010397, 0.014377210289239883, -0.02158629707992077, 0.030536212027072906, -0.011591882444918156, -0.013496555387973785, -0.01398808415979147, -0.010286259464919567, 0.0009939373703673482, -0.003008052473887801, -0.02521131932735443, 0.00474120257422328, 0.0012096210848540068, 0.025026995688676834, -9.424164454685524e-05, 0.01112083438783884, 0.004208713304251432, 0.024494506418704987, 0.022815117612481117, 0.015216905623674393, 0.003947588615119457, -0.01148948073387146, -0.05591137334704399, 0.047473467886447906, 0.06185067817568779, 0.011110593564808369, 0.007116924040019512, -0.0036890243645757437, 0.021012846380472183, -0.03192887827754021, 0.0009395363740622997, -0.011223236098885536, -0.03283001109957695, 0.017705269157886505, 0.014141686260700226, 0.02832433395087719, -0.03524669632315636, 0.022815117612481117, -0.010803388431668282, 0.021135728806257248, 0.02863154001533985, -0.006625395733863115, -0.012298454530537128, -0.005204570945352316, 0.027464158833026886, 0.036270711570978165, 0.005877862684428692, 0.04337739571928978, 0.057426922023296356, -0.0076238131150603294, -0.0018624324584379792, -0.005703779403120279, -0.019743064418435097, 0.059556879103183746, -0.024494506418704987, -0.02818097174167633, -0.0359635055065155, -0.018145596608519554, 0.006650995928794146, 0.004362315870821476, -0.002106916857883334, -0.014326009899377823, 0.020869484171271324, 0.00018768326845020056, -0.01986594684422016, -0.024678830057382584, -0.014684416353702545, -0.008709271438419819, 0.009738409891724586, -0.003530301619321108, -0.0166812501847744, 0.009892012923955917, -0.019005771726369858, 0.015872277319431305, -0.01856544427573681, -0.00817166268825531, -0.021258611232042313, 0.0370284840464592, -0.0268907081335783, 0.04481102153658867, -0.012892385013401508, 0.0419028103351593, -0.051774341613054276, 0.0009952173568308353, 0.04423757269978523, 0.021258611232042313, -0.012605659663677216, 0.03065909445285797, 0.021033326163887978, 0.01985570602118969, -0.019435858353972435, -0.002831409452483058, -0.0029978123493492603, -0.04427853226661682, -0.003950148820877075, 0.0011648202780634165, 0.026870228350162506, -0.001858592382632196, -0.022753676399588585, 0.022466951981186867, -0.005186650436371565, 0.010035375133156776, -0.04517966881394386, 0.06574194878339767, -0.0051431297324597836, 0.047063861042261124, 0.05214298889040947, 0.00638987123966217, -0.039240363985300064, -0.03143734857439995, 0.024637870490550995, -0.03422267735004425, -0.010224818252027035, 0.045589275658130646, -0.013240550644695759, 
-0.0004217673558741808, 0.029635077342391014, -0.00687115965411067, 0.025129398331046104, 0.00804365985095501, 0.02451498806476593, -0.008376466110348701, -0.0023782814387232065, 0.01683485321700573, 0.012370135635137558, 0.02650158293545246, -0.03506237268447876, -0.02381865493953228, -0.0005033687921240926, 0.011407558806240559, 0.004651600960642099, -0.00990737322717905, -0.026112455874681473, -0.02099236659705639, 0.004933205898851156, 0.03901508077979088, 0.0013401834294199944, -0.014151927083730698, -0.0333625003695488, 0.04640848934650421, 0.009205920621752739, 0.03094581887125969, 0.003264056984335184, -0.026071494445204735, 0.018852168694138527, 0.02465835027396679, 0.012237013317644596, 0.0034663004335016012, -0.027402717620134354, -0.007209085859358311, -0.009190560318529606, -0.008176782168447971, -0.027771364897489548, 0.002693166956305504, 0.0702066645026207, -0.022405510768294334, -0.06353006511926651, 0.03995717689394951, -0.04046918451786041, -0.0492347776889801, -0.025784770026803017, -0.04837460443377495, -0.03381307050585747, -0.0039271083660423756, 0.013353193178772926, 0.004339275881648064, -0.020275553688406944, 0.06266989558935165, -0.03268665075302124, 0.0050637684762477875, -0.004106311593204737, -0.02090020477771759, 0.0425991415977478, -0.030085645616054535, 0.04235338047146797, 0.02119717001914978, 0.013793520629405975, -0.01633308455348015, -0.028590578585863113, -0.01782815158367157, 0.015472909435629845, -0.026112455874681473, 0.06140011176466942, 0.014418171718716621, 0.0824129581451416, -0.04210761561989784, -0.009810090996325016, 0.03045429103076458, -0.005196890793740749, 0.010414261370897293, 0.03174455463886261, 0.03784770146012306, -0.07372928410768509, 0.00563209829851985, 0.01000465452671051, 0.018135355785489082, -0.007413889281451702, -0.038892198354005814, 0.021750139072537422, -0.0187702476978302, -0.03147830814123154, -0.049644384533166885, 0.07581828534603119, 0.02055203914642334, 0.026010053232312202, 0.0049127256497740746, 0.014817538671195507, -0.03244088590145111, -0.004838484339416027, -0.06045801565051079, 0.008407186716794968, -0.011806925758719444, 0.002859569853171706, 0.05161049962043762, 0.06066281720995903, -0.06926456838846207, 0.026276297867298126, -0.015677712857723236, -0.003386939177289605, -0.0044570378959178925, -0.046531371772289276, 0.00856590922921896, -0.022303108125925064, -0.008227983489632607, -0.015493390150368214, -0.04690001904964447, 0.003983429633080959, 0.02867249958217144, -0.0010368181392550468, -0.0045363991521298885, 0.0017062698025256395, 0.0016051479615271091, 0.0011558601399883628, -0.007229566108435392, 0.006482033059000969, 0.04550735279917717, -0.03199031949043274, 0.023347606882452965, 0.016957735642790794, 0.0008672151016071439, 0.002657326404005289, -0.013865201734006405, -0.03676224127411842, -0.018729286268353462, 0.03743809461593628, 0.013066467829048634, -0.04616272449493408, 0.046777136623859406, -0.022446472197771072, 0.007966859266161919, -0.02134053222835064, -0.01714205928146839, -0.007772295735776424, 0.03743809461593628, 0.026071494445204735, 0.0901135727763176, 0.008571029640734196, 0.0002102436701534316, 0.003084853757172823, 0.059597838670015335, 0.013240550644695759, 0.027853285893797874, 0.034447960555553436, 0.023654812946915627, 0.026583503931760788, 0.015821075066924095, -0.04046918451786041, -0.04603984206914902, -0.005135449580848217, 0.04509774595499039, 0.010158257558941841, 0.014305529184639454, -0.027791844680905342, -0.020971884950995445, -0.058164212852716446, 
0.014991621486842632, -0.05423198640346527, -0.024781232699751854, -0.03844163194298744, 0.008611990138888359, -0.0031642152462154627, -0.02584621123969555, -0.08204431086778641, 0.006246509030461311, 0.005030487664043903, 0.03838019073009491, -0.032113201916217804, 0.02203686349093914, 0.04186185076832771, -0.013783280737698078, -0.0034995810128748417, -0.005806181114166975, 0.02818097174167633, -0.008089740760624409, -0.04341835901141167, -0.01732638292014599, -0.017705269157886505, -0.05644386261701584, -0.015964439138770103, 0.015012102201581001, 0.006722677033394575, 0.009948333725333214, 0.04218953475356102, 0.05820517614483833, 0.04694097861647606, 0.026030534878373146, -0.023654812946915627, -0.010516663081943989, 0.014520573429763317, -0.04829268157482147, 0.012626140378415585, 0.020080991089344025, -0.011755725368857384, 0.008253583684563637, -0.02381865493953228, 0.012011729180812836, -0.0015705873956903815, -0.026808787137269974, 0.025047477334737778, 0.01603611931204796, -0.002360361162573099, 0.006313070189207792, 0.027607521042227745, -0.008007819764316082, -0.009784490801393986, 0.001804831437766552, 0.03153974935412407, -0.056525785475969315, 0.005724259652197361, -0.021504374220967293, -0.011581641621887684, -0.0017830710858106613, -0.009118879213929176, 0.0008339345222339034, -0.009513125754892826, 0.04927574098110199, 0.016599329188466072, 0.04358220100402832, -0.0006348910974338651, -0.003113014390692115, 0.005381213966757059, -0.014244087971746922, -0.03608638793230057, -0.01856544427573681, 0.006313070189207792, -0.05136473476886749, -0.01970210298895836, -0.03362874686717987, -0.022446472197771072, 0.0576317235827446, -0.04431949183344841, -0.0436641201376915, -0.0021849980112165213, 0.008924315683543682, -0.028201451525092125, -0.0027981288731098175, 0.04235338047146797, -0.003909188322722912, 0.029245950281620026, 0.028733940795063972, 0.037417612969875336, 0.005606497637927532, 0.004183113109320402, 0.004213833715766668, 0.006394991651177406, 0.003141174791380763, -0.010255538858473301, -0.03455036133527756, 0.019599702209234238, 0.04354123771190643, 0.017049897462129593, -0.013568236492574215, 0.01190932746976614, 0.010700986720621586, -0.03180599585175514, 0.0026624463498592377, -0.006400111597031355, 0.05996648594737053, 0.013629677705466747, 0.0020390755962580442, 0.011929808184504509, -0.05472351610660553, -0.0569353923201561, 0.0225283931940794, -0.004899925552308559, 0.015923477709293365, -0.018299199640750885, -0.056771550327539444, -0.01986594684422016, 0.06279277801513672, 0.007444609887897968, -0.005831781774759293, 0.0036839041858911514, 0.005360733717679977, 0.00666123628616333, -0.023060882464051247, -0.025969093665480614, -0.009876652620732784, 0.0010195377981290221, 0.008673431351780891, -0.0253137219697237, -0.04218953475356102, 0.02476075105369091, -0.014797057956457138, 0.025539005175232887, -0.027709923684597015, 0.013936882838606834, 0.005785700865089893, 0.010368180461227894, -0.0006460912409238517, 0.005714019760489464, 0.01344535406678915, -0.006302829831838608, 0.012185812927782536, -0.0016345884650945663, 0.0309048593044281, -0.004649041220545769, 0.022671755403280258, -0.006533233914524317, 0.028652019798755646, -0.025887170806527138, 0.0092571210116148, -0.01698845624923706, 0.018084155395627022, 0.011161794885993004, 0.010388661175966263, 0.022446472197771072, -0.02639918029308319, -0.01643548719584942, 0.021852541714906693, -0.015565071254968643, 0.010311859659850597, -0.02207782492041588, -0.03278905153274536, 0.022016383707523346, 
-0.0009350563050247729, 0.00790029764175415, -0.0403258241713047, 0.045630235224962234, 0.005114969331771135, -0.008489107713103294, 0.01793055236339569, -0.021852541714906693, -0.001845792168751359, 0.024924594908952713, 0.04143176227807999, 0.006640756037086248, -0.014592254534363747, 0.030781976878643036, 0.029204988852143288, 0.004679761826992035, 0.007265406660735607, 0.004825684241950512, 0.022548872977495193, 0.03119158372282982, -0.0024730032309889793, -0.026010053232312202, 0.06570098549127579, -0.02451498806476593, -0.027361758053302765, 0.03360826522111893, -0.015339787118136883, -0.034591324627399445, 0.005975143983960152, -0.03524669632315636, 0.00684555945917964, 0.05255259573459625, 0.032809533178806305, -0.03348538279533386, -0.00364038348197937, 0.02392105758190155, 0.03129398450255394, -0.03598398715257645, -0.008560789749026299, -0.007567491848021746, -0.0017190700164064765, -0.025661887601017952, 0.009579687379300594, 0.04530255123972893, 0.028488175943493843, -0.01124371588230133, 0.03907652199268341, -0.0022566793486475945, 0.009164960123598576, 0.019876185804605484, 0.05017687380313873, 0.027894247323274612, -0.012370135635137558, -0.01876000687479973, -0.0022029185201972723, -0.02238503098487854, 0.04067398980259895, 0.025375163182616234, -0.03723328933119774, 0.03817538544535637, 0.013291751965880394, 0.013936882838606834, 0.01975330524146557, -0.005790820810943842, 0.00148226588498801, -0.03278905153274536, 0.004065351095050573, 0.00993297342211008, 0.004738642834126949, -0.010956991463899612, -0.005908582825213671, 0.012441816739737988, -0.009477285668253899, -0.041022155433893204, -0.051037050783634186, -0.025641407817602158, 0.008048780262470245, -0.0029542914126068354, 0.04415564984083176, 0.0024294822942465544, -0.011079872958362103, 0.023859616369009018, -0.024453546851873398, -0.006246509030461311, -0.010793148539960384, -0.0309048593044281, -0.025477563962340355, 0.029041146859526634, 0.011212995275855064, -0.003747905371710658, 0.009415844455361366, 0.01599515974521637, 0.009001117199659348, -0.009922732599079609, -0.028099050745368004, -0.00941072404384613, 0.006154347211122513, 0.018708806484937668, 0.014735616743564606, -0.0012313814368098974, -0.00442119687795639, -0.04247625917196274, 0.0018880328861996531, -0.02818097174167633, 0.024822192266583443, 0.006092906463891268, -0.03252280876040459, 0.03502140939235687, 0.048948053270578384, -0.002961971564218402, -0.015964439138770103, -0.021156208589673042, -0.02088996395468712, -0.031416866928339005, 0.0026470862794667482, 0.01856544427573681, -0.023941537365317345, 0.021361012011766434, -0.01059858500957489, -0.006502513308078051, -0.005007447209209204, -0.0041703125461936, -0.05931111425161362, 0.00023616412363480777, 0.010875069536268711, -0.03366970643401146, -0.011643082834780216, -0.006681716535240412, -0.019282257184386253, 0.01185812707990408, -0.004495438188314438, -0.024986036121845245, -0.014653695747256279, -0.019937627017498016, 0.016271643340587616, -0.033997394144535065, -0.000680331839248538, 0.01089555025100708, -0.0074753304943442345, -0.06484080851078033, 0.01487897988408804, -0.0370284840464592, -0.029450753703713417, 0.019988829270005226, 0.005186650436371565, 0.011202755384147167, 0.0272388756275177, -0.008110221475362778, -0.019528020173311234, 0.07876745611429214, -0.05455967038869858, -0.007516290992498398, -0.02412586100399494, 0.009661608375608921, -0.0025792450178414583, -0.004874324891716242, -0.005831781774759293, 0.02064420096576214, -0.012257494032382965, 
0.04317259415984154, -0.03237944468855858, 0.00173955038189888, -0.04780115187168121, -0.030679574236273766, 0.010337459854781628, 0.023450009524822235, -0.020582759752869606, 0.012298454530537128, 0.02318376488983631, -0.03432507812976837, 0.007838856428861618, 0.02074660174548626, 0.019149133935570717, -0.018145596608519554, -0.03950661048293114, -0.0006950521492399275, -0.029553154483437538, 0.02867249958217144, 0.016363805159926414, -0.006610035430639982, -0.029307391494512558, 0.038503073155879974, 0.01103891246020794, 0.022323589771986008, 0.007250046357512474, -0.029635077342391014, -0.032850492745637894, 0.0340588353574276, -0.02065443992614746, 0.04460621625185013, 0.04976726695895195, 0.010316980071365833, 0.01638428494334221, -0.007019642274826765, 0.02570284903049469, -0.01037330087274313, 0.02070564031600952, 0.0149301802739501, 0.006594675127416849, 0.012103891000151634, -0.03301433473825455, -0.005178970284759998, 0.00423431396484375, -0.007628933060914278, 0.025887170806527138, 0.03133494779467583, -0.04309067130088806, 0.039793334901332855, 0.006748277693986893, -0.0036275831516832113, 0.023593371734023094, 0.005616737995296717, 0.0040807113982737064, -0.01608731970191002, 0.048456523567438126, -0.04415564984083176, -0.012912864796817303, -0.012257494032382965, 0.02832433395087719, 0.002544684335589409, 0.02748463861644268, 0.02105380780994892, -0.02099236659705639, -0.009344163350760937, -0.00446727778762579, -0.03514429181814194, -0.006231148727238178, -0.0031488549429923296, -0.0235728919506073, -0.02709551341831684, -0.04091975465416908, -0.03618879243731499, 0.037909142673015594, 0.012738781981170177, -0.031273506581783295, 0.003896387992426753, 0.01768478751182556, 0.006236268673092127, 0.03174455463886261, -0.006118506658822298, 0.009272481314837933, -0.017305901274085045, -0.054150063544511795, -0.007219325751066208, 0.0130971884354949, 0.013455594889819622, -0.003136054612696171, -0.012482777237892151, -0.012267733924090862, -0.0032358963508158922, -0.02555948495864868, -0.0026496462523937225, 0.01738782413303852, 0.02891826443374157, 0.026603983715176582, 0.003233336377888918, -0.018882889300584793, -0.04792403429746628, 0.011049152351915836, -0.01391640305519104, 0.04354123771190643, -0.009415844455361366, -0.03663935884833336, -0.02242599055171013, 0.0333625003695488, 0.025928132236003876, -0.013936882838606834, 0.01758238673210144, 0.00643595214933157, -0.03418171778321266, 0.03358778730034828, -0.011714763939380646, -0.03254328668117523, 0.011694284155964851, -0.0198147464543581, -0.01185812707990408, 0.018104635179042816, 0.06660211831331253, -0.03397691249847412, -0.01044498197734356, 0.052020106464624405, -0.03205176070332527, 0.027177434414625168, -0.07282815128564835, 0.049152858555316925, -0.04825172200798988, -0.046531371772289276, 0.025825729593634605, 0.014674175530672073, -0.005345373414456844, 0.04010054096579552, -0.006789238192141056, 0.002626605797559023, -0.009451684542000294, 0.035328615456819534, -0.005401694215834141, -0.017008936032652855, -0.019732823595404625, -0.0006131306872703135, -0.030372370034456253, -0.009308322332799435, -0.029082106426358223, 0.017991993576288223, 0.0047488827258348465, 0.014428411610424519, 0.00817166268825531, 0.002288679825142026, 0.03964997082948685, 0.003059253329411149, 0.035287655889987946, -0.010409141890704632, 0.07528579235076904, 0.013537515886127949, 0.010496183298528194, -0.037110406905412674, -0.018903369084000587, -0.016568608582019806, 0.018360640853643417, 0.0232247244566679, 
-0.004882005043327808, 0.013936882838606834, -0.016005398705601692, -0.010158257558941841, -0.02207782492041588, 0.016896294429898262, 0.0011219395091757178, 0.0011974609224125743, -0.022507913410663605, 0.020377954468131065, -0.01649692840874195, 0.026563024148344994, 0.022303108125925064, 0.04546639323234558, -0.015196424908936024, -0.022548872977495193, -0.009886892512440681, 0.01367063820362091, -0.013271271251142025, -0.03977285325527191, 0.03950661048293114, 0.012554459273815155, 0.008555669337511063, -4.080071084899828e-05, 0.009461925365030766, 0.0007302527083083987, -0.000619530794210732, -0.0027827685698866844, -0.010588345117866993, 0.0004899285268038511, -0.012933345511555672, -0.002330920659005642, -0.00698380172252655, -0.013015267439186573, 0.03303481638431549, 0.032113201916217804, 0.01797151379287243, -0.020869484171271324, 0.010608824901282787, -0.000775053515098989, 0.0035635821986943483, -0.0031846954952925444, 0.04481102153658867, -0.04163656756281853, -0.06049897521734238, 0.03668031841516495, 0.010772667825222015, -0.015769874677062035, -0.009820330888032913, -0.017510704696178436, -0.012216532602906227, 0.023859616369009018, -0.03967045247554779, -0.07143548876047134, 0.008632470853626728, -0.01826847903430462, 0.0010144177358597517, -0.01220629271119833, -0.02034723572432995, 0.023347606882452965, 0.027771364897489548, -0.004183113109320402, -0.024945074692368507, -0.015053062699735165, -0.027300316840410233, 0.007654533721506596, 0.008739992044866085, -0.0034560603089630604, 0.021156208589673042, -0.017244460061192513, 0.0029491714667528868, -0.011960528790950775, -0.006446192506700754, -0.0019392338581383228, -0.01738782413303852, -0.01094675064086914, -0.038646433502435684, 0.02615341544151306, -0.012329175136983395, 0.012810463085770607, 0.012257494032382965, 0.00544777512550354, -0.02015267126262188, 0.007884937338531017, -0.0041600726544857025, -0.00046176803880371153, -0.03491900861263275, -0.014715136960148811, 0.021258611232042313, -0.010557624511420727, -0.0007635332876816392, 0.02744367904961109, 0.03407931327819824, -0.005621857941150665, -0.05021783709526062, -0.016773412004113197, 0.010875069536268711, -0.03045429103076458, -0.04091975465416908, -0.0333625003695488, -0.004265034571290016, -0.010660026222467422, 0.001943073933944106, 0.014100725762546062, 0.041124556213617325, 0.007567491848021746, 0.028099050745368004, -0.032358963042497635, -0.003194935619831085, 0.031621672213077545, 0.02441258542239666, 0.011581641621887684, -0.0070964437909424305, 0.02189350128173828, -0.0016473886789754033, 0.01354775670915842, 0.014305529184639454, 0.012114130891859531, -0.019026251509785652, -0.008038540370762348, 0.006072425749152899, 0.034447960555553436, 0.01227797381579876, 0.04501582682132721, 0.008750232867896557, -0.014694656245410442, 0.030495252460241318, 0.035431016236543655, -0.03715136647224426, -0.04710482060909271, 0.020971884950995445, -0.020336994901299477, -0.01614876091480255, -0.008770712651312351, 0.015022342093288898, 0.03942468762397766, 0.007895177230238914, -0.015964439138770103, 0.059106308966875076, 0.023142803460359573, -0.012841183692216873, -0.013977843336760998, 0.011141314171254635, 0.0375404953956604, 0.03000372275710106, -0.027853285893797874, 0.02748463861644268, -0.018114876002073288, 0.013568236492574215, -0.0023949218448251486, -0.03856451436877251, -0.00035296616260893643, -0.0068660397082567215, -0.02006050944328308, 0.017838390544056892, 0.03491900861263275, 0.011223236098885536, 0.016466207802295685, 
-0.023388568311929703, 0.021033326163887978, 0.012830943800508976, -0.010547383688390255, -0.01148948073387146, 0.01044498197734356, 0.04169800877571106, -0.031621672213077545, -0.042517222464084625, -0.00288517028093338, -0.0021389173343777657, -0.020029788836836815, -0.017060138285160065, -0.011212995275855064, -0.010357940569519997, -0.005102168768644333, 0.017459504306316376, 0.016415005549788475, -0.017695028334856033, -0.0022464392241090536, -0.022999441251158714, 0.008028300479054451, -0.017295662313699722, -0.0039117480628192425, -0.02609197422862053, 0.0340588353574276, 0.013906162232160568, -0.018483523279428482, 0.0232247244566679, 0.03016756661236286, -0.008606869727373123, -0.027894247323274612, 0.016322845593094826, 0.04788307473063469, -0.004528719000518322, 0.02633773908019066, 0.008227983489632607, -0.00977937038987875, -0.0025434044655412436, -0.011212995275855064, 0.009820330888032913, -0.009451684542000294, -0.006743157748132944, -0.00517385033890605, 0.02787376567721367, 0.04522062838077545, -0.0051277694292366505, 0.004782163538038731, -0.0031181343365460634, 0.01265686098486185, -0.06258796900510788, -0.022016383707523346, 0.003717184765264392, -0.015923477709293365, -0.007086203433573246, -0.017408303916454315, 0.005319772753864527, 0.011776205152273178, 0.019333457574248314, 0.004185672849416733, 0.01882144808769226, 0.011458760127425194, -0.04431949183344841, -0.024146340787410736, 0.022344069555401802, 0.027156952768564224, -0.004167752806097269, 0.0035482218954712152, 0.04599888250231743, -0.0005158489802852273, -0.027054551988840103, 0.003891267813742161, 0.026665424928069115, -0.028652019798755646, 0.0003868867352139205, 0.013578476384282112, -0.022118786349892616, 0.004920405801385641, -0.0002334440650884062, -0.0031846954952925444, 0.00881167408078909, 0.02312232367694378, -0.007219325751066208, 0.03420219570398331, 0.014674175530672073, -0.020336994901299477, 0.0037402252200990915, -0.01664029061794281, 0.000318565551424399, 0.024965554475784302, 0.0074702100828289986, 0.04530255123972893, 0.012585179880261421, 0.029778439551591873, -0.0001963234244612977, -0.011806925758719444, 0.008105101063847542, -0.008079500868916512, -0.0253137219697237, -0.005493856035172939, 0.03782721981406212, -0.0216477382928133, 0.03690560534596443, 0.03371066972613335, 0.012114130891859531, -0.02912306785583496, 0.009518246166408062, 0.0035405417438596487, 0.008048780262470245, -0.0038503070827573538, -0.01882144808769226, -0.040735431015491486, 0.06094954162836075, -0.01206293050199747, 0.03727424889802933, 0.0033613385166972876, -0.015329547226428986, 0.01369111891835928, -0.05501024052500725, -0.007936138659715652, 0.011653323657810688, 0.02713647298514843, 0.021709179505705833, -0.006420591846108437, -0.0016358685679733753, -0.014029044657945633, -0.01985570602118969, 0.0009254561155103147, 0.061277229338884354, -0.03795010223984718, 0.008658071048557758, -0.028058089315891266, -0.009748649783432484, -0.018401600420475006, -0.01590299792587757, -0.0272388756275177, 0.019005771726369858, 0.0067994785495102406, -0.021606776863336563, 0.0025958851911127567, -0.04804691672325134, -0.029327871277928352, 0.0005596897681243718, -0.024576429277658463, -0.01569819450378418, 0.050832245498895645, 0.005975143983960152, 0.03512381389737129, -0.03381307050585747, -0.02521131932735443, -0.0006137706805020571, -0.0037351050414144993, -0.011571401730179787, 0.002841649577021599, 0.004720722325146198, 0.0034150995779782534, 0.005621857941150665, 0.013496555387973785, -0.027894247323274612, 
0.03799106180667877, -0.02684974856674671, 0.00792077835649252, 0.006830199155956507, 0.02684974856674671, 0.031969837844371796, 0.010567864403128624, -0.0034586202818900347, -0.03663935884833336, 0.01753118634223938, -0.00480520399287343, 0.05754980444908142, 0.021115249022841454, 0.039445169270038605, 0.010168497450649738, 0.03215416148304939, 0.00673291739076376, -0.029204988852143288, 0.007337087765336037, -0.03334202244877815, 0.032461367547512054, -0.010977471247315407, -0.03749953582882881, 0.026665424928069115, -0.03180599585175514, -0.006000744644552469, -0.004820564296096563, 0.03846210986375809, 0.008745112456381321, -0.025518525391817093, 0.010071215219795704, 0.03174455463886261, 0.02783280611038208, -0.016322845593094826, -0.014694656245410442, 0.03045429103076458, -0.012769502587616444, -0.04571215808391571, -0.0004825684300158173, 0.0038938280194997787, 0.03555389866232872, -0.006144107319414616, 2.1620377083308995e-05, -0.018954571336507797, -0.004265034571290016, -0.0062413886189460754, -0.020285794511437416, 0.013660398311913013, 0.03739713132381439, 0.014489852823317051, 0.027771364897489548, 0.023757213726639748, -0.009052317589521408, -0.02301992103457451, 0.00821774359792471, -0.017049897462129593, -0.012687581591308117, -0.008760472759604454, -0.016722211614251137, 0.015780115500092506, 0.0028800503350794315, -0.002380841411650181, -0.014418171718716621, 0.012646620161831379, 0.02381865493953228, -0.021422453224658966, -0.01039890106767416, 0.023736733943223953, -0.029143547639250755, 0.016875814646482468, -0.04931670054793358, -0.008929436095058918, 0.02179110050201416, -0.031724072992801666, 0.0432954765856266, 0.03571774438023567, 0.029696518555283546, -0.026726866140961647, 0.0006176107563078403, -0.04841556400060654, -0.002734127687290311, -0.030085645616054535, 0.029839880764484406, 0.010567864403128624, 0.02238503098487854, -0.03784770146012306, -0.008545429445803165, 0.015216905623674393, -0.004930646158754826, -0.021033326163887978, -0.01718301884829998, 0.022118786349892616, -0.012810463085770607, -0.03035189025104046, -0.01831967942416668, 0.006615155376493931, -0.005089368671178818, 0.009282722137868404, 0.011847886256873608, -0.03100726008415222, -0.023040402680635452, -0.033546824008226395, 0.020019549876451492, -0.023163283243775368, -0.009794730693101883, -0.0029696517158299685, -0.0130971884354949, -0.009958573617041111, 0.03237944468855858, 0.012370135635137558, -0.0056525785475969315, 0.02263079397380352, 0.0336901880800724, 0.006579314824193716, 0.017889592796564102, 0.018196796998381615, -0.03383354842662811, -0.013680879026651382, -0.012533978559076786, -0.022364549338817596, 0.009292962029576302, -0.008386706002056599, -0.008048780262470245, 0.0450567863881588, 0.028099050745368004, -0.016056600958108902, -0.047022901475429535, 0.02412586100399494, -0.002428202424198389, -0.0020301153417676687, 0.01643548719584942, 0.01927201636135578, 0.013773039914667606, -0.006922360509634018, -0.014182647690176964, 0.00935952365398407, -0.009989294223487377, 0.008094861172139645, -0.01737758331000805, -0.015216905623674393, 0.0033382982946932316, -0.02713647298514843, -0.030085645616054535, 0.025231800973415375, 0.013814001344144344, -0.023060882464051247, 0.04304971173405647, 0.00407303124666214, 0.0020173152443021536, 0.04980823025107384, 0.03782721981406212, 0.005596257746219635, 0.0309048593044281, -0.041677527129650116, -0.011438279412686825, -0.002384681487455964, -0.022507913410663605, -0.004208713304251432, -0.006528113968670368, 
0.03506237268447876, 0.022098304703831673, 0.015206664800643921, 0.0019507539691403508, -0.010219697840511799, 0.041083596646785736, -0.00551945623010397, -0.04034630209207535, -0.01821727678179741, -0.0017805109964683652, -0.01857568323612213, 0.016312604770064354, -0.012400856241583824, 0.012994786724448204, -0.0062772296369075775, -0.0017817910993471742, 0.0007673733634874225, 0.023101842030882835, 0.019128654152154922, 0.015288586728274822, 0.03727424889802933, -0.009236641228199005, 0.023511450737714767, -0.019374417141079903, 0.022958479821681976, 0.00817166268825531, -0.007480450440198183, -0.012237013317644596, 0.015411469154059887, 0.015718674287199974, -0.006282349582761526, 0.03715136647224426, -0.03164215013384819, 0.0043879165314137936, -0.023634331300854683, 0.026603983715176582, 0.010639545507729053, 0.028713461011648178, -0.025375163182616234, -0.016824614256620407, 0.0074702100828289986, -0.00450823875144124, 0.012021970003843307, -0.007040122989565134, -0.015124743804335594, 0.02228262834250927, 0.02490411512553692, 0.012830943800508976, -0.024474026635289192, -0.01793055236339569, -0.02168869785964489, -0.06398063898086548, 0.028549617156386375, 0.021381493657827377, 0.006200428120791912, 0.014592254534363747, -0.026235338300466537, 0.003983429633080959, 0.0033510983921587467, 0.017070377245545387, -0.01005585491657257, 0.018483523279428482, -0.014377210289239883, -0.002320680534467101, 0.00647691311314702, -0.01403928454965353, 0.036864642053842545, -0.033403463661670685, -0.004759123083204031, 0.011950287967920303, -0.04882517084479332, 0.015831315889954567, 0.010153137147426605, -0.00735244806855917, 0.010178737342357635, 0.005591137334704399, -0.02079780213534832, 0.018657606095075607, 0.038748834282159805, -0.008320145308971405, -0.01976354420185089, -0.036967046558856964, 0.003315257839858532, -0.004794963635504246, 0.0333625003695488, -0.0070964437909424305, -0.044196609407663345, 0.027320796623826027, -0.029737478122115135, 0.025477563962340355, 0.04841556400060654, 0.009948333725333214, -0.0030208525713533163, -0.014766337350010872, -0.01426456868648529, -0.02521131932735443, 0.01683485321700573, -0.007183485198765993, -0.03020852617919445, 0.007035002578049898, 0.04362316057085991, -0.02119717001914978, 0.007562371902167797, 0.036270711570978165], index=0, object='embedding')], model='text-embedding-3-small', object='list', usage=Usage(prompt_tokens=7, total_tokens=7))\n",
+ "response_text: EmbedderOutput(data=[Embedding(embedding=[0.04169800877571106, 0.0158005952835083, 0.028160491958260536, 0.024351144209504128, -0.023142803460359573, -0.002739247865974903, -0.014223608188331127, 0.01433624979108572, 0.010834109038114548, -0.010199218057096004, 0.006942841224372387, -0.024043940007686615, -0.06164587661623955, -0.01508378330618143, -0.014233848080039024, 0.023163283243775368, -0.006625395733863115, 0.019446099177002907, 0.07241854071617126, -0.024392105638980865, 0.003002932295203209, -0.010091695934534073, -0.04100167378783226, 0.011970768682658672, 0.06209644302725792, 0.0070964437909424305, -0.04554831609129906, -0.007347328122705221, 0.00364038348197937, 0.03942468762397766, 0.04214857518672943, -0.0251498781144619, -0.0019558740314096212, 0.04309067130088806, -0.024535467848181725, -0.03995717689394951, -0.03764289617538452, -0.039342764765024185, 0.021320052444934845, 0.029676036909222603, -0.003136054612696171, -0.01302550733089447, 0.00684555945917964, 0.013230310752987862, -0.027320796623826027, -0.030679574236273766, -0.009221280924975872, -0.039936695247888565, -0.03360826522111893, 0.02748463861644268, 0.03883075714111328, 0.004044870380312204, 0.03252280876040459, 0.03262520954012871, -0.016814373433589935, 0.004218953661620617, 0.024678830057382584, 0.009641128592193127, 0.04665425419807434, 0.015544591471552849, 0.036127351224422455, -0.010265778750181198, 0.026358218863606453, 0.0043085552752017975, 0.0005580897559411824, 0.0354514978826046, -0.0039322287775576115, 0.03788866102695465, 0.05906534940004349, 0.04612176492810249, -0.011059393174946308, 0.016312604770064354, -0.00918543990701437, 0.004631120711565018, -0.006594675127416849, -0.018145596608519554, -0.003968069329857826, -0.0059649040922522545, -0.03207223862409592, -0.031867437064647675, -0.036168310791254044, 0.0010604985291138291, -0.01807391457259655, -0.008606869727373123, 0.008248464204370975, -0.044647179543972015, 0.026767827570438385, 0.03383354842662811, -0.022917520254850388, -0.04767827317118645, 0.0033997392747551203, 0.011141314171254635, -0.025928132236003876, 0.027115993201732635, -0.010388661175966263, 0.01921057514846325, 0.03549245744943619, 0.0011750605190172791, -0.06819958984851837, 0.000605450535658747, 0.019323216751217842, -0.023982498794794083, -0.031109662726521492, 0.026972630992531776, 0.02560044638812542, 0.040182460099458694, 0.015862036496400833, -0.004974166862666607, 0.003153975121676922, -0.03852355107665062, -0.025661887601017952, 0.011212995275855064, 0.0033536585979163647, 0.02431018464267254, -0.04812883958220482, -0.029102588072419167, -0.023859616369009018, -0.02416682057082653, 0.02902066521346569, -0.02574380859732628, 0.033157698810100555, 0.052511636167764664, -0.04718674346804619, 0.010337459854781628, 0.010752187110483646, -0.013424874283373356, -0.0027725284453481436, -0.002777648391202092, 0.03491900861263275, -0.03870787471532822, 0.01074194721877575, -0.02752560004591942, 0.024535467848181725, 0.033055298030376434, 0.031232545152306557, 0.01897505111992359, 0.026952149346470833, -0.016937255859375, -0.018544962629675865, 0.010782907716929913, 0.007931018248200417, 0.013189350254833698, 0.021668218076229095, 0.003315257839858532, -0.02668590471148491, -0.01458201464265585, -0.04143176227807999, 0.040530625730752945, 0.01154068112373352, -0.042312417179346085, 0.040428224951028824, -0.02312232367694378, -0.0038989479653537273, 0.01604636013507843, -0.0056525785475969315, -0.036721281707286835, -0.008970396593213081, 
0.019824985414743423, 0.0059649040922522545, 0.04341835901141167, -0.03878979757428169, 0.04927574098110199, -0.03719232976436615, -0.006026345305144787, 0.012257494032382965, 0.03287097439169884, -0.03643455356359482, -0.02140197344124317, 0.00695820152759552, -0.005381213966757059, -0.02461738884449005, 0.004137032199651003, 0.054354868829250336, 0.021156208589673042, 0.03006516396999359, -0.024392105638980865, -0.04943958297371864, 0.0406944714486599, 0.0003852867230307311, -0.01936417818069458, -0.028344813734292984, -0.02803760953247547, 0.011735244654119015, 0.013045987114310265, 0.061277229338884354, 0.029532674700021744, -0.011284676380455494, -0.025477563962340355, -0.014428411610424519, 0.012564699165523052, 0.03582014515995979, -0.02020387165248394, 0.06160491332411766, -0.008207502774894238, -0.043950848281383514, 0.0198147464543581, 0.03352634608745575, 0.01265686098486185, 0.012267733924090862, -0.007997579872608185, -0.020490597933530807, 0.02193446271121502, -0.00551945623010397, 0.014377210289239883, -0.02158629707992077, 0.030536212027072906, -0.011591882444918156, -0.013496555387973785, -0.01398808415979147, -0.010286259464919567, 0.0009939373703673482, -0.003008052473887801, -0.02521131932735443, 0.00474120257422328, 0.0012096210848540068, 0.025026995688676834, -9.424164454685524e-05, 0.01112083438783884, 0.004208713304251432, 0.024494506418704987, 0.022815117612481117, 0.015216905623674393, 0.003947588615119457, -0.01148948073387146, -0.05591137334704399, 0.047473467886447906, 0.06185067817568779, 0.011110593564808369, 0.007116924040019512, -0.0036890243645757437, 0.021012846380472183, -0.03192887827754021, 0.0009395363740622997, -0.011223236098885536, -0.03283001109957695, 0.017705269157886505, 0.014141686260700226, 0.02832433395087719, -0.03524669632315636, 0.022815117612481117, -0.010803388431668282, 0.021135728806257248, 0.02863154001533985, -0.006625395733863115, -0.012298454530537128, -0.005204570945352316, 0.027464158833026886, 0.036270711570978165, 0.005877862684428692, 0.04337739571928978, 0.057426922023296356, -0.0076238131150603294, -0.0018624324584379792, -0.005703779403120279, -0.019743064418435097, 0.059556879103183746, -0.024494506418704987, -0.02818097174167633, -0.0359635055065155, -0.018145596608519554, 0.006650995928794146, 0.004362315870821476, -0.002106916857883334, -0.014326009899377823, 0.020869484171271324, 0.00018768326845020056, -0.01986594684422016, -0.024678830057382584, -0.014684416353702545, -0.008709271438419819, 0.009738409891724586, -0.003530301619321108, -0.0166812501847744, 0.009892012923955917, -0.019005771726369858, 0.015872277319431305, -0.01856544427573681, -0.00817166268825531, -0.021258611232042313, 0.0370284840464592, -0.0268907081335783, 0.04481102153658867, -0.012892385013401508, 0.0419028103351593, -0.051774341613054276, 0.0009952173568308353, 0.04423757269978523, 0.021258611232042313, -0.012605659663677216, 0.03065909445285797, 0.021033326163887978, 0.01985570602118969, -0.019435858353972435, -0.002831409452483058, -0.0029978123493492603, -0.04427853226661682, -0.003950148820877075, 0.0011648202780634165, 0.026870228350162506, -0.001858592382632196, -0.022753676399588585, 0.022466951981186867, -0.005186650436371565, 0.010035375133156776, -0.04517966881394386, 0.06574194878339767, -0.0051431297324597836, 0.047063861042261124, 0.05214298889040947, 0.00638987123966217, -0.039240363985300064, -0.03143734857439995, 0.024637870490550995, -0.03422267735004425, -0.010224818252027035, 0.045589275658130646, -0.013240550644695759, 
-0.0004217673558741808, 0.029635077342391014, -0.00687115965411067, 0.025129398331046104, 0.00804365985095501, 0.02451498806476593, -0.008376466110348701, -0.0023782814387232065, 0.01683485321700573, 0.012370135635137558, 0.02650158293545246, -0.03506237268447876, -0.02381865493953228, -0.0005033687921240926, 0.011407558806240559, 0.004651600960642099, -0.00990737322717905, -0.026112455874681473, -0.02099236659705639, 0.004933205898851156, 0.03901508077979088, 0.0013401834294199944, -0.014151927083730698, -0.0333625003695488, 0.04640848934650421, 0.009205920621752739, 0.03094581887125969, 0.003264056984335184, -0.026071494445204735, 0.018852168694138527, 0.02465835027396679, 0.012237013317644596, 0.0034663004335016012, -0.027402717620134354, -0.007209085859358311, -0.009190560318529606, -0.008176782168447971, -0.027771364897489548, 0.002693166956305504, 0.0702066645026207, -0.022405510768294334, -0.06353006511926651, 0.03995717689394951, -0.04046918451786041, -0.0492347776889801, -0.025784770026803017, -0.04837460443377495, -0.03381307050585747, -0.0039271083660423756, 0.013353193178772926, 0.004339275881648064, -0.020275553688406944, 0.06266989558935165, -0.03268665075302124, 0.0050637684762477875, -0.004106311593204737, -0.02090020477771759, 0.0425991415977478, -0.030085645616054535, 0.04235338047146797, 0.02119717001914978, 0.013793520629405975, -0.01633308455348015, -0.028590578585863113, -0.01782815158367157, 0.015472909435629845, -0.026112455874681473, 0.06140011176466942, 0.014418171718716621, 0.0824129581451416, -0.04210761561989784, -0.009810090996325016, 0.03045429103076458, -0.005196890793740749, 0.010414261370897293, 0.03174455463886261, 0.03784770146012306, -0.07372928410768509, 0.00563209829851985, 0.01000465452671051, 0.018135355785489082, -0.007413889281451702, -0.038892198354005814, 0.021750139072537422, -0.0187702476978302, -0.03147830814123154, -0.049644384533166885, 0.07581828534603119, 0.02055203914642334, 0.026010053232312202, 0.0049127256497740746, 0.014817538671195507, -0.03244088590145111, -0.004838484339416027, -0.06045801565051079, 0.008407186716794968, -0.011806925758719444, 0.002859569853171706, 0.05161049962043762, 0.06066281720995903, -0.06926456838846207, 0.026276297867298126, -0.015677712857723236, -0.003386939177289605, -0.0044570378959178925, -0.046531371772289276, 0.00856590922921896, -0.022303108125925064, -0.008227983489632607, -0.015493390150368214, -0.04690001904964447, 0.003983429633080959, 0.02867249958217144, -0.0010368181392550468, -0.0045363991521298885, 0.0017062698025256395, 0.0016051479615271091, 0.0011558601399883628, -0.007229566108435392, 0.006482033059000969, 0.04550735279917717, -0.03199031949043274, 0.023347606882452965, 0.016957735642790794, 0.0008672151016071439, 0.002657326404005289, -0.013865201734006405, -0.03676224127411842, -0.018729286268353462, 0.03743809461593628, 0.013066467829048634, -0.04616272449493408, 0.046777136623859406, -0.022446472197771072, 0.007966859266161919, -0.02134053222835064, -0.01714205928146839, -0.007772295735776424, 0.03743809461593628, 0.026071494445204735, 0.0901135727763176, 0.008571029640734196, 0.0002102436701534316, 0.003084853757172823, 0.059597838670015335, 0.013240550644695759, 0.027853285893797874, 0.034447960555553436, 0.023654812946915627, 0.026583503931760788, 0.015821075066924095, -0.04046918451786041, -0.04603984206914902, -0.005135449580848217, 0.04509774595499039, 0.010158257558941841, 0.014305529184639454, -0.027791844680905342, -0.020971884950995445, -0.058164212852716446, 
0.014991621486842632, -0.05423198640346527, -0.024781232699751854, -0.03844163194298744, 0.008611990138888359, -0.0031642152462154627, -0.02584621123969555, -0.08204431086778641, 0.006246509030461311, 0.005030487664043903, 0.03838019073009491, -0.032113201916217804, 0.02203686349093914, 0.04186185076832771, -0.013783280737698078, -0.0034995810128748417, -0.005806181114166975, 0.02818097174167633, -0.008089740760624409, -0.04341835901141167, -0.01732638292014599, -0.017705269157886505, -0.05644386261701584, -0.015964439138770103, 0.015012102201581001, 0.006722677033394575, 0.009948333725333214, 0.04218953475356102, 0.05820517614483833, 0.04694097861647606, 0.026030534878373146, -0.023654812946915627, -0.010516663081943989, 0.014520573429763317, -0.04829268157482147, 0.012626140378415585, 0.020080991089344025, -0.011755725368857384, 0.008253583684563637, -0.02381865493953228, 0.012011729180812836, -0.0015705873956903815, -0.026808787137269974, 0.025047477334737778, 0.01603611931204796, -0.002360361162573099, 0.006313070189207792, 0.027607521042227745, -0.008007819764316082, -0.009784490801393986, 0.001804831437766552, 0.03153974935412407, -0.056525785475969315, 0.005724259652197361, -0.021504374220967293, -0.011581641621887684, -0.0017830710858106613, -0.009118879213929176, 0.0008339345222339034, -0.009513125754892826, 0.04927574098110199, 0.016599329188466072, 0.04358220100402832, -0.0006348910974338651, -0.003113014390692115, 0.005381213966757059, -0.014244087971746922, -0.03608638793230057, -0.01856544427573681, 0.006313070189207792, -0.05136473476886749, -0.01970210298895836, -0.03362874686717987, -0.022446472197771072, 0.0576317235827446, -0.04431949183344841, -0.0436641201376915, -0.0021849980112165213, 0.008924315683543682, -0.028201451525092125, -0.0027981288731098175, 0.04235338047146797, -0.003909188322722912, 0.029245950281620026, 0.028733940795063972, 0.037417612969875336, 0.005606497637927532, 0.004183113109320402, 0.004213833715766668, 0.006394991651177406, 0.003141174791380763, -0.010255538858473301, -0.03455036133527756, 0.019599702209234238, 0.04354123771190643, 0.017049897462129593, -0.013568236492574215, 0.01190932746976614, 0.010700986720621586, -0.03180599585175514, 0.0026624463498592377, -0.006400111597031355, 0.05996648594737053, 0.013629677705466747, 0.0020390755962580442, 0.011929808184504509, -0.05472351610660553, -0.0569353923201561, 0.0225283931940794, -0.004899925552308559, 0.015923477709293365, -0.018299199640750885, -0.056771550327539444, -0.01986594684422016, 0.06279277801513672, 0.007444609887897968, -0.005831781774759293, 0.0036839041858911514, 0.005360733717679977, 0.00666123628616333, -0.023060882464051247, -0.025969093665480614, -0.009876652620732784, 0.0010195377981290221, 0.008673431351780891, -0.0253137219697237, -0.04218953475356102, 0.02476075105369091, -0.014797057956457138, 0.025539005175232887, -0.027709923684597015, 0.013936882838606834, 0.005785700865089893, 0.010368180461227894, -0.0006460912409238517, 0.005714019760489464, 0.01344535406678915, -0.006302829831838608, 0.012185812927782536, -0.0016345884650945663, 0.0309048593044281, -0.004649041220545769, 0.022671755403280258, -0.006533233914524317, 0.028652019798755646, -0.025887170806527138, 0.0092571210116148, -0.01698845624923706, 0.018084155395627022, 0.011161794885993004, 0.010388661175966263, 0.022446472197771072, -0.02639918029308319, -0.01643548719584942, 0.021852541714906693, -0.015565071254968643, 0.010311859659850597, -0.02207782492041588, -0.03278905153274536, 0.022016383707523346, 
... (several hundred embedding floats truncated for readability) ...], index=0)], model='text-embedding-3-small', usage=Usage(prompt_tokens=7, total_tokens=7), error=None, raw_response=None, input=None)\n"
+ ]
+ }
+ ],
+ "source": [
+ "build_custom_model_client()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "BLAF5qTEmoyW"
+ },
+ "source": [
+ "# Issues and feedback\n",
+ "\n",
+ "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n",
+ "\n",
+ "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)."
+ ]
+ }
+ ],
+ "metadata": {
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}
diff --git a/tutorials/adalflow_modelclient_sync_and_async.py b/tutorials/adalflow_modelclient_sync_and_async.py
new file mode 100644
index 00000000..555d0311
--- /dev/null
+++ b/tutorials/adalflow_modelclient_sync_and_async.py
@@ -0,0 +1,136 @@
+import asyncio
+import time
+from adalflow.components.model_client import (
+ OpenAIClient,
+) # Assuming OpenAIClient with .call() and .acall() is available
+from adalflow.core.types import ModelType
+
+from getpass import getpass
+import os
+
+from adalflow.utils import setup_env
+
+# Load environment variables: make sure OPENAI_API_KEY is set in a .env file in the current folder
+if os.path.isfile(".env"):
+ setup_env(".env")
+
+# Prompt the user to enter their API key securely if it is not already set
+if "OPENAI_API_KEY" not in os.environ:
+ openai_api_key = getpass("Please enter your OpenAI API key: ")
+ # Set the environment variable for downstream use
+ os.environ["OPENAI_API_KEY"] = openai_api_key
+ print("API key has been set.")
+
+
+# Synchronous function for benchmarking .call()
+def benchmark_sync_call(api_kwargs, runs=10):
+ """
+ Benchmark the synchronous .call() method by running it multiple times.
+
+ Parameters:
+ - api_kwargs: The arguments to be passed to the API call
+ - runs: The number of times to run the call (default is 10)
+ """
+
+ # Record the start time of the benchmark
+ start_time = time.time()
+
+ # Perform synchronous API calls for the specified number of runs
+ responses = [
+ openai_client.call(
+ api_kwargs=api_kwargs, # API arguments
+ model_type=ModelType.LLM, # Model type (e.g., LLM for language models)
+ )
+ for _ in range(runs) # Repeat 'runs' times
+ ]
+
+ # Record the end time after all calls are completed
+ end_time = time.time()
+
+ # Output the results of each synchronous call
+ for i, response in enumerate(responses):
+ print(f"sync call {i + 1} completed: {response}")
+
+ # Print the total time taken for all synchronous calls
+ print(f"\nSynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+
+# Asynchronous function for benchmarking .acall()
+async def benchmark_async_acall(api_kwargs, runs=10):
+ """
+ Benchmark the asynchronous .acall() method by running it multiple times concurrently.
+
+ Parameters:
+ - api_kwargs: The arguments to be passed to the API call
+ - runs: The number of times to run the asynchronous call (default is 10)
+ """
+ # Record the start time of the benchmark
+ start_time = time.time()
+
+ # Create a list of asynchronous tasks for the specified number of runs
+ tasks = [
+ openai_client.acall(
+ api_kwargs=api_kwargs, # API arguments
+ model_type=ModelType.LLM, # Model type (e.g., LLM for language models)
+ )
+ for _ in range(runs) # Repeat 'runs' times
+ ]
+
+ # Execute all tasks concurrently and wait for them to finish
+ responses = await asyncio.gather(*tasks)
+
+ # Record the end time after all tasks are completed
+ end_time = time.time()
+
+ # Output the results of each asynchronous call
+ for i, response in enumerate(responses):
+ print(f"Async call {i + 1} completed: {response}")
+
+ # Print the total time taken for all asynchronous calls
+ print(f"\nAsynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+
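+# A minimal sketch (an addition to the tutorial, not part of the original benchmark) showing
+# how to cap concurrency with asyncio.Semaphore, e.g. to stay within provider rate limits.
+# The name benchmark_async_acall_bounded and the limit of 5 are illustrative choices.
+async def benchmark_async_acall_bounded(api_kwargs, runs=10, limit=5):
+ semaphore = asyncio.Semaphore(limit)
+
+ async def bounded_call():
+ # At most `limit` requests are in flight at any moment
+ async with semaphore:
+ return await openai_client.acall(api_kwargs=api_kwargs, model_type=ModelType.LLM)
+
+ start_time = time.time()
+ responses = await asyncio.gather(*[bounded_call() for _ in range(runs)])
+ end_time = time.time()
+
+ for i, response in enumerate(responses):
+ print(f"Bounded async call {i + 1} completed: {response}")
+
+ print(f"\nBounded asynchronous benchmark completed in {end_time - start_time:.2f} seconds")
+
+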
+if __name__ == "__main__":
+ # Initialize the OpenAI client
+ openai_client = OpenAIClient()
+
+ # Sample prompt for testing
+ prompt = "Tell me a joke."
+
+ model_kwargs = {"model": "gpt-3.5-turbo", "temperature": 0.5, "max_tokens": 100}
+ api_kwargs = openai_client.convert_inputs_to_api_kwargs(
+ input=prompt, model_kwargs=model_kwargs, model_type=ModelType.LLM
+ )
+ # Run both benchmarks
+ print("Starting synchronous benchmark...\n")
+ benchmark_sync_call(api_kwargs)
+
+ print("\nStarting asynchronous benchmark...\n")
+ asyncio.run(benchmark_async_acall(api_kwargs))
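+
+ # Note: the synchronous benchmark scales roughly linearly with runs, while the
+ # asynchronous benchmark overlaps requests and typically finishes in close to the
+ # time of the slowest single call (subject to API rate limits).
+ # To try the rate-limited variant sketched above:
+ # asyncio.run(benchmark_async_acall_bounded(api_kwargs))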