# app.py — LangGraph AI Agent service (FastAPI + LangGraph + Groq).
# Import necessary modules and setup for FastAPI, LangGraph, and LangChain

# Standard library
import os  # environment variable handling
from typing import List  # List type hint for the request schema

# Third-party
from fastapi import FastAPI, HTTPException  # web framework + HTTP error responses
from pydantic import BaseModel  # structured request data models
from langchain_community.tools.tavily_search import TavilySearchResults  # Tavily web-search tool
from langchain_groq import ChatGroq  # Groq-hosted chat LLM client
from langgraph.prebuilt import create_react_agent  # factory for ReAct agents
# Retrieve and set API keys for external tools and services
groq_api_key = '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' # Groq API key
os.environ["TAVILY_API_KEY"] = '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' # Set Tavily API key
# Predefined list of supported model names
MODEL_NAMES = [
"llama3-70b-8192", # Model 1: Llama 3 with specific configuration
"mixtral-8x7b-32768" # Model 2: Mixtral with specific configuration
]
# Initialize the TavilySearchResults tool with a specified maximum number of results.
tool_tavily = TavilySearchResults(max_results=2) # Allows retrieving up to 2 results
# Combine the TavilySearchResults and ExecPython tools into a list.
tools = [tool_tavily, ]
# FastAPI application setup with a title
app = FastAPI(title='LangGraph AI Agent')
# Define the request schema using Pydantic's BaseModel
class RequestState(BaseModel):
model_name: str # Name of the model to use for processing the request
system_prompt: str # System prompt for initializing the model
messages: List[str] # List of messages in the chat
# Define an endpoint for handling chat requests
@app.post("/chat")
def chat_endpoint(request: RequestState):
"""
API endpoint to interact with the chatbot using LangGraph and tools.
Dynamically selects the model specified in the request.
"""
if request.model_name not in MODEL_NAMES:
# Return an error response if the model name is invalid
return {"error": "Invalid model name. Please select a valid model."}
# Initialize the LLM with the selected model
llm = ChatGroq(groq_api_key=groq_api_key, model_name=request.model_name)
# Create a ReAct agent using the selected LLM and tools
agent = create_react_agent(llm, tools=tools, state_modifier=request.system_prompt)
# Create the initial state for processing
state = {"messages": request.messages}
# Process the state using the agent
result = agent.invoke(state) # Invoke the agent (can be async or sync based on implementation)
# Return the result as the response
return result
# Run the application if executed as the main script
if __name__ == '__main__':
import uvicorn # Import Uvicorn server for running the FastAPI app
uvicorn.run(app, host='127.0.0.1', port=8000) # Start the app on localhost with port 8000