-
Notifications
You must be signed in to change notification settings - Fork 1
/
agent.py
101 lines (90 loc) · 3.04 KB
/
agent.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
from typing import Optional
from tools import (
get_peloton_classes,
get_recent_user_workouts,
add_class_to_stack,
get_user_workout_preferences,
set_user_workout_preferences,
get_classes_in_stack,
clear_classes_in_stack,
determine_fitness_discipline
)
from prompts import (
AGENT_SYSTEM_MSG
)
from langchain.tools.render import format_tool_to_openai_function
from langchain.chat_models import ChatOpenAI
from langchain.prompts import MessagesPlaceholder, ChatPromptTemplate
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.schema.messages import AIMessage, HumanMessage
from langchain.agents import AgentExecutor
class PeloAgent:
    """Conversational agent for browsing Peloton classes and managing a stack.

    Wraps a LangChain OpenAI-functions agent. Chat history is kept on the
    instance so consecutive calls to ``invoke`` share conversational context.
    """

    def __init__(self) -> None:
        # Chat model driving the agent; temperature 0 keeps tool
        # selection as deterministic as the model allows.
        chat_model = ChatOpenAI(
            model="gpt-4o",
            temperature=0,
        )

        # Tools the agent is allowed to call (order preserved — it is
        # the order the schemas are presented to the model).
        available_tools = [
            get_peloton_classes,
            get_recent_user_workouts,
            add_class_to_stack,
            get_classes_in_stack,
            get_user_workout_preferences,
            set_user_workout_preferences,
            clear_classes_in_stack,
            determine_fitness_discipline,
        ]

        # Expose the tools to the model as OpenAI function schemas.
        function_schemas = [
            format_tool_to_openai_function(tool) for tool in available_tools
        ]
        model_with_functions = chat_model.bind(functions=function_schemas)

        # Prompt layout: system message, prior turns, the new user
        # turn, then the agent's intermediate tool-call scratchpad.
        history_key = "chat_history"
        agent_prompt = ChatPromptTemplate.from_messages(
            [
                ("system", AGENT_SYSTEM_MSG),
                MessagesPlaceholder(variable_name=history_key),
                ("user", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )

        # Runnable pipeline: map raw invoke() inputs into prompt
        # variables, render the prompt, call the bound model, and
        # parse its function-call output into agent actions.
        input_mapping = {
            "input": lambda payload: payload["input"],
            "agent_scratchpad": lambda payload: format_to_openai_function_messages(
                payload["intermediate_steps"]
            ),
            "chat_history": lambda payload: payload["chat_history"],
        }
        functions_agent = (
            input_mapping
            | agent_prompt
            | model_with_functions
            | OpenAIFunctionsAgentOutputParser()
        )

        # Conversation memory; appended to by invoke() on every turn.
        self.chat_history = []
        self.agent_executor = AgentExecutor(
            agent=functions_agent,
            tools=available_tools,
            verbose=True,
            return_intermediate_steps=True,
            handle_parsing_errors="Check your output and make sure it conforms!",
        )

    def invoke(self, user_msg: str) -> str:
        """Run one conversational turn and return the agent's reply.

        Both the user message and the agent's answer are appended to
        the stored chat history so later turns can reference them.
        """
        result = self.agent_executor.invoke(
            {
                "input": user_msg,
                "chat_history": self.chat_history,
            }
        )
        # Persist both sides of the exchange for the next turn.
        self.chat_history.append(HumanMessage(content=user_msg))
        self.chat_history.append(AIMessage(content=result["output"]))
        return result["output"]