Example Code

```python
import logging

from langchain.agents import AgentExecutor, create_openai_functions_agent
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_ollama import ChatOllama


# Method of an agent class that provides self.memory and self.tools.
async def get_response(self, messages, model):
    try:
        logging.debug("Starting get_response method")
        # Note: memory and config are prepared here but not yet wired into the agent.
        memory = self.memory
        logging.debug(f"Memory initialized: {memory}")
        llm = ChatOllama(model=model)
        logging.debug(f"ChatOllama initialized with model: {model}")
        config = {"configurable": {"thread_id": "abc123"}}
        logging.debug(f"Config set: {config}")
        # The agent scratchpad holds a list of messages, so it belongs in a
        # MessagesPlaceholder rather than inside the system string.
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful assistant."),
                ("human", "{input}"),
                MessagesPlaceholder("agent_scratchpad"),
            ]
        )
        logging.debug(f"Prompt template created: {prompt}")
        agent = create_openai_functions_agent(llm=llm, tools=self.tools, prompt=prompt)
        logging.debug("Agent created")
        agent_executor = AgentExecutor(agent=agent, tools=self.tools)
        logging.debug("AgentExecutor created")

        # Check if messages is None or empty
        if not messages:
            logging.error("Messages list is empty or None")
            return "Error: No messages provided."

        # Extract the first user input from the messages
        user_input = next(
            (message["content"] for message in messages if message["role"] == "user"),
            "",
        )
        logging.debug(f"User input extracted: {user_input}")

        # Wrap the list of messages in a dictionary and include the user input
        formatted_input = {
            "messages": [
                {"role": message["role"], "content": message["content"]}
                for message in messages
            ],
            "input": user_input,
        }
        logging.debug(f"Formatted input: {formatted_input}")

        # ainvoke is the async counterpart of invoke; awaiting the synchronous
        # invoke (which returns a plain dict) raises a TypeError.
        response = await agent_executor.ainvoke(formatted_input)
        logging.debug(f"Response received: {response}")
        if response is None:
            logging.error("Received None response from agent_executor.ainvoke")
            return "Error: No response received from the agent."

        # The executor returns a dict; pull out intermediate steps and the output
        if isinstance(response, dict):
            intermediate_steps = response.get("intermediate_steps")
            final_answer = response.get("output", "No final answer found.")
            logging.debug(f"Intermediate steps: {intermediate_steps}")
            logging.debug(f"Final answer: {final_answer}")
            if intermediate_steps is not None:
                for step in intermediate_steps:
                    logging.debug(f"Intermediate Step: {step}")
            return final_answer
        return str(response)
    except Exception as e:
        logging.error(f"Error in get_response: {e}", exc_info=True)
        return f"Error in get_response: {str(e)}"
```

Description

I'm trying to get a response from Ollama (I'm using the model llama3.1). Whenever it is needed, function calling should be used. The request is split into messages and model before being passed to get_response().
System Info

langchain==0.3.8 | macOS Sequoia 15.1.1 | Python 3.12.5
Replies: 2 comments · 3 replies
-
@qubit999, where exactly is the TypeError coming from?
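The post doesn't include a traceback, so this is only an assumption, but one common source of a TypeError in this pattern is awaiting the synchronous invoke: AgentExecutor.invoke returns a plain dict, which is not awaitable. A small sketch, reusing agent_executor and formatted_input from the question:

```python
# Awaiting the synchronous invoke raises:
#   TypeError: object dict can't be used in 'await' expression
result = agent_executor.invoke(formatted_input)          # sync: do not await
result = await agent_executor.ainvoke(formatted_input)   # async counterpart
```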
-
@qubit999 try upgrading the ollama package to 0.4.1
-
The solution is upgrading the ollama package to 0.4.1. Thank you @feijoes!
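For anyone landing here, a quick way to apply and verify that upgrade; the version pin is simply the one mentioned in the thread:

```python
# Shell: pip install --upgrade "ollama>=0.4.1"
# Then verify the installed version from Python:
from importlib.metadata import version

print(version("ollama"))  # should print 0.4.1 or newer
```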