You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.
Example Code
def route_memory(state):
    """Route the question to either wipe or fetch memory.

    Args:
        state (dict): The current graph state.

    Returns:
        str: Name of the next node to call ("wipememory" or "fetchmemory").
    """
    print("---ROUTE QUESTION---")
    if state["question"] in ['exit', 'leave']:
        print("---ROUTE QUESTION TO WIPE MEMORY---")
        state["memory"] = "wipememory"
        return "wipememory"
    else:
        print("---ROUTE QUESTION TO FETCH MEMORY---")
        state["memory"] = "fetchmemory"
        return "fetchmemory"


def clear_memory(state):
    """Clear the memory.

    Args:
        state (dict): The current graph state.

    Returns:
        dict: State update carrying a fresh ConversationBufferMemory.
    """
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        output_key='answer'
    )
    return {'memory': memory}


def fetch_memory(state):
    """Fetch memory from the DB (stub).

    NOTE(review): `memory` is not defined anywhere in this scope, so calling
    this as written raises NameError — the DB-fetch logic still needs to be
    implemented before this node can work.
    """
    # fetch memory logic from DB
    return {"memory": memory}
@tool
def retrieve_generate_update(config: RunnableConfig,
                             state: Annotated[dict, InjectedState]) -> dict:
    """Helps answer standards-related queries and update the memory.

    Args:
        config: Runtime configuration injected by the framework.
        state: The current graph state (injected); carries the user's query
            used for retrieval and generation.

    Returns:
        dict: {"answer": ..., "reference": ...} from the generation step.
    """
    # BUG FIX: the original annotated the return type as `str` while actually
    # returning a dict; the annotation now matches the real return value.
    metadata = metadata_retrieve(state)
    indices = metadata["indices"]
    documents = metadata['documents']  # NOTE(review): unused below — confirm it is needed
    reformed_question = metadata["reformed_question"]
    generated_response = generate(state, indices, reformed_question)
    print(generated_response["llm_answer"])
    print(generated_response["llm_reference"])
    return {
        "answer": generated_response["llm_answer"],
        "reference": generated_response["llm_reference"]
    }
@tool
def CustomCaseTool(string: str, case: Literal["upper", "lower"], config: RunnableConfig,
                   state: Annotated[dict, InjectedState],
                   ) -> str:
    """Converts the case of the string to upper or lower case."""
    # BUG FIX: the docstring claimed upper-case only, which misleads the LLM's
    # tool selection — the tool supports both "upper" and "lower".
    print(string)
    print(config["metadata"])
    print(state["question"], state["user_session"])
    print(case)
    if case == "upper":
        return string.upper()
    elif case == "lower":
        return string.lower()
    # Previously fell through and implicitly returned None; fail loudly so a
    # bad argument from the model surfaces as a tool error instead.
    raise ValueError(f"Unsupported case: {case!r}")
class GraphState(TypedDict):
    """Graph state: a dictionary of information we want to propagate to,
    and modify in, each graph node.
    """
    question: str       # User question
    user_session: str   # User session
    messages: Annotated[list[AnyMessage], add_messages]
    memory: Any


class Assistant:
    """Wraps a runnable; re-invokes it until the LLM returns real output."""

    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    def __call__(self, state: GraphState, config: RunnableConfig):
        while True:
            result = self.runnable.invoke(state)
            # If the LLM happens to return an empty response, we will
            # re-prompt it for an actual response.
            empty_content = (
                not result.content
                or isinstance(result.content, list)
                and not result.content[0].get("text")
            )
            if not result.tool_calls and empty_content:
                messages = state["messages"] + [("user", "Respond with a real output.")]
                state = {**state, "messages": messages}
            else:
                break
        return {"messages": result}
def handle_tool_error(state) -> dict:
    """Convert a tool-execution error into ToolMessages so the LLM can retry.

    Args:
        state: Graph state containing "error" and the last message's tool calls.

    Returns:
        dict: A "messages" update with one ToolMessage per failed tool call.
    """
    error = state.get("error")
    tool_calls = state["messages"][-1].tool_calls
    error_messages = [
        ToolMessage(
            content=f"Error: {repr(error)}\n please fix your mistakes.",
            tool_call_id=tc["id"],
        )
        for tc in tool_calls
    ]
    return {"messages": error_messages}
def create_tool_node_with_fallback(tools: list) -> dict:
    """Build a ToolNode whose execution errors fall back to handle_tool_error."""
    node = ToolNode(tools)
    fallback = RunnableLambda(handle_tool_error)
    return node.with_fallbacks([fallback], exception_key="error")
# Prompt for the standards assistant.
assistant_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant."
            " Use the provided tools to convert the case, retrieve relevant data and other information to assist the user's queries. "
            " When searching, be persistent. Expand your query bounds if the first search returns no results. "
            " If a search comes up empty, expand your search before giving up.",
        ),
        ("placeholder", "{messages}"),
    ]
)

standards_tools = [
    CustomCaseTool,
    retrieve_generate_update
]

standards_assistant_runnable = assistant_prompt | llm.bind_tools(standards_tools)

# Wire the graph: memory routing at entry, then an assistant <-> tools loop.
workflow = StateGraph(GraphState)
workflow.add_node("clear_memory", clear_memory)  # memory clear
workflow.add_node("fetch_memory", fetch_memory)
workflow.add_node("assistant", Assistant(standards_assistant_runnable))
workflow.add_node("tools", create_tool_node_with_fallback(standards_tools))
workflow.set_conditional_entry_point(
    route_memory,
    {
        "wipememory": "clear_memory",
        "fetchmemory": "fetch_memory",
    },
)
workflow.add_edge("fetch_memory", "assistant")
workflow.add_edge("clear_memory", "assistant")
workflow.add_conditional_edges(
    "assistant",
    tools_condition,
)
workflow.add_edge("tools", "assistant")
graph = workflow.compile()
_printed=set()
def_print_event(event: dict, _printed: set, max_length=1500):
current_state=event.get("dialog_state")
ifcurrent_state:
print("Currently in: ", current_state[-1])
message=event.get("messages")
ifmessage:
ifisinstance(message, list):
message=message[-1]
ifmessage.idnotin_printed:
msg_repr=message.pretty_repr(html=True)
iflen(msg_repr) >max_length:
msg_repr=msg_repr[:max_length] +" ... (truncated)"_printed.add(message.id)
config = {
    "configurable": {
        # Checkpoints are accessed by thread_id
        "thread_id": '12121',
    }
}

inputs = {
    "messages": "Convert the following text 'Hello! There How are you ALEX?' to lower case.",
    "user_session": 8787,
    "question": {"hello": 'sfmslfmslfs', "type": 12121}
}

# NOTE(review): `tutorial_questions` is not defined in this file, and the loop
# variable `question` is never used — the same `inputs` are streamed on every
# iteration. Confirm whether each tutorial question was meant to be streamed.
for question in tutorial_questions:
    events = graph.stream(
        inputs, config, stream_mode="values"
    )
    for event in events:
        _print_event(event, _printed)
This is the response I am getting:
---ROUTE QUESTION---
---ROUTE QUESTION TO FETCH MEMORY---
======================3===================================
Hello! There How are you ALEX?
{'thread_id': '12121', 'langgraph_step': 3, 'langgraph_node': 'tools', 'langgraph_triggers': ['branch:assistant:tools_condition:tools'], 'langgraph_path': ('__pregel_pull', 'tools'), 'langgraph_checkpoint_ns': 'tools:cd243836-d245-cc50-ced3-6121f6a810b9', 'checkpoint_ns': 'tools:cd243836-d245-cc50-ced3-6121f6a810b9'}
{'hello': 'sfmslfmslfs', 'type': 12121} 8787lower
Error Message and Stack Trace (if applicable)
No Error, just that response is not coming.
Description
I am creating a bot, which first fetches the memory (without tools) post that it enters in tool mode and runs the tool as required. But I am not getting the response printed. Can Anyone please help! @eric-langchain@nfcampos@Glavin001
System Info
Please Help!
The text was updated successfully, but these errors were encountered:
Checked other resources
Example Code
Error Message and Stack Trace (if applicable)
Description
I am creating a bot, which first fetches the memory (without tools) post that it enters in tool mode and runs the tool as required. But I am not getting the response printed. Can Anyone please help!
@eric-langchain @nfcampos @Glavin001
System Info
Please Help!
The text was updated successfully, but these errors were encountered: