Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

I am not getting output in langgraph, but tool is running. #2872

Open
4 tasks done
jaytimbadia opened this issue Dec 25, 2024 · 0 comments
Open
4 tasks done

I am not getting output in langgraph, but tool is running. #2872

jaytimbadia opened this issue Dec 25, 2024 · 0 comments

Comments

@jaytimbadia
Copy link

Checked other resources

  • This is a bug, not a usage question. For questions, please use GitHub Discussions.
  • I added a clear and detailed title that summarizes the issue.
  • I read what a minimal reproducible example is (https://stackoverflow.com/help/minimal-reproducible-example).
  • I included a self-contained, minimal example that demonstrates the issue INCLUDING all the relevant imports. The code runs AS IS to reproduce the issue.

Example Code

def route_memory(state):
    """
    Decide whether to wipe or fetch memory based on the user's question.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call ("wipememory" or "fetchmemory")
    """
    print("---ROUTE QUESTION---")
    # Exit phrases trigger a memory wipe; anything else fetches memory.
    route = "wipememory" if state["question"] in ('exit', 'leave') else "fetchmemory"
    if route == "wipememory":
        print("---ROUTE QUESTION TO WIPE MEMORY---")
    else:
        print("---ROUTE QUESTION TO FETCH MEMORY---")
    state["memory"] = route
    return route
    

def clear_memory(state):
    """
    Reset the conversation memory to a brand-new, empty buffer.

    Args:
        state (dict): The current graph state (not read here)

    Returns:
        dict: State update with a fresh ConversationBufferMemory under "memory"
    """
    fresh_buffer = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        output_key='answer',
    )
    return {'memory': fresh_buffer}

def fetch_memory(state):
    """
    Load the conversation memory for the current session.

    Args:
        state (dict): The current graph state

    Returns:
        dict: State update containing the fetched memory under "memory"
    """
    # TODO: implement the real fetch-from-DB logic here.
    # BUG FIX: the original body returned an undefined local `memory`,
    # which raised NameError the moment this node ran. Until the DB
    # lookup exists, fall back to whatever memory the state already holds.
    memory = state.get("memory")

    return {"memory": memory}

@tool
def retrieve_generate_update(config: RunnableConfig, 
                    state: Annotated[dict, InjectedState]) -> dict:
    """Answer a standards-related query and update the memory.

    Args:
        config (RunnableConfig): Runtime configuration injected by LangGraph.
        state (dict): Full graph state injected via InjectedState; the user's
            query and session are read from it by the helpers below.

    Returns:
        dict: {"answer": ..., "reference": ...} produced by the generation step.
    """
    # Retrieve search indices, documents and a reformulated question.
    metadata = metadata_retrieve(state)
    indices = metadata["indices"] 
    documents = metadata['documents']  # NOTE(review): fetched but never used below
    reformed_question = metadata["reformed_question"]

    # Generate the answer from the retrieved indices.
    generated_response = generate(state, indices, reformed_question)
    print(generated_response["llm_answer"])
    print(generated_response["llm_reference"])

    return {
        "answer": generated_response["llm_answer"],
        "reference": generated_response["llm_reference"]
    }    



@tool
def CustomCaseTool(string: str, case: Literal["upper", "lower"], config: RunnableConfig, 
                    state: Annotated[dict, InjectedState],
                   ) -> str:
    """Convert `string` to upper or lower case, as selected by `case`.

    Args:
        string (str): The text to convert.
        case ("upper" | "lower"): Target case.
        config (RunnableConfig): Runtime configuration injected by LangGraph.
        state (dict): Graph state injected via InjectedState.

    Returns:
        str: The converted string (implicitly None for any other `case`,
        which the Literal annotation is expected to rule out).
    """
    # Debug tracing of the inputs and injected runtime context.
    print(string)
    print(config["metadata"])
    print(state["question"], state["user_session"])
    print(case)
    if case == "upper":
        return string.upper()
    elif case == "lower":
        return string.lower()

class GraphState(TypedDict):
    """
    Graph state is a dictionary that contains information we want to propagate to, and modify in, each graph node.
    """
    question : str # User question
    user_session: str # User session
    # Chat history; add_messages appends new messages instead of overwriting.
    messages: Annotated[list[AnyMessage], add_messages]
    # Conversation memory object or a routing marker string — see route_memory.
    memory: Any

class Assistant:
    """Wraps a runnable and re-prompts until the LLM yields a non-empty reply."""

    def __init__(self, runnable: Runnable):
        self.runnable = runnable

    def __call__(self, state: GraphState, config: RunnableConfig):
        while True:
            result = self.runnable.invoke(state)
            if self._has_real_output(result):
                break
            # Empty response: nudge the model and invoke again.
            nudged = state["messages"] + [("user", "Respond with a real output.")]
            state = {**state, "messages": nudged}
        return {"messages": result}

    @staticmethod
    def _has_real_output(result) -> bool:
        """True when the model produced tool calls or non-empty content."""
        if result.tool_calls:
            return True
        content = result.content
        if not content:
            return False
        if isinstance(content, list) and not content[0].get("text"):
            return False
        return True
    
def handle_tool_error(state) -> dict:
    """Turn a tool exception into ToolMessages so the LLM can self-correct.

    Args:
        state (dict): Graph state; "error" holds the raised exception and the
            last message carries the tool calls that failed.

    Returns:
        dict: {"messages": [...]} with one error ToolMessage per failed call.
    """
    error = state.get("error")
    failed_calls = state["messages"][-1].tool_calls
    error_messages = []
    for call in failed_calls:
        error_messages.append(
            ToolMessage(
                content=f"Error: {repr(error)}\n please fix your mistakes.",
                tool_call_id=call["id"],
            )
        )
    return {"messages": error_messages}


def create_tool_node_with_fallback(tools: list) -> dict:
    """Build a ToolNode that routes tool exceptions to handle_tool_error.

    Args:
        tools (list): The tools to expose on this graph node.

    Returns:
        A runnable tool node with an error-handling fallback.
        NOTE(review): the `-> dict` annotation looks wrong — with_fallbacks
        returns a Runnable, not a dict; confirm and correct upstream.
    """
    return ToolNode(tools).with_fallbacks(
        [RunnableLambda(handle_tool_error)], exception_key="error"
    )

# System prompt for the assistant; "{messages}" is filled with chat history.
assistant_prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            "You are a helpful assistant."
            " Use the provided tools to convert the case, retrieve relevant data and other information to assist the user's queries. "
            " When searching, be persistent. Expand your query bounds if the first search returns no results. "
            " If a search comes up empty, expand your search before giving up.",
        ),
        ("placeholder", "{messages}"),
    ]
)

# Tools the model may call from the "assistant" node.
standards_tools = [
    CustomCaseTool,
    retrieve_generate_update
]
# Prompt piped into the tool-bound LLM; `llm` must be defined elsewhere in the file.
standards_assistant_runnable = assistant_prompt | llm.bind_tools(standards_tools)

workflow = StateGraph(GraphState)

workflow.add_node("clear_memory", clear_memory) #memory clear
workflow.add_node("fetch_memory", fetch_memory)
workflow.add_node("assistant", Assistant(standards_assistant_runnable))
workflow.add_node("tools", create_tool_node_with_fallback(standards_tools))

# Entry point: route_memory decides whether to wipe or fetch memory first.
workflow.set_conditional_entry_point(
    route_memory,
    {
        "wipememory": "clear_memory",
        "fetchmemory": "fetch_memory",
    },
)

# Both memory branches funnel into the assistant node.
workflow.add_edge("fetch_memory", "assistant")
workflow.add_edge("clear_memory", "assistant")


# assistant -> "tools" when the LLM requested a tool call, otherwise END.
workflow.add_conditional_edges(
    "assistant",
    tools_condition,
)
workflow.add_edge("tools", "assistant")


# NOTE(review): compiled without a checkpointer, yet the run config below
# passes a thread_id — pass e.g. MemorySaver() here if persistence is wanted.
graph = workflow.compile()


_printed = set()

def _print_event(event: dict, _printed: set, max_length=1500):
    current_state = event.get("dialog_state")
    if current_state:
        print("Currently in: ", current_state[-1])
    message = event.get("messages")
    if message:
        if isinstance(message, list):
            message = message[-1]
        if message.id not in _printed:
            msg_repr = message.pretty_repr(html=True)
            if len(msg_repr) > max_length:
                msg_repr = msg_repr[:max_length] + " ... (truncated)"
            _printed.add(message.id)


# Per-thread run config; thread_id only takes effect if the graph was
# compiled with a checkpointer (it was not, above).
config = {
    "configurable": {
        # Checkpoints are accessed by thread_id
        "thread_id": '12121',
    }
}

# NOTE(review): "question" is a dict here, so route_memory's membership test
# against ['exit', 'leave'] can never match — every run routes to fetch_memory.
inputs = {
    "messages": "Convert the following text 'Hello! There How are you ALEX?' to lower case.",
    "user_session": 8787,
    "question": {"hello": 'sfmslfmslfs', "type": 12121}
}
# NOTE(review): tutorial_questions is never defined in this snippet, and the
# same `inputs` is streamed on every iteration regardless of `question`.
for question in tutorial_questions:
    events = graph.stream(
        inputs, config, stream_mode="values"
    )
    for event in events:
        _print_event(event, _printed)

This is the response I am getting: 

---ROUTE QUESTION---
---ROUTE QUESTION TO FETCH MEMORY---
======================
3 ===================================
Hello! There How are you ALEX?
{'thread_id': '12121', 'langgraph_step': 3, 'langgraph_node': 'tools', 'langgraph_triggers': ['branch:assistant:tools_condition:tools'], 'langgraph_path': ('__pregel_pull', 'tools'), 'langgraph_checkpoint_ns': 'tools:cd243836-d245-cc50-ced3-6121f6a810b9', 'checkpoint_ns': 'tools:cd243836-d245-cc50-ced3-6121f6a810b9'}
{'hello': 'sfmslfmslfs', 'type': 12121} 8787
lower

Error Message and Stack Trace (if applicable)

No error — the graph runs and the tool executes, but the final response is never printed.

Description

I am creating a bot which first fetches the memory (without tools); after that it enters tool mode and runs the tools as required. But the response is not being printed. Can anyone please help?
@eric-langchain @nfcampos @Glavin001

System Info

first_bot

Please Help!

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant