Unexpected State Update #3266

Open
5 tasks done
Sarthak-ONS opened this issue Jan 31, 2025 · 3 comments
Labels
question Further information is requested

Comments


Sarthak-ONS commented Jan 31, 2025

Checked other resources

  • I added a very descriptive title to this issue.
  • I searched the LangChain documentation with the integrated search.
  • I used the GitHub search to find a similar question and didn't find it.
  • I am sure that this is a bug in LangChain rather than my code.
  • The bug is not resolved by updating to the latest stable version of LangChain (or the specific integration package).

Example Code

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool
from langchain_core.messages import HumanMessage, AIMessage
from langchain_core.runnables import RunnableConfig

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, add_messages
from langgraph.prebuilt import ToolNode, tools_condition

from typing import Annotated, List, TypedDict
from rich.console import Console
from rich.markdown import Markdown
from rich import print as rprint
from environs import Env

from prompts import system_prompt

from moviepy import *
from moviepy.video.io import ffmpeg_tools  # provides ffmpeg_extract_subclip, used below
import uuid

env = Env()
env.read_env()

console = Console()

# Memory configuration
memory = MemorySaver()

video_path = "video.mp4"

class VideoAppStateGraph(TypedDict):
    messages: Annotated[list, add_messages]
    video_path: str

@tool
def trim_video(start_time: str, end_time: str, state: VideoAppStateGraph, config: RunnableConfig):
    """
    Trim a video between specified start and end times.
    
    Args:
        start_time (str): Start time in format HH:MM:SS
        end_time (str): End time in format HH:MM:SS
    
    Returns:
        str: Confirmation message
    """
    
    print("-"*10)
    print("trim_video")
    # print(state)
    # print("-"*10)
    print(config)
    print("-"*10)
    print(start_time)
    print(end_time)
    print("-"*10)

    
    video_path_trimmed = f"{uuid.uuid4().hex}.mp4"
    ffmpeg_tools.ffmpeg_extract_subclip(state["video_path"], start_time, end_time, video_path_trimmed)

    return {
        "video_path": video_path_trimmed,
        "messages": [AIMessage(content=f"Video has been trimmed from {start_time} to {end_time}")],
    }

@tool
def get_video_duration(state: VideoAppStateGraph):
    """
    Get Duration of the Video
    
    
    Returns:
        str: Duration of the video
    """
    
    print("-"*10)
    print("get_video_duration")
    print(state["video_path"])
    print("-"*10)
    video = VideoFileClip(state["video_path"])

    duration = video.duration

    return {
        "video_path": state["video_path"],
        "messages": [AIMessage(content=f"Duration is {int(duration)} seconds")],
    }
    
    
@tool
def get_video_url(state: VideoAppStateGraph):
    """
    Get the video url of updated actions of video
    
    return str: video_url
    """
    
    base = env.str("HOSTED_BACKEND_URL")
    
    return {
        "video_path": state["video_path"],
        "messages": [AIMessage(content=f"{base}/{state['video_path']}")],
    }

# Define tools list
tools = [trim_video, get_video_duration, get_video_url]

# Create tool node
tool_node = ToolNode(tools=tools)

prompt = ChatPromptTemplate.from_messages([
    ("system", system_prompt),
    MessagesPlaceholder(variable_name="messages"),
])

# Initialize the model with tools
model = ChatOpenAI(
    temperature=0.5,
    api_key=env.str("OPENAI_API_KEY"),
).bind_tools(tools)


def call_model(state):
    """
    Process the current state and generate a response.
    """
    print("*"*10)
    print("call_model")
    print(state["video_path"])
    print("*"*10)
    response = model.invoke(prompt.invoke({"messages": state["messages"]}))
    return {"messages": [response]}    

# Create the workflow graph
workflow = StateGraph(VideoAppStateGraph)

# Add nodes
workflow.add_node("assistant", call_model)
workflow.add_node("tools", tool_node)

# Add Edges
workflow.add_edge(START, "assistant")
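# tools_condition routes to the "tools" node when the last AI message
# contains tool calls, and ends the run otherwise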
workflow.add_conditional_edges("assistant", tools_condition)
workflow.add_edge("tools", "assistant")


# Compile the app
app = workflow.compile(checkpointer=memory)

class VideoEditor:
    def __init__(self):
        self.messages: List[HumanMessage] = []
        self.video_path: str = video_path
        
    def process_message(self, user_input: str) -> str:
        """Process a single message and return the response"""
        self.messages.append(HumanMessage(content=user_input))
        
        response = app.invoke({
            "messages": self.messages,
            "video_path": self.video_path,
        }, config={"configurable": {"thread_id": "1312312312"}})
        
        # Extract the last assistant message
        last_message = response["messages"][-1]
        self.messages.extend(response["messages"])
        
        return last_message.content

def main():
    """Main CLI interface for the video editing agent"""
    editor = VideoEditor()
    
    # Print welcome message
    console.print("[bold blue]Welcome to the Video Editing Assistant![/bold blue]")
    console.print("Type 'quit' or 'exit' to end the conversation.\n")
    
    while True:
        try:
            # Get user input
            user_input = console.input("[bold green]You:[/bold green] ")
            
            # Check for exit command
            if user_input.lower() in ['quit', 'exit', 'bye', 'goodbye']:
                console.print("\n[bold blue]Goodbye! Thank you for using the Video Editing Assistant.[/bold blue]")
                break
            
            # Process the message
            response = editor.process_message(user_input)
            
            # Print the response with markdown formatting
            console.print("\n[bold purple]Assistant:[/bold purple]")
            console.print(Markdown(response))
            console.print()  # Empty line for better readability
            
        except KeyboardInterrupt:
            console.print("\n[bold red]Session terminated by user.[/bold red]")
            break
        except Exception as e:
            console.print(f"\n[bold red]An error occurred: {str(e)}[/bold red]")
            console.print("Please try again or type 'exit' to quit.")

if __name__ == "__main__":
    main()

Error Message and Stack Trace (if applicable)

No response

Description

Unexpected state change: initially, when call_model is called, the value of state["video_path"] is correct ("video.mp4"). But when there is a tool call, the state is unexpectedly updated, and the print inside the trim_video tool shows state["video_path"] as "sample_video.mp4".

System Info

System Information

OS: Windows
OS Version: 10.0.22631
Python Version: 3.13.0 (tags/v3.13.0:60403a5, Oct 7 2024, 09:38:07) [MSC v.1941 64 bit (AMD64)]

Package Information

langchain_core: 0.3.33
langsmith: 0.3.3
langchain_openai: 0.3.3
langgraph_sdk: 0.1.51

Optional packages not installed

langserve

Other Dependencies

httpx: 0.28.1
jsonpatch: 1.33
langsmith-pyo3: Installed. No version info available.
openai: 1.60.2
orjson: 3.10.15
packaging: 24.2
pydantic: 2.10.6
pytest: Installed. No version info available.
PyYAML: 6.0.2
requests: 2.32.3
requests-toolbelt: 1.0.0
rich: 13.9.4
tenacity: 9.0.0
tiktoken: 0.8.0
typing-extensions: 4.12.2
zstandard: 0.23.0

@eyurtsev eyurtsev transferred this issue from langchain-ai/langchain Jan 31, 2025
vbarda (Collaborator) commented Jan 31, 2025

@Sarthak-ONS that's because in your example the LLM is populating the state field (and it's hallucinating the video path). To propagate the actual graph state you need to use InjectedState: https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/#pass-graph-state-to-tools
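For example, a minimal sketch of that pattern (reusing the video_path field from your snippet; get_video_path is just an illustrative name):

from typing import Annotated

from langchain_core.tools import tool
from langgraph.prebuilt import InjectedState

@tool
def get_video_path(state: Annotated[dict, InjectedState]) -> str:
    """Return the video path currently tracked in the graph state."""
    # `state` is injected by ToolNode from the live graph state; it never
    # appears in the schema sent to the LLM, so it can't be hallucinated
    return state["video_path"]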

Sarthak-ONS (Author) commented

from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.tools import tool, InjectedToolCallId
from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
from langgraph.types import Command

from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import StateGraph, START, add_messages
from langgraph.prebuilt import ToolNode, tools_condition, InjectedState, create_react_agent
from langgraph.prebuilt.chat_agent_executor import AgentState

from typing import Annotated, List
from rich.console import Console
from rich.markdown import Markdown
from rich import print as rprint
from environs import Env

from prompts import system_prompt

from moviepy import *
from moviepy.video.io import ffmpeg_tools  # provides ffmpeg_extract_subclip, used below
import uuid

env = Env()
env.read_env()

console = Console()

# Memory configuration
memory = MemorySaver()

video_path = "video.mp4"

class VideoAppStateGraph(AgentState):
    messages: Annotated[list, add_messages]
    video_path: str

@tool
def trim_video(start_time: str, end_time: str, state: Annotated[dict, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId]):
    """
    Trim a video between specified start and end times.
    
    Args:
        start_time (str): Start time in format HH:MM:SS
        end_time (str): End time in format HH:MM:SS
    
    Returns:
        str: Confirmation message
    """
    
    print("-"*10)
    print("trim_video")
    print(state["video_path"])
    print(tool_call_id)
    # print("-"*10)
    print("-"*10)
    print(start_time)
    print(end_time)
    print("-"*10)

    
    video_path_trimmed = f"{uuid.uuid4()}.mp4"
    ffmpeg_tools.ffmpeg_extract_subclip(state["video_path"], start_time, end_time, video_path_trimmed)

    return Command(
        update={
            "video_path": video_path_trimmed,
            "messages": [ToolMessage(content=f"Video has been trimmed from {start_time} to {end_time}", tool_call_id=tool_call_id)],
        }
    )

@tool
def get_video_duration(state: Annotated[dict, InjectedState]):
    """
    Get Duration of the Video
    
    
    Returns:
        str: Duration of the video
    """
    
    print("-"*10)
    print("get_video_duration")
    print(state["video_path"])
    print("-"*10)
    video = VideoFileClip(state["video_path"])

    duration = video.duration

    return f"Duration is {int(duration)} seconds"
    
@tool
def get_video_url(state: Annotated[dict, InjectedState]):
    """
    Get the video url of updated actions of video
    
    return str: video_url
    """
    
    base = env.str("HOSTED_BACKEND_URL")
    
    print(state["video_path"])
    
    return f"{base}/{state['video_path']}"

# Define tools list
tools = [trim_video, get_video_duration, get_video_url]

# Initialize the model with tools
model = ChatOpenAI(
    temperature=0.5,
    api_key=env.str("OPENAI_API_KEY"),
)

agent = create_react_agent(model=model, tools=tools, prompt=system_prompt, state_schema=VideoAppStateGraph, store=MemorySaver)

class VideoEditor:
    def __init__(self):
        self.messages: List[HumanMessage] = []
        self.video_path: str = video_path
        
    def process_message(self, user_input: str) -> str:
        """Process a single message and return the response"""
        self.messages.append(HumanMessage(content=user_input))
        
        response = agent.invoke({
            "messages": self.messages,
            "video_path": self.video_path,
        }, config={"configurable": {"thread_id": "1312312312"}})
        
        # Extract the last assistant message
        last_message = response["messages"][-1]
        self.messages.extend(response["messages"])
        
        return last_message.content

def main():
    """Main CLI interface for the video editing agent"""
    editor = VideoEditor()
    
    # Print welcome message
    console.print("[bold blue]Welcome to the Video Editing Assistant![/bold blue]")
    console.print("Type 'quit' or 'exit' to end the conversation.\n")
    
    while True:
        try:
            # Get user input
            user_input = console.input("[bold green]You:[/bold green] ")
            
            # Check for exit command
            if user_input.lower() in ['quit', 'exit', 'bye', 'goodbye']:
                console.print("\n[bold blue]Goodbye! Thank you for using the Video Editing Assistant.[/bold blue]")
                break
            
            # Process the message
            response = editor.process_message(user_input)
            
            # Print the response with markdown formatting
            console.print("\n[bold purple]Assistant:[/bold purple]")
            console.print(Markdown(response))
            console.print()  # Empty line for better readability
            
        except KeyboardInterrupt:
            console.print("\n[bold red]Session terminated by user.[/bold red]")
            break
        except Exception as e:
            console.print(f"\n[bold red]An error occurred: {str(e)}[/bold red]")
            console.print("Please try again or type 'exit' to quit.")

if __name__ == "__main__":
    main()

After going through this documentation:

https://langchain-ai.github.io/langgraph/how-tos/pass-run-time-values-to-tools/#use-it_1
https://github.com/langchain-ai/langgraph/blob/main/docs/docs/how-tos/update-state-from-tools.ipynb

the state update isn't happening.

In the trim_video tool I am trying to update the state, but the update never takes effect. After trimming, calling get_video_url still returns the initial video_path, and the same happens with get_video_duration.
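For reference, the update pattern I am following, distilled from that notebook (set_video_path is just an illustrative name for the pattern, not a tool in my app):

from typing import Annotated

from langchain_core.messages import ToolMessage
from langchain_core.tools import tool, InjectedToolCallId
from langgraph.types import Command

@tool
def set_video_path(new_path: str, tool_call_id: Annotated[str, InjectedToolCallId]) -> Command:
    """Record a new video path in the graph state."""
    # returning a Command lets ToolNode write to arbitrary state keys,
    # not just append the ToolMessage to `messages`
    return Command(update={
        "video_path": new_path,
        "messages": [ToolMessage(content=f"video_path set to {new_path}", tool_call_id=tool_call_id)],
    })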

vbarda (Collaborator) commented Feb 3, 2025

Can you provide some LangSmith traces, or boil the code down to a minimal reproducible example?

@vbarda vbarda added the question Further information is requested label Feb 3, 2025