Get error of "TypeError: can only concatenate list (not "tuple") to list" when I was testing "Plan-and-Execute" example #22878
Replies: 3 comments 3 replies
-
@LeonMing30 Hello there! I'm here to assist you with any bugs, questions, or contributions. Let's tackle this issue together. To resolve the "TypeError: can only concatenate list (not "tuple") to list" error, ensure that the "past_steps" value is a list before it is concatenated. In your example, it seems that the issue arises from the "past_steps" value being streamed as a tuple rather than a list. Here's how you can modify your code to fix this issue: config = {"recursion_limit": 50}
inputs = {"input": "what is the hometown of the 2024 Australia open winner?"}
# Stream events from the compiled graph; `app` and `config` are assumed to come
# from the plan-and-execute tutorial setup — confirm against the caller.
async for event in app.astream(inputs, config=config):
for k, v in event.items():
if k != "__end__":
# Ensure past_steps is a list before concatenation
# (the state channel is declared as a list, so a tuple would raise TypeError)
if k == "past_steps" and isinstance(v, tuple):
v = list(v)
print(v) This modification ensures that Additionally, you should check the def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
# Synchronous plan-and-execute loop: draft the plan once, then run each
# step through the executor, accumulating results in self.step_container.
plan = self.planner.plan(
inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
# Give the executor the prior steps, the current step, and the objective.
_new_inputs = {
"previous_steps": self.step_container,
"current_step": step,
"objective": inputs[self.input_key],
}
# NOTE: values from `inputs` take precedence over `_new_inputs` on key collisions.
new_inputs = {**_new_inputs, **inputs}
response = self.executor.step(
new_inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
run_manager.on_text(
f"*****\n\nStep: {step.value}", verbose=self.verbose
)
run_manager.on_text(
f"\n\nResponse: {response.response}", verbose=self.verbose
)
self.step_container.add_step(step, response)
# The step container derives the final answer from all recorded steps.
return {self.output_key: self.step_container.get_final_response()}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
# Async mirror of _call: plan once, then execute each step sequentially,
# awaiting the planner/executor and callback notifications.
plan = await self.planner.aplan(
inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
await run_manager.on_text(str(plan), verbose=self.verbose)
for step in plan.steps:
# Give the executor the prior steps, the current step, and the objective.
_new_inputs = {
"previous_steps": self.step_container,
"current_step": step,
"objective": inputs[self.input_key],
}
# NOTE: values from `inputs` take precedence over `_new_inputs` on key collisions.
new_inputs = {**_new_inputs, **inputs}
response = await self.executor.astep(
new_inputs,
callbacks=run_manager.get_child() if run_manager else None,
)
if run_manager:
await run_manager.on_text(
f"*****\n\nStep: {step.value}", verbose=self.verbose
)
await run_manager.on_text(
f"\n\nResponse: {response.response}", verbose=self.verbose
)
self.step_container.add_step(step, response)
# The step container derives the final answer from all recorded steps.
return {self.output_key: self.step_container.get_final_response()} Ensure that any concatenation in your "Plan-and-Execute" example follows this pattern [1]. |
Beta Was this translation helpful? Give feedback.
-
me too??? |
Beta Was this translation helpful? Give feedback.
-
@cristianohello @LeonMing30 @Andor233 To solve it you need to change the returned "past_steps" from execute_step to a list, because it is declared as a list of tuples, not a tuple. In the 'execute_step' function, change: return {"past_steps": (task, agent_response["messages"][-1].content)} To the following: return {"past_steps": [(task, agent_response["messages"][-1].content)]} |
Beta Was this translation helpful? Give feedback.
-
Checked other resources
Commit to Help
Example Code
Description
I have copied the whole programme in https://langchain-ai.github.io/langgraph/tutorials/plan-and-execute/plan-and-execute/#create-the-graph.
System Info
langchain==0.2.3
langchain-community==0.2.4
langchain-core==0.2.5
langchain-openai==0.1.8
langchain-text-splitters==0.2.0
langchainhub==0.1.15
Note: you may need to restart the kernel to use updated packages.
Beta Was this translation helpful? Give feedback.
All reactions