
Merge pull request #24 from Grigorij-Dudnik/dev
v0.2.4
Grigorij-Dudnik authored Dec 12, 2024
2 parents 8ad469e + a5904fc commit b2f47e2
Showing 26 changed files with 397 additions and 1,087 deletions.
74 changes: 35 additions & 39 deletions agents/debugger_agent.py
@@ -1,26 +1,24 @@
import os
from tools.tools_coder_pipeline import (
ask_human_tool, TOOL_NOT_EXECUTED_WORD, prepare_list_dir_tool, prepare_see_file_tool,
ask_human_tool, prepare_list_dir_tool, prepare_see_file_tool,
prepare_create_file_tool, prepare_replace_code_tool, prepare_insert_code_tool, prepare_watch_web_page_tool
)
from langchain_openai.chat_models import ChatOpenAI
from typing import TypedDict, Sequence
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langgraph.prebuilt.tool_executor import ToolExecutor
from langgraph.graph import StateGraph
from dotenv import load_dotenv, find_dotenv
from langchain.tools import tool
from langchain.prompts import PromptTemplate
from langchain_community.chat_models import ChatOllama
from langchain_anthropic import ChatAnthropic
from utilities.print_formatters import print_formatted
from utilities.util_functions import check_file_contents, check_application_logs, render_tools
from utilities.util_functions import check_file_contents, check_application_logs, render_tools, exchange_file_contents, bad_tool_call_looped
from utilities.llms import llm_open_router
from utilities.langgraph_common_functions import (
call_model, call_tool, ask_human, after_ask_human_condition, bad_json_format_msg, multiple_jsons_msg, no_json_msg,
agent_looped_human_help,
)
from utilities.user_input import user_input
from agents.frontend_feedback import execute_screenshot_codes

load_dotenv(find_dotenv())
log_file_path = os.getenv("LOG_FILE")
@@ -60,32 +58,34 @@ class AgentState(TypedDict):


class Debugger():
def __init__(self, files, work_dir, human_feedback, vfeedback_screenshots_msg=None):
def __init__(self, files, work_dir, human_feedback, vfeedback_screenshots_msg=None, playwright_codes=None, screenshot_descriptions=None):
self.work_dir = work_dir
tools = prepare_tools(work_dir)
rendered_tools = render_tools(tools)
self.tool_executor = ToolExecutor(tools)
self.tools = prepare_tools(work_dir)
rendered_tools = render_tools(self.tools)
self.system_message = SystemMessage(
content=system_prompt_template.format(executor_tools=rendered_tools)
)
self.files = files
self.human_feedback = human_feedback
self.visual_feedback = vfeedback_screenshots_msg
self.playwright_codes = playwright_codes
self.screenshot_descriptions = screenshot_descriptions

# workflow definition
debugger_workflow = StateGraph(AgentState)

debugger_workflow.add_node("agent", self.call_model_debugger)
debugger_workflow.add_node("tool", self.call_tool_debugger)
debugger_workflow.add_node("check_log", self.check_log)
debugger_workflow.add_node("frontend_screenshots", self.frontend_screenshots)
debugger_workflow.add_node("human_help", agent_looped_human_help)
debugger_workflow.add_node("human_end_process_confirmation", ask_human)

debugger_workflow.set_entry_point("agent")

# executor_workflow.add_edge("agent", "checker")
debugger_workflow.add_edge("tool", "agent")
debugger_workflow.add_edge("human_help", "agent")
debugger_workflow.add_edge("frontend_screenshots", "agent")
debugger_workflow.add_conditional_edges("agent", self.after_agent_condition)
debugger_workflow.add_conditional_edges("check_log", self.after_check_log_condition)
debugger_workflow.add_conditional_edges("human_end_process_confirmation", after_ask_human_condition)
@@ -104,64 +104,63 @@ def call_model_debugger(self, state):

def call_tool_debugger(self, state):
last_ai_message = state["messages"][-1]
state = call_tool(state, self.tool_executor)
state = call_tool(state, self.tools)
for tool_call in last_ai_message.json5_tool_calls:
if tool_call["tool"] == "create_file_with_code":
self.files.add(tool_call["tool_input"]["filename"])
self.exchange_file_contents(state)
state = exchange_file_contents(state, self.files, self.work_dir)
return state

def check_log(self, state):
# Add logs
logs = check_application_logs()
log_message = HumanMessage(content="Logs:\n" + logs)

state["messages"].append(log_message)

return state

def frontend_screenshots(self, state):
print_formatted("Making screenshots, please wait a while...", color="light_blue")
# Remove old one
state["messages"] = [msg for msg in state["messages"] if not hasattr(msg, "contains_screenshots")]
# Add new file contents
screenshot_msg = execute_screenshot_codes(self.playwright_codes, self.screenshot_descriptions)
state["messages"].append(screenshot_msg)
return state

# Conditional edge functions
def after_agent_condition(self, state):
last_message = state["messages"][-1]

# safety mechanism for looped wrong tool call
last_human_messages = [m for m in state["messages"] if m.type == "human"][-4:]
tool_not_executed_msgs = [
m for m in last_human_messages if isinstance(m.content, str) and m.content.startswith(TOOL_NOT_EXECUTED_WORD)
]
if len(tool_not_executed_msgs) == 4:
print("Seems like AI been looped. Please suggest it how to introduce change correctly:")
if bad_tool_call_looped(state):
return "human_help"

elif last_message.content in (bad_json_format_msg, multiple_jsons_msg, no_json_msg):
return "agent"
elif last_message.json5_tool_calls[0]["tool"] == "final_response_debugger":
return "check_log" if log_file_path else "human_end_process_confirmation"
if log_file_path:
return "check_log"
elif self.screenshot_descriptions:
return "frontend_screenshots"
else:
return "human_end_process_confirmation"
else:
return "tool"

def after_check_log_condition(self, state):
last_message = state["messages"][-1]

if last_message.content.endswith("Logs are correct"):
return "human_end_process_confirmation"
if self.screenshot_descriptions:
return "frontend_screenshots"
else:
return "human_end_process_confirmation"
else:
return "agent"

# just functions
def exchange_file_contents(self, state):
# Remove old one
state["messages"] = [msg for msg in state["messages"] if not hasattr(msg, "contains_file_contents")]
# Add new file contents
file_contents = check_file_contents(self.files, self.work_dir)
file_contents = f"Find most actual file contents here:\n\n{file_contents}\nTake a look at line numbers before introducing changes."
file_contents_msg = HumanMessage(content=file_contents, contains_file_contents=True)
state["messages"].insert(2, file_contents_msg) # insert after the system and plan msgs
return state

def do_task(self, task, plan, text_files):
def do_task(self, task, plan):
print_formatted("Debugger starting its work", color="green")
print_formatted("🛠️ Need to improve your code? I can help!", color="light_blue")
file_contents = check_file_contents(text_files, self.work_dir)
file_contents = check_file_contents(self.files, self.work_dir)
inputs = {"messages": [
self.system_message,
HumanMessage(content=f"Task: {task}\n\n######\n\nPlan which developer implemented already:\n\n{plan}"),
@@ -180,8 +179,5 @@ def prepare_tools(work_dir):
insert_code = prepare_insert_code_tool(work_dir)
create_file = prepare_create_file_tool(work_dir)
tools = [list_dir, see_file, replace_code, insert_code, create_file, ask_human_tool, final_response_debugger]
#if frontend_port:
# watch_web_page_tool = prepare_watch_web_page_tool(frontend_port)
# tools.append(watch_web_page_tool)

return tools
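
Note: the bad_tool_call_looped and exchange_file_contents helpers now imported from utilities.util_functions are extracted from the agent classes, but the diff of utilities/util_functions.py did not load on this page. A rough sketch reconstructed from the method bodies removed from Debugger (and from Executor below) — an approximation, not the committed code:

from langchain_core.messages import HumanMessage
from tools.tools_coder_pipeline import TOOL_NOT_EXECUTED_WORD  # assumed import location

def bad_tool_call_looped(state):
    # Safety mechanism for a looped wrong tool call: if the last four human
    # messages all report an unexecuted tool, hand control to the user.
    last_human_messages = [m for m in state["messages"] if m.type == "human"][-4:]
    tool_not_executed_msgs = [
        m for m in last_human_messages
        if isinstance(m.content, str) and m.content.startswith(TOOL_NOT_EXECUTED_WORD)
    ]
    if len(tool_not_executed_msgs) == 4:
        print("Seems like AI been looped. Please suggest it how to introduce change correctly:")
        return True
    return False

def exchange_file_contents(state, files, work_dir):
    # Remove the stale file-contents message, then insert fresh contents
    # right after the system and plan messages.
    state["messages"] = [m for m in state["messages"] if not hasattr(m, "contains_file_contents")]
    file_contents = check_file_contents(files, work_dir)  # defined in this module
    file_contents = (
        f"Find most actual file contents here:\n\n{file_contents}\n"
        "Take a look at line numbers before introducing changes."
    )
    file_contents_msg = HumanMessage(content=file_contents, contains_file_contents=True)
    state["messages"].insert(2, file_contents_msg)
    return state
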
39 changes: 9 additions & 30 deletions agents/executor_agent.py
@@ -1,22 +1,19 @@
import os
from tools.tools_coder_pipeline import (
ask_human_tool, TOOL_NOT_EXECUTED_WORD, prepare_create_file_tool, prepare_replace_code_tool,
prepare_insert_code_tool
ask_human_tool, prepare_create_file_tool, prepare_replace_code_tool, prepare_insert_code_tool
)
from langchain_openai.chat_models import ChatOpenAI
from typing import TypedDict, Sequence
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langgraph.prebuilt.tool_executor import ToolExecutor, ToolInvocation
from langgraph.graph import StateGraph, END
from dotenv import load_dotenv, find_dotenv
from langchain.tools import tool
from langchain_community.chat_models import ChatOllama
from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from utilities.llms import llm_open_router
from utilities.print_formatters import print_formatted
from utilities.print_formatters import print_formatted, print_error
from utilities.util_functions import (
check_file_contents, check_application_logs, render_tools, find_tools_json
check_file_contents, render_tools, find_tools_json, exchange_file_contents, bad_tool_call_looped
)
from utilities.langgraph_common_functions import (
call_model, call_tool, bad_json_format_msg, multiple_jsons_msg, no_json_msg, agent_looped_human_help
@@ -65,9 +62,8 @@ class AgentState(TypedDict):
class Executor():
def __init__(self, files, work_dir):
self.work_dir = work_dir
tools = prepare_tools(work_dir)
rendered_tools = render_tools(tools)
self.tool_executor = ToolExecutor(tools)
self.tools = prepare_tools(work_dir)
rendered_tools = render_tools(self.tools)
self.system_message = SystemMessage(
content=system_prompt_template.format(executor_tools=rendered_tools)
)
@@ -96,31 +92,24 @@ def call_model_executor(self, state):
if last_message.type == "ai" and len(last_message.json5_tool_calls) > 1:
state["messages"].append(
HumanMessage(content=multiple_jsons_msg))
print_formatted("\nToo many jsons provided, asked to provide one.", color="yellow")
print_error("\nToo many jsons provided, asked to provide one.")
return state

def call_tool_executor(self, state):
last_ai_message = state["messages"][-1]
state = call_tool(state, self.tool_executor)
state = call_tool(state, self.tools)
for tool_call in last_ai_message.json5_tool_calls:
if tool_call["tool"] == "create_file_with_code":
self.files.add(tool_call["tool_input"]["filename"])
self.exchange_file_contents(state)
state = exchange_file_contents(state, self.files, self.work_dir)
return state

# Conditional edge functions
def after_agent_condition(self, state):
last_message = state["messages"][-1]

# safety mechanism for looped wrong tool call
last_human_messages = [m for m in state["messages"] if m.type == "human"][-4:]
tool_not_executed_msgs = [
m for m in last_human_messages if isinstance(m.content, str) and m.content.startswith(TOOL_NOT_EXECUTED_WORD)
]
if len(tool_not_executed_msgs) == 4:
print("Seems like AI been looped. Please suggest it how to introduce change correctly:")
if bad_tool_call_looped(state):
return "human_help"

elif last_message.content in (bad_json_format_msg, multiple_jsons_msg, no_json_msg):
return "agent"
elif last_message.json5_tool_calls[0]["tool"] == "final_response_executor":
@@ -129,16 +118,6 @@ def after_agent_condition(self, state):
return "tool"

# just functions
def exchange_file_contents(self, state):
# Remove old one
state["messages"] = [msg for msg in state["messages"] if not hasattr(msg, "contains_file_contents")]
# Add new file contents
file_contents = check_file_contents(self.files, self.work_dir)
file_contents = f"Find most actual file contents here:\n\n{file_contents}\nTake a look at line numbers before introducing changes."
file_contents_msg = HumanMessage(content=file_contents, contains_file_contents=True)
state["messages"].insert(2, file_contents_msg) # insert after the system and plan msgs
return state

def do_task(self, task, plan):
print_formatted("Executor starting its work", color="green")
print_formatted("✅ I follow the plan and will implement necessary changes!", color="light_blue")
72 changes: 34 additions & 38 deletions agents/file_answerer.py
@@ -1,23 +1,19 @@
from langchain_openai.chat_models import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_community.chat_models import ChatOllama
from langchain_community.llms import Replicate
from langchain_ollama import ChatOllama
from typing import TypedDict, Sequence
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langgraph.prebuilt.tool_executor import ToolExecutor
from langgraph.graph import StateGraph, END
from dotenv import load_dotenv, find_dotenv
from langchain.tools import tool
from tools.tools_coder_pipeline import (
prepare_see_file_tool, prepare_list_dir_tool, retrieve_files_by_semantic_query
)
from tools.rag.retrieval import vdb_available
from utilities.util_functions import find_tools_json, list_directory_tree, render_tools
from utilities.util_functions import find_tools_json, list_directory_tree
from utilities.langgraph_common_functions import (
call_model, call_tool, bad_json_format_msg, multiple_jsons_msg, no_json_msg
call_model_native_tools, call_tool_native, bad_json_format_msg, no_json_msg
)
from utilities.print_formatters import print_formatted
from utilities.llms import llm_open_router
import os

@@ -39,19 +35,17 @@ def final_response_file_answerer(answer, additional_materials):
"""
pass

#llm = ChatOllama(model="gemma2:9b-instruct-fp16")
#llm = ChatMistralAI(api_key=mistral_api_key, model="mistral-large-latest")
#llm = Replicate(model="meta/meta-llama-3.1-405b-instruct")
llms = []
if anthropic_api_key:
llms.append(ChatAnthropic(model='claude-3-5-sonnet-20241022', temperature=0.2, timeout=120).with_config({"run_name": "File Answerer"}))
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "File Answerer"}))
if openai_api_key:
llms.append(ChatOpenAI(model="gpt-4o", temperature=0.2, timeout=120).with_config({"run_name": "File Answerer"}))
if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "File Answerer"}))

def init_llms(tools):
llms = []
if anthropic_api_key:
llms.append(ChatAnthropic(model='claude-3-5-haiku-20241022', temperature=0.2, timeout=120).bind_tools(tools).with_config({"run_name": "File Answerer"}))
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("anthropic/claude-3.5-haiku").bind_tools(tools).with_config({"run_name": "File Answerer"}))
if openai_api_key:
llms.append(ChatOpenAI(model="gpt-4o-mini", temperature=0.2, timeout=120).bind_tools(tools).with_config({"run_name": "File Answerer"}))
if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).bind_tools(tools).with_config({"run_name": "File Answerer"}))
return llms

class AgentState(TypedDict):
messages: Sequence[BaseMessage]
@@ -61,20 +55,13 @@ class AgentState(TypedDict):
with open(f"{parent_dir}/prompts/researcher_file_answerer.prompt", "r") as f:
system_prompt_template = f.read()


# node functions
def call_model_researcher(state):
state = call_model(state, llms, printing=False)
return state


# Logic for conditional edges
def after_agent_condition(state):
last_message = state["messages"][-1]

if last_message.content in (bad_json_format_msg, multiple_jsons_msg, no_json_msg):
if last_message.content == no_json_msg:
return "agent"
elif last_message.json5_tool_calls[0]["tool"] == "final_response_file_answerer":
elif last_message.tool_calls[0]["name"] == "final_response_file_answerer":
return END
else:
return "tool"
@@ -84,16 +71,15 @@ class ResearchFileAnswerer():
def __init__(self, work_dir):
see_file = prepare_see_file_tool(work_dir)
list_dir = prepare_list_dir_tool(work_dir)
tools = [see_file, list_dir, final_response_file_answerer]
self.tools = [see_file, list_dir, final_response_file_answerer]
if vdb_available():
tools.append(retrieve_files_by_semantic_query)
self.rendered_tools = render_tools(tools)
self.tool_executor = ToolExecutor(tools)
self.tools.append(retrieve_files_by_semantic_query)
self.llms = init_llms(self.tools)

# workflow definition
researcher_workflow = StateGraph(AgentState)

researcher_workflow.add_node("agent", call_model_researcher)
researcher_workflow.add_node("agent", self.call_model_researcher)
researcher_workflow.add_node("tool", self.call_tool_researcher)

researcher_workflow.set_entry_point("agent")
@@ -105,16 +91,26 @@ def __init__(self, work_dir):

# node functions
def call_tool_researcher(self, state):
return call_tool(state, self.tool_executor)
return call_tool_native(state, self.tools)

def call_model_researcher(self, state):
state = call_model_native_tools(state, self.llms, printing=False)
last_message = state["messages"][-1]
if len(last_message.tool_calls) > 1:
# Filter out the tool call with "final_response_researcher"
state["messages"][-1].tool_calls = [
tool_call for tool_call in last_message.tool_calls
if tool_call["name"] != "final_response_file_answerer"
]
return state

# just functions
def research_and_answer(self, questions):
system_message = system_prompt_template.format(questions=questions, tools=self.rendered_tools)
system_message = system_prompt_template.format(questions=questions)
inputs = {
"messages": [SystemMessage(content=system_message), HumanMessage(content=list_directory_tree(work_dir))]}
researcher_response = self.researcher.invoke(inputs, {"recursion_limit": 100})["messages"][-1]
tool_json = find_tools_json(researcher_response.content)[0]
answer = tool_json["tool_input"]
answer = researcher_response.tool_calls[0]["args"]

return answer

[Diffs for the remaining 23 changed files did not load and are not shown here.]