Merge pull request #11 from GregorD1A1/dev
Release 0.1.1
Grigorij-Dudnik authored Oct 11, 2024
2 parents b938748 + 05951df commit 14c3e45
Showing 26 changed files with 487 additions and 314 deletions.
34 changes: 19 additions & 15 deletions agents/executor_agent.py
@@ -15,7 +15,7 @@
 from langchain.prompts import PromptTemplate
 from langchain_community.chat_models import ChatOllama
 from langchain_anthropic import ChatAnthropic
-from utilities.util_functions import check_file_contents, print_formatted, check_application_logs, find_tool_json
+from utilities.util_functions import check_file_contents, print_formatted, check_application_logs
 from utilities.langgraph_common_functions import (call_model, call_tool, ask_human, after_ask_human_condition,
                                                   bad_json_format_msg, multiple_jsons_msg, no_json_msg)
 from utilities.user_input import user_input
@@ -33,19 +33,20 @@ def final_response(test_instruction):
     implemented changes work correctly."""
     print_formatted(test_instruction, color="blue")

-stop_sequence = "\n```\n"
-
-#llm = ChatOpenAI(model="gpt-4o", temperature=0).with_config({"run_name": "Executor"})
-llm = ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.2, max_tokens=2000, stop=[stop_sequence]).with_config({"run_name": "Executor"})
-#llm = ChatTogether(model="meta-llama/Llama-3-70b-chat-hf", temperature=0).with_config({"run_name": "Executor"})
-#llm = ChatOllama(model="mixtral"), temperature=0).with_config({"run_name": "Executor"})
+llms = []
+if os.getenv("ANTHROPIC_API_KEY"):
+    llms.append(ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.2, max_tokens=2000, timeout=120).with_config({"run_name": "Executor"}))
+if os.getenv("OPENAI_API_KEY"):
+    llms.append(ChatOpenAI(model="gpt-4o", temperature=0.2, timeout=120).with_config({"run_name": "Executor"}))


 class AgentState(TypedDict):
     messages: Sequence[BaseMessage]

-current_dir = os.path.dirname(os.path.realpath(__file__))
-with open(f"{current_dir}/prompts/executor_system.prompt", "r") as f:
+parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+with open(f"{parent_dir}/prompts/executor_system.prompt", "r") as f:
     system_prompt_template = f.read()


@@ -82,18 +83,21 @@ def __init__(self, files, work_dir):

     # node functions
     def call_model_executor(self, state):
-        #stop_sequence = None
-        state = call_model(state, llm, stop_sequence_to_add=stop_sequence)
+        state = call_model(state, llms)
         last_message = state["messages"][-1]
+        if last_message.type == "ai" and len(last_message.json5_tool_calls) > 1:
+            state["messages"].append(
+                HumanMessage(content=multiple_jsons_msg))
+            print("\nToo many jsons provided, asked to provide one.")
         return state

     def call_tool_executor(self, state):
         last_ai_message = state["messages"][-1]
         state = call_tool(state, self.tool_executor)
-        if last_ai_message.tool_call["tool"] == "create_file_with_code":
-            self.files.add(last_ai_message.tool_call["tool_input"]["filename"])
-        if last_ai_message.tool_call["tool"] in ["insert_code", "replace_code", "create_file_with_code"]:
-            state = self.exchange_file_contents(state)
+        for tool_call in last_ai_message.json5_tool_calls:
+            if tool_call["tool"] == "create_file_with_code":
+                self.files.add(tool_call["tool_input"]["filename"])
+        self.exchange_file_contents(state)
         return state

     def check_log(self, state):
@@ -124,7 +128,7 @@ def after_agent_condition(self, state):

         elif last_message.content in (bad_json_format_msg, multiple_jsons_msg, no_json_msg):
             return "agent"
-        elif last_message.tool_call["tool"] == "final_response":
+        elif last_message.json5_tool_calls[0]["tool"] == "final_response":
             return "check_log" if log_file_path else "human_end_process_confirmation"
         else:
             return "tool"
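Note: the executor now builds its model list from whichever API keys are present instead of hard-coding a single Anthropic client, and call_model receives the whole list. The internals of call_model live in utilities/langgraph_common_functions.py and are not part of this diff; the sketch below is a guess at the consumption pattern, assuming each entry is a LangChain chat model and the first model that responds without raising wins (call_model_sketch and its error handling are hypothetical).

import os
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

llms = []
if os.getenv("ANTHROPIC_API_KEY"):
    llms.append(ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0.2, timeout=120))
if os.getenv("OPENAI_API_KEY"):
    llms.append(ChatOpenAI(model="gpt-4o", temperature=0.2, timeout=120))

def call_model_sketch(messages, llms):
    """Hypothetical helper: try each configured model in order."""
    last_error = None
    for llm in llms:
        try:
            return llm.invoke(messages)  # returns an AIMessage on success
        except Exception as exc:  # rate limit, timeout, auth failure, ...
            last_error = exc
    raise RuntimeError("All configured models failed") from last_error
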
24 changes: 15 additions & 9 deletions agents/planner_agent.py
@@ -5,26 +5,32 @@
 from langgraph.graph import END, StateGraph
 from dotenv import load_dotenv, find_dotenv
 from utilities.util_functions import print_formatted, check_file_contents, convert_images, get_joke
-from utilities.langgraph_common_functions import call_model, ask_human, after_ask_human_condition
+from utilities.langgraph_common_functions import ask_human, after_ask_human_condition
 import os
 from langchain_community.chat_models import ChatOllama
 from langchain_anthropic import ChatAnthropic


 load_dotenv(find_dotenv())

-llm = ChatOpenAI(model="gpt-4o", temperature=0.3).with_config({"run_name": "Planer"})
-llm_voter = llm.with_config({"run_name": "Voter"})
+llms_planners = []
+if os.getenv("OPENAI_API_KEY"):
+    llms_planners.append(ChatOpenAI(model="gpt-4o", temperature=0.3, timeout=120).with_config({"run_name": "Planer"}))
+if os.getenv("ANTHROPIC_API_KEY"):
+    llms_planners.append(ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.3, timeout=120).with_config({"run_name": "Planer"}))
+
+llm_planner = llms_planners[0].with_fallbacks(llms_planners[1:])
+# copy planers, but exchange config name
+llm_voter = llm_planner.with_config({"run_name": "Voter"})

 class AgentState(TypedDict):
     messages: Sequence[BaseMessage]
     voter_messages: Sequence[BaseMessage]

-current_dir = os.path.dirname(os.path.realpath(__file__))
-with open(f"{current_dir}/prompts/planer_system.prompt", "r") as f:
+parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+with open(f"{parent_dir}/prompts/planer_system.prompt", "r") as f:
     planer_system_prompt_template = f.read()
-with open(f"{current_dir}/prompts/voter_system.prompt", "r") as f:
+with open(f"{parent_dir}/prompts/voter_system.prompt", "r") as f:
     voter_system_prompt_template = f.read()

 planer_system_message = SystemMessage(content=planer_system_prompt_template)
@@ -37,7 +43,7 @@ def call_planers(state):
     nr_plans = 3
     print(f"\nGenerating plan propositions. While I'm thinking...\n")
     print_formatted(get_joke(), color="red")
-    plan_propositions_messages = llm.batch([messages for _ in range(nr_plans)])
+    plan_propositions_messages = llm_planner.batch([messages for _ in range(nr_plans)])
     for i, proposition in enumerate(plan_propositions_messages):
         state["voter_messages"].append(AIMessage(content="_"))
         state["voter_messages"].append(HumanMessage(content=f"Proposition nr {i+1}:\n\n" + proposition.content))
@@ -56,7 +62,7 @@ def call_planers(state):

 def call_model_corrector(state):
     messages = state["messages"]
-    response = llm.invoke(messages)
+    response = llm_planner.invoke(messages)
     print_formatted(response.content)
     state["messages"].append(response)

@@ -99,4 +105,4 @@ def planning(task, text_files, image_paths, work_dir):
 if __name__ == "__main__":
     task = "Test task"
     work_dir = os.getenv("WORK_DIR")
-    planning(task, work_dir=work_dir)
\ No newline at end of file
+    planning(task, work_dir=work_dir)
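
Note: llms_planners[0].with_fallbacks(llms_planners[1:]) uses LangChain's built-in fallback wrapper: the first configured model is the primary, and the remaining ones run only when it raises. With a single API key the fallback list is simply empty and the primary runs alone. A minimal, self-contained illustration of the pattern (assumes both API keys are available; the prompt string is invented for the demo):

from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI

primary = ChatOpenAI(model="gpt-4o", temperature=0.3, timeout=120)
backup = ChatAnthropic(model="claude-3-5-sonnet-20240620", temperature=0.3, timeout=120)

# backup is invoked only when primary raises (outage, rate limit, timeout)
llm_planner = primary.with_fallbacks([backup])

# The wrapped model is still a Runnable, so batch() fans out as usual,
# mirroring the three plan propositions generated in call_planers:
propositions = llm_planner.batch(["Propose an implementation plan."] * 3)
for message in propositions:
    print(message.content)
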
31 changes: 16 additions & 15 deletions agents/researcher_agent.py
@@ -14,7 +14,7 @@
     prepare_list_dir_tool, prepare_see_file_tool, retrieve_files_by_semantic_query
 )
 from rag.retrieval import vdb_available
-from utilities.util_functions import find_tool_json, print_formatted
+from utilities.util_functions import find_tools_json, print_formatted
 from utilities.langgraph_common_functions import (
     call_model, call_tool, ask_human, after_ask_human_condition, bad_json_format_msg, multiple_jsons_msg, no_json_msg
 )
@@ -23,15 +23,17 @@

 load_dotenv(find_dotenv())
 mistral_api_key = os.getenv("MISTRAL_API_KEY")
+anthropic_api_key = os.getenv("ANTHROPIC_API_KEY")
+openai_api_key = os.getenv("OPENAI_API_KEY")
 work_dir = os.getenv("WORK_DIR")


 @tool
 def final_response(files_to_work_on, reference_files, template_images):
-    """That tool outputs list of files executor will need to change and paths to graphical patterns if some.
-    Use that tool only when you 100% sure you found all the files Executor will need to modify.
+    """That tool outputs list of files programmer will need to change and paths to graphical patterns if some.
+    Use that tool only when you 100% sure you found all the files programmer will need to modify.
     If not, do additional research. Include only the files you convinced will be useful.
-    Provide only existing files, do not provide that you'll be implementing.
+    Provide only existing files, do not provide files to be implemented.
     tool input:
     :param files_to_work_on: ["List", "of", "existing files", "to potentially introduce", "changes"],
@@ -40,28 +42,27 @@ def final_response(files_to_work_on, reference_files, template_images):
     """
     pass

-#stop_sequence = "\n```\n"
-stop_sequence = None
-
-#llm = ChatOpenAI(model="gpt-4o", temperature=0.2)
-llm = ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.2).with_config({"run_name": "Researcher"})
-#llm = ChatOllama(model="gemma2:9b-instruct-fp16")
-#llm = ChatMistralAI(api_key=mistral_api_key, model="mistral-large-latest")
-#llm = Replicate(model="meta/meta-llama-3.1-405b-instruct")
+llms = []
+if anthropic_api_key:
+    llms.append(ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.2, timeout=120).with_config({"run_name": "Researcher"}))
+if openai_api_key:
+    llms.append(ChatOpenAI(model="gpt-4o", temperature=0.2, timeout=120).with_config({"run_name": "Researcher"}))

 class AgentState(TypedDict):
     messages: Sequence[BaseMessage]


-current_dir = os.path.dirname(os.path.realpath(__file__))
-with open(f"{current_dir}/prompts/researcher_system.prompt", "r") as f:
+parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+with open(f"{parent_dir}/prompts/researcher_system.prompt", "r") as f:
     system_prompt_template = f.read()


 # node functions
 def call_model_researcher(state):
-    state = call_model(state, llm, stop_sequence_to_add=stop_sequence)
+    state = call_model(state, llms)
     return state


@@ -71,7 +72,7 @@ def after_agent_condition(state):

     if last_message.content in (bad_json_format_msg, multiple_jsons_msg, no_json_msg):
         return "agent"
-    elif last_message.tool_call["tool"] == "final_response":
+    elif last_message.json5_tool_calls[0]["tool"] == "final_response":
         return "human"
     else:
         return "tool"
@@ -113,7 +114,7 @@ def research_task(self, task):
         inputs = {"messages": [SystemMessage(content=system_message), HumanMessage(content=f"Go")]}
         researcher_response = self.researcher.invoke(inputs, {"recursion_limit": 100})["messages"][-2]

-        tool_json = find_tool_json(researcher_response.content)
+        tool_json = find_tools_json(researcher_response.content)[0]
         text_files = set(tool_json["tool_input"]["files_to_work_on"] + tool_json["tool_input"]["reference_files"])
         image_paths = tool_json["tool_input"]["template_images"]

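Note: find_tool_json became find_tools_json because a response may now carry several JSON tool calls; the helper returns a list and the researcher takes element [0]. The real implementation lives in utilities/util_functions.py and is not shown in this diff; the sketch below is a hypothetical stand-in, assuming tool calls arrive as fenced json/json5 blocks in the model's text:

import re
import json5  # pip install json5 -- a tolerant JSON parser

def find_tools_json(text):
    """Hypothetical parser: collect every tool-call object in fenced blocks."""
    blocks = re.findall(r"```(?:json5?)?\s*(.*?)```", text, re.DOTALL)
    tool_calls = []
    for block in blocks:
        try:
            parsed = json5.loads(block)
        except Exception:
            continue  # skip fences that do not contain valid JSON5
        if isinstance(parsed, dict) and "tool" in parsed:
            tool_calls.append(parsed)
    return tool_calls

Called on a researcher response, it would return e.g. [{"tool": "final_response", "tool_input": {...}}], matching the indexing in research_task.
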
3 changes: 2 additions & 1 deletion clean_coder_pipeline.py
@@ -7,7 +7,8 @@
 from agents.executor_agent import Executor
 import os
 from utilities.user_input import user_input
-
+import warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)


def run_clean_coder_pipeline(task, work_dir):
40 changes: 40 additions & 0 deletions docker-compose.yml
@@ -0,0 +1,40 @@
services:
  project_manager:
    build:
      context: .
      dockerfile: docker/Dockerfile
    container_name: project_manager
    environment:
      - WORK_DIR=/work_dir
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - TODOIST_API_KEY=${TODOIST_API_KEY}
      - TODOIST_PROJECT_ID=${TODOIST_PROJECT_ID}
      - COHERE_API_KEY=${COHERE_API_KEY}
      - LOG_FILE=${LOG_FILE:-}
    volumes:
      - .:/Clean_Coder
      - ${WORK_DIR}:/work_dir
    # Uncomment (linux) or adjust (other systems) the following line to use microphone
    # devices:
    #   - "/dev/snd:/dev/snd"
    command: python project_manager.py

  clean_coder_pipeline:
    build:
      context: .
      dockerfile: docker/Dockerfile
    container_name: clean_coder_pipeline
    environment:
      - WORK_DIR=/work_dir
      - OPENAI_API_KEY=${OPENAI_API_KEY}
      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
      - COHERE_API_KEY=${COHERE_API_KEY}
      - LOG_FILE=${LOG_FILE:-}
    volumes:
      - .:/Clean_Coder
      - ${WORK_DIR}:/work_dir
    # Uncomment (linux) or adjust (other systems) the following line to use microphone
    # devices:
    #   - "/dev/snd:/dev/snd"
    command: python clean_coder_pipeline.py
24 changes: 24 additions & 0 deletions docker/.dockerignore
@@ -0,0 +1,24 @@
.env
.idea/
.cache/
__pycache__/
env/
venv/
*.pyc
*.pyo
*.pyd
.Python
pip-log.txt
pip-delete-this-directory.txt
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.log
.git
.mypy_cache
.pytest_cache
.hypothesis
30 changes: 30 additions & 0 deletions docker/Dockerfile
@@ -0,0 +1,30 @@
FROM python:3.10-slim

# Set working directory
WORKDIR /Clean_Coder

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    linux-headers-generic \
    libportaudio2 \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements file
COPY requirements.txt .

# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy the rest of the application code
COPY . .

# Create workdir
RUN mkdir /work_dir

# Set environment variables
ENV PYTHONUNBUFFERED=1
ENV WORK_DIR=/work_dir

# Default command
CMD ["python", "project_manager.py"]