
Merge pull request #20 from GregorD1A1/dev
Release v0.2.1
Grigorij-Dudnik authored Nov 8, 2024
2 parents d86f1e6 + 5b1972a commit bc7b803
Showing 13 changed files with 121 additions and 145 deletions.
5 changes: 4 additions & 1 deletion .env.template
@@ -15,4 +15,7 @@ TODOIST_PROJECT_ID=
## For automatic error check
LOG_FILE=
## For frontend visual feedback
FRONTEND_PORT=
FRONTEND_PORT=
## Additional model providers
OPENROUTER_API_KEY=
OLLAMA_MODEL=
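Both new variables are optional: when set, the agents below add OpenRouter and Ollama models to their provider lists. A filled-in sketch of the new section — the values are placeholders, not real credentials:

OPENROUTER_API_KEY=sk-or-v1-xxxxxxxxxxxxxxxx
OLLAMA_MODEL=llama3.1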
8 changes: 6 additions & 2 deletions agents/debugger_agent.py
@@ -15,6 +15,7 @@
from langchain_anthropic import ChatAnthropic
from utilities.print_formatters import print_formatted
from utilities.util_functions import check_file_contents, check_application_logs, render_tools
from utilities.llms import llm_open_router
from utilities.langgraph_common_functions import (
call_model, call_tool, ask_human, after_ask_human_condition, bad_json_format_msg, multiple_jsons_msg, no_json_msg,
agent_looped_human_help,
@@ -44,9 +45,12 @@ def final_response(test_instruction):
model='claude-3-5-sonnet-20241022', temperature=0, max_tokens=2000, timeout=120
).with_config({"run_name": "Debugger"})
)
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "Debugger"}))
if os.getenv("OPENAI_API_KEY"):
llms.append(ChatOpenAI(model="gpt-4o", temperature=0, timeout=120).with_config({"run_name": "Debugger"}))

if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Debugger"}))

class AgentState(TypedDict):
messages: Sequence[BaseMessage]
@@ -156,7 +160,7 @@ def exchange_file_contents(self, state):
return state

def do_task(self, task, plan, text_files):
print("\n\n\nExecutor starting its work")
print_formatted("Debugger starting its work", color="blue")
file_contents = check_file_contents(text_files, self.work_dir)
inputs = {"messages": [
self.system_message,
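The pattern added here — and repeated in the executor, planner, researcher and manager modules below — gates each provider on its environment variable, so only configured backends end up in the agent's model list. A condensed, standalone sketch of that pattern (simplified from the diff, not the exact module):

# Env-gated provider list, as introduced in this release (sketch).
import os
from dotenv import load_dotenv, find_dotenv
from langchain_openai import ChatOpenAI
from langchain_anthropic import ChatAnthropic
from langchain_community.chat_models import ChatOllama
from utilities.llms import llm_open_router

load_dotenv(find_dotenv())

llms = []
if os.getenv("ANTHROPIC_API_KEY"):
    llms.append(ChatAnthropic(model="claude-3-5-sonnet-20241022", temperature=0, max_tokens=2000, timeout=120).with_config({"run_name": "Debugger"}))
if os.getenv("OPENROUTER_API_KEY"):  # new: OpenRouter provider
    llms.append(llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "Debugger"}))
if os.getenv("OPENAI_API_KEY"):
    llms.append(ChatOpenAI(model="gpt-4o", temperature=0, timeout=120).with_config({"run_name": "Debugger"}))
if os.getenv("OLLAMA_MODEL"):  # new: local Ollama model
    llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Debugger"}))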
7 changes: 6 additions & 1 deletion agents/executor_agent.py
@@ -13,6 +13,7 @@
from langchain_community.chat_models import ChatOllama
from langchain_anthropic import ChatAnthropic
from langchain_mistralai import ChatMistralAI
from utilities.llms import llm_open_router
from utilities.print_formatters import print_formatted
from utilities.util_functions import (
check_file_contents, check_application_logs, render_tools, find_tools_json
@@ -43,8 +44,12 @@ def finish(test_instruction):
llms.append(ChatAnthropic(
model='claude-3-5-sonnet-20240620', temperature=0, max_tokens=2000, timeout=120
).with_config({"run_name": "Executor"}))
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "Executor"}))
if os.getenv("OPENAI_API_KEY"):
llms.append(ChatOpenAI(model="gpt-4o-mini", temperature=0, timeout=120).with_config({"run_name": "Executor"}))
if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Executor"}))


class AgentState(TypedDict):
@@ -134,7 +139,7 @@ def exchange_file_contents(self, state):
return state

def do_task(self, task, plan):
print("\n\n\nExecutor starting its work")
print_formatted("\nExecutor starting its work", color="blue")
file_contents = check_file_contents(self.files, self.work_dir)
inputs = {"messages": [
self.system_message,
7 changes: 6 additions & 1 deletion agents/planner_agent.py
@@ -11,15 +11,20 @@
import os
from langchain_community.chat_models import ChatOllama
from langchain_anthropic import ChatAnthropic
from utilities.llms import llm_open_router


load_dotenv(find_dotenv())

llms_planners = []
if os.getenv("OPENAI_API_KEY"):
llms_planners.append(ChatOpenAI(model="gpt-4o", temperature=0.3, timeout=120).with_config({"run_name": "Planer"}))
if os.getenv("OPENROUTER_API_KEY"):
llms_planners.append(llm_open_router("openai/gpt-4o").with_config({"run_name": "Planer"}))
if os.getenv("ANTHROPIC_API_KEY"):
llms_planners.append(ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.3, timeout=120).with_config({"run_name": "Planer"}))
if os.getenv("OLLAMA_MODEL"):
llms_planners.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Planer"}))

llm_planner = llms_planners[0].with_fallbacks(llms_planners[1:])
# copy planers, but exchange config name
@@ -95,7 +100,7 @@ def call_model_corrector(state):


def planning(task, text_files, image_paths, work_dir):
print("\n\nPlanner starting its work")
print_formatted("\nPlanner starting its work", color="blue")
file_contents = check_file_contents(text_files, work_dir, line_numbers=False)
images = convert_images(image_paths)
message_content_without_imgs = f"Task: {task},\n\nFiles:\n{file_contents}"
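The planner then collapses the provider list into a single runnable with fallbacks (the llm_planner = llms_planners[0].with_fallbacks(llms_planners[1:]) line above): the first configured model is primary and the rest are tried in order if a call fails. A brief usage sketch, assuming at least one provider key is set; the prompt is illustrative:

# First provider is primary; remaining ones are automatic fallbacks on error.
llm_planner = llms_planners[0].with_fallbacks(llms_planners[1:])
response = llm_planner.invoke("Draft a short plan for adding OpenRouter support.")
print(response.content)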
13 changes: 10 additions & 3 deletions agents/researcher_agent.py
@@ -17,6 +17,8 @@
from utilities.langgraph_common_functions import (
call_model, call_tool, ask_human, after_ask_human_condition, bad_json_format_msg, multiple_jsons_msg, no_json_msg
)
from utilities.print_formatters import print_formatted
from utilities.llms import llm_open_router
import os


@@ -45,12 +47,15 @@ def final_response_researcher(files_to_work_on, reference_files, template_images
#llm = ChatMistralAI(api_key=mistral_api_key, model="mistral-large-latest")
#llm = Replicate(model="meta/meta-llama-3.1-405b-instruct")
llms = []
#if os.getenv("MISTRAL_API_KEY"):
# llms.append(ChatMistralAI(model="ministral-8b-latest").with_config({"run_name": "Researcher"}))
if anthropic_api_key:
llms.append(ChatAnthropic(model='claude-3-5-sonnet-20240620', temperature=0.2, timeout=120).with_config({"run_name": "Researcher"}))
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "Researcher"}))
if openai_api_key:
llms.append(ChatOpenAI(model="gpt-4o", temperature=0.2, timeout=120).with_config({"run_name": "Researcher"}))
if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Researcher"}))


class AgentState(TypedDict):
messages: Sequence[BaseMessage]
@@ -110,7 +115,9 @@ def call_tool_researcher(self, state):

# just functions
def research_task(self, task):
print("Researcher starting its work")
print_formatted("Researcher starting its work", color="green")
print_formatted("👋 Hey! I'm looking for a files on which we will work on together!", color="light_blue")

system_message = system_prompt_template.format(task=task, tools=self.rendered_tools)
inputs = {
"messages": [SystemMessage(content=system_message), HumanMessage(content=list_directory_tree(work_dir))]}
6 changes: 5 additions & 1 deletion manager.py
@@ -18,6 +18,7 @@
no_json_msg)
from utilities.util_functions import render_tools
from utilities.start_project_functions import create_project_description_file
from utilities.llms import llm_open_router
import os
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
@@ -44,9 +45,12 @@
llms = []
if os.getenv("OPENAI_API_KEY"):
llms.append(ChatOpenAI(model="gpt-4o", temperature=0.4, timeout=120).with_config({"run_name": "Manager"}))
if os.getenv("OPENROUTER_API_KEY"):
llms.append(llm_open_router("openai/gpt-4o").with_config({"run_name": "Researcher"}))
if os.getenv("ANTHROPIC_API_KEY"):
llms.append(ChatAnthropic(model='claude-3-5-sonnet-20241022', temperature=0.4, timeout=120).with_config({"run_name": "Manager"}))

if os.getenv("OLLAMA_MODEL"):
llms.append(ChatOllama(model=os.getenv("OLLAMA_MODEL")).with_config({"run_name": "Manager"}))

class AgentState(TypedDict):
messages: Sequence[BaseMessage]
2 changes: 0 additions & 2 deletions tools/tools_coder_pipeline.py
@@ -1,7 +1,6 @@
from langchain.tools import tool
import os
from playwright.sync_api import sync_playwright
from openai import OpenAI
from dotenv import load_dotenv, find_dotenv
from utilities.syntax_checker_functions import check_syntax
from utilities.start_project_functions import file_folder_ignored, forbidden_files_and_folders
@@ -12,7 +11,6 @@


load_dotenv(find_dotenv())
OAIclient = OpenAI()


TOOL_NOT_EXECUTED_WORD = "Tool not been executed. "
16 changes: 16 additions & 0 deletions utilities/llms.py
@@ -0,0 +1,16 @@
from langchain_openai.chat_models import ChatOpenAI as ChatOpenRouter
from os import getenv
from dotenv import load_dotenv

load_dotenv()

def llm_open_router(model):
return ChatOpenRouter(
openai_api_key=getenv("OPENROUTER_API_KEY"),
openai_api_base="https://openrouter.ai/api/v1",
model_name=model,
default_headers={
"HTTP-Referer": "https://github.com/GregorD1A1/Clean-Coder-AI",
"X-Title": "Clean Coder",
},
)
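The new helper points ChatOpenAI at OpenRouter's OpenAI-compatible endpoint, so any model available through OpenRouter can serve as a drop-in chat model. A usage sketch (assumes OPENROUTER_API_KEY is set; the model slug comes from the agent diffs above, the prompt and run name are illustrative):

from utilities.llms import llm_open_router

llm = llm_open_router("anthropic/claude-3.5-sonnet").with_config({"run_name": "Example"})
reply = llm.invoke("Summarize the v0.2.1 changes in one sentence.")
print(reply.content)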
124 changes: 35 additions & 89 deletions utilities/print_formatters.py
@@ -3,13 +3,12 @@
import json5
import textwrap
from termcolor import colored
from json import JSONDecodeError
from rich.panel import Panel
from rich.syntax import Syntax
from rich.console import Console
from rich.padding import Padding
from pygments.util import ClassNotFound
from pygments.lexers import get_lexer_by_name
from pygments.lexers import get_lexer_by_name, get_lexer_for_filename


def split_text_and_code(text):
@@ -48,34 +47,13 @@ def print_formatted_content(content):
json_data = parse_tool_json(code_content)
if not json_data:
print_formatted("Badly parsed tool json:")
print_formatted_code(code=code_content, language="json5")
print_formatted_code(code=code_content, extension="json5")
return
tool = json_data.get('tool')
tool_input = json_data.get('tool_input', {})
print_tool_message(tool_name=tool, tool_input=tool_input)
else: # code snippet
print_formatted_code(code=code_content, language=language)


def get_message_by_tool_name(tool_name):
tool_messages = {
"add_task": "It's time to add a new task:",
"modify_task": "Let's modify the task:",
"reorder_tasks": "Let's reorder tasks...",
"create_epic": "Let's create an epic...",
"modify_epic": "Let's modify the epic:",
"finish_project_planning": "Project planning is finished",
"list_dir": "Let's list files in a directory:",
"see_file": "Looking at the file content...",
"retrieve_files_by_semantic_query": "Let's find files by semantic query...",
"insert_code": "Let's add some code...",
"replace_code": "Some code needs to be updated...",
"create_file_with_code": "Let's create a new file...",
"ask_human_tool": "Ask human for input or actions.",
"watch_web_page": "Visiting a web page...",
"finish": "Hurray! The work is DONE!"
}
return f'\n{tool_messages.get(tool_name, "")}'
print_formatted_code(code=code_content, extension=language)


def print_formatted(content, width=None, color=None, on_color=None, bold=False, end='\n'):
@@ -98,67 +76,35 @@ def safe_int(value):
return None


def print_formatted_code(code, language, start_line=1, title=''):
def print_formatted_code(code, extension, start_line=1, title=None):
console = Console()

start_line = safe_int(start_line)

try:
lexer = get_lexer_by_name(language or 'text')
lexer = get_lexer_for_filename(extension)
except ClassNotFound:
lexer = get_lexer_by_name('text')

try:
if code:
syntax = Syntax(
code,
lexer,
line_numbers=True,
start_line=start_line,
theme="monokai",
word_wrap=True,
padding=(1, 1),
)

snippet_title = title or f"{language.capitalize() if isinstance(language, str) else 'Code'} Snippet"

if len(snippet_title) > 100:
snippet_title = 'Code Snippet'

styled_code = Panel(
syntax,
border_style="bold yellow",
title=snippet_title,
expand=False
)

console.print(Padding(styled_code, 1))
else:
console.print("[bold red]Error: No code to display[/bold red]")
except Exception as e:
if code:
syntax = Syntax(
code,
lexer,
line_numbers=True,
start_line=start_line,
theme="monokai",
word_wrap=True,
padding=(1, 1),
)

snippet_title = title or f"{language.capitalize() if isinstance(language, str) else 'Code'} Snippet"

styled_code = Panel(
syntax,
border_style="bold yellow",
title=snippet_title,
expand=False
)

console.print(Padding(styled_code, 1))
else:
console.print("[bold red]Error: Code is None[/bold red]")
syntax = Syntax(
code,
lexer,
line_numbers=True,
start_line=start_line,
theme="monokai",
word_wrap=True,
padding=(1, 1),
)

snippet_title = title or f"{extension.capitalize()} Snippet"
if len(snippet_title) > 100:
snippet_title = f"..{snippet_title[-95:]}"

styled_code = Panel(
syntax,
border_style="bold yellow",
title=snippet_title,
expand=False
)
console.print(Padding(styled_code, 1))


def print_error(message: str) -> None:
@@ -178,24 +124,24 @@ def print_tool_message(tool_name, tool_input=None):
print_formatted(content=f'{tool_input}/', color='cyan', bold=True)
elif tool_name == 'create_file_with_code':
message = "Let's create new file..."
language = tool_input['filename'].split(".")[-1]
extension = tool_input['filename'].split(".")[-1]
print_formatted(content=message, color='blue', bold=True)
print_formatted_code(code=tool_input['code'], language=language, title=tool_input['filename'])
print_formatted_code(code=tool_input['code'], extension=extension, title=tool_input['filename'])
elif tool_name == 'insert_code':
message = f"Let's insert code after line {tool_input['start_line']}"
language = tool_input['filename'].split(".")[-1]
extension = tool_input['filename'].split(".")[-1]
print_formatted(content=message, color='blue', bold=True)
print_formatted_code(code=tool_input['code'], language=language, start_line=tool_input['start_line']+1, title=tool_input['filename'])
print_formatted_code(code=tool_input['code'], extension=extension, start_line=tool_input['start_line']+1, title=tool_input['filename'])
elif tool_name == 'replace_code':
message = f"Let's insert code on the place of lines {tool_input['start_line']} to {tool_input['end_line']}"
language = tool_input['filename'].split(".")[-1]
extension = tool_input['filename'].split(".")[-1]
print_formatted(content=message, color='blue', bold=True)
print_formatted_code(code=tool_input['code'], language=language, start_line=tool_input['start_line'], title=tool_input['filename'])
print_formatted_code(code=tool_input['code'], extension=extension, start_line=tool_input['start_line'], title=tool_input['filename'])

elif tool_name == 'add_task':
message = "Let's add a task..."
print_formatted(content=message, color='blue', bold=True)
print_formatted_code(code=tool_input['task_description'], title=tool_input['task_name'], language='text')
print_formatted_code(code=tool_input['task_description'], title=tool_input['task_name'], extension='text')
elif tool_name == 'create_epic':
message = "Let's create an epic..."
print_formatted(content=message, color='blue', bold=True)
Expand All @@ -207,10 +153,10 @@ def print_tool_message(tool_name, tool_input=None):
print_formatted(content=tool_input, color='blue', bold=True)
elif tool_name == 'final_response_researcher':
json_string = json.dumps(tool_input, indent=2)
print_formatted_code(code=json_string, language='json', title='Files:')
print_formatted_code(code=json_string, extension='json', title='Files:')
elif tool_name == 'final_response':
json_string = json.dumps(tool_input, indent=2)
print_formatted_code(code=json_string, language='json', title='Instruction:')
print_formatted_code(code=json_string, extension='json', title='Instruction:')
else:
message = f"Calling {tool_name} tool..."
print_formatted(content=message, color='blue', bold=True)
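print_formatted_code now receives a file extension instead of a language name and resolves the lexer with get_lexer_for_filename, falling back to a plain-text lexer when the extension is not recognized. A calling sketch that mirrors how the tool handlers above derive the extension (the filename and snippet are illustrative):

from utilities.print_formatters import print_formatted_code

filename = "greeting.py"  # hypothetical file
snippet = 'def greet(name):\n    return f"Hello, {name}!"'
print_formatted_code(
    code=snippet,
    extension=filename.split(".")[-1],  # same convention the tool handlers use
    start_line=1,
    title=filename,
)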
5 changes: 4 additions & 1 deletion utilities/start_project_functions.py
@@ -6,7 +6,10 @@


load_dotenv(find_dotenv())
work_dir = os.getenv("WORK_DIR")
try:
work_dir = os.environ["WORK_DIR"]
except KeyError:
raise Exception("Please set up your project folder as WORK_DIR parameter in .env")


def create_coderignore():
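With this change a missing WORK_DIR now fails fast with an explicit error instead of silently producing None later on. The expected .env entry looks like this (the path is a placeholder):

WORK_DIR=/home/user/projects/my_app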
(3 more changed files not shown)
