Commit
Update llm
dewmal committed Jul 2, 2024
1 parent f0591fc commit d937498
Showing 4 changed files with 26 additions and 79 deletions.
18 changes: 14 additions & 4 deletions bindings/ceylon/ceylon/llm/llm_agent.py
@@ -3,7 +3,7 @@
from typing import List

import networkx as nx
-from langchain_core.tools import StructuredTool
+from langchain_core.tools import StructuredTool, BaseTool
from pydantic.dataclasses import dataclass

from ceylon.ceylon import AgentCore, Processor, MessageHandler, AgentDefinition
@@ -26,7 +26,7 @@ class LLMAgent(AgentCore, MessageHandler, Processor):

    agent_replies: List[LLMAgentResponse] = []

-    def __init__(self, name, position, instructions, responsibilities, llm, tools: list[StructuredTool] = None):
+    def __init__(self, name, position, instructions, responsibilities, llm, tools: list[BaseTool] = None):
        super().__init__(definition=AgentDefinition(
            name=name,
            position=position,
@@ -53,9 +53,18 @@ async def on_message(self, agent_id, data, time):

        next_agent = self.get_next_agent()
        if next_agent == definition.name:
+            dependencies = list(self.network_graph.predecessors(next_agent))
+            print("Dependencies are:", dependencies, "for", next_agent)
+
+            only_dependencies = {dt.agent_name: dt for dt in self.agent_replies if dt.agent_name in dependencies}
+
+            if len(only_dependencies) == len(dependencies):
                print("Executing", definition.name)
-            await self.execute(self.original_goal)
+                await self.execute({
+                    "original_request": self.original_goal,
+                    "old_responses": self.agent_replies,
+                    **only_dependencies,
+                    dt.agent_name: dt.response
+                })

@@ -90,8 +99,9 @@ async def execute(self, input):
print("Executing", definition.name)

result = process_agent_request(self.llm, input, definition, tools=self.tools)

# result = f"{definition.name} executed successfully"
response = LLMAgentResponse(agent_id=definition.id, agent_name=definition.name, response=result)

await self.broadcast(pickle.dumps(response))

await self.update_status(next_agent)
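For context, a minimal standalone sketch of the dependency gate this hunk introduces: execute only fires once every predecessor of the agent in the networkx graph has replied. AgentReply and ready_to_execute are illustrative stand-ins for this sketch, not part of the ceylon API:

# Sketch of the gating logic, assuming the workflow is a networkx DiGraph whose
# edges point from prerequisite agents to the agents that consume their output.
from dataclasses import dataclass
from typing import Dict, List

import networkx as nx


@dataclass
class AgentReply:          # stand-in for ceylon's LLMAgentResponse
    agent_name: str
    response: str


def ready_to_execute(graph: nx.DiGraph, agent: str, replies: List[AgentReply]) -> Dict[str, str]:
    """Return {dependency_name: response} once every predecessor has replied, else {}."""
    dependencies = list(graph.predecessors(agent))
    received = {r.agent_name: r.response for r in replies if r.agent_name in dependencies}
    return received if len(received) == len(dependencies) else {}


if __name__ == "__main__":
    g = nx.DiGraph()
    g.add_edge("researcher", "writer")   # writer depends on researcher
    g.add_edge("editor", "writer")       # and on editor

    replies = [AgentReply("researcher", "notes..."), AgentReply("editor", "style guide...")]
    print(ready_to_execute(g, "writer", replies))  # both predecessors replied -> non-empty dict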
34 changes: 5 additions & 29 deletions bindings/ceylon/ceylon/llm/llm_caller.py
@@ -1,10 +1,9 @@
from langchain.agents import initialize_agent, AgentType
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.tools import StructuredTool
from langchain_core.utils.function_calling import format_tool_to_openai_function

from ceylon.ceylon import AgentDefinition
from ceylon.tools.search_tool import SearchTool


def process_agent_request(llm, inputs, agent_definition, tools=None):
@@ -30,20 +29,17 @@ def process_agent_request(llm, inputs, agent_definition, tools=None):

    if tools:
        llm = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
        # agent = prompt | llm | OpenAIFunctionsAgentOutputParser()
        # executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

        formatted_prompt = prompt.format(**formatted_inputs)

        agent = initialize_agent(
            tools,
            llm,
-            agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+            agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
            verbose=True,
            handle_parsing_errors=True
        )
        print(formatted_prompt)
        response = agent.run(formatted_prompt)
        print(response)
        return response
    else:
        print("Not using tools")
@@ -54,33 +50,13 @@

if __name__ == '__main__':
    from langchain_community.llms.ollama import Ollama
-    from duckduckgo_search import DDGS
-
-
-    def search_query(query: str):
-        """
-        Searches the given keywords on DuckDuckGo and returns the search results.
-        Parameters:
-        keywords (str): The keywords to search for. This should be a string containing the search terms.
-        Returns:
-        list: A list of dictionaries, where each dictionary contains the following keys:
-            - title (str): The title of the search result.
-            - href (str): The URL of the search result.
-            - body (str): A brief description of the search result.
-        """
-        print(f"Searching for {query}")
-        results = DDGS().text(query, safesearch='off', timelimit='y', max_results=10)
-        return results
-
-
    # Initialize Ollama
-    llm = Ollama(model="llama3")
+    llm = Ollama(model="llama3:instruct")

    # Load tools
    tools = [
-        StructuredTool.from_function(search_query, name="Search",
-                                     description="Useful for searching the web for current information.")
+        SearchTool(),
    ]

    res = process_agent_request(llm,
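As a rough sketch of the tools branch above, assuming a local Ollama server with an instruct model pulled: the tools are bound to the LLM as OpenAI-style function schemas and the prompt is driven through LangChain's legacy initialize_agent with the structured-chat ReAct agent type. The Echo tool and the model name are placeholders for this sketch, not part of the repository:

# Hedged, standalone sketch of the tools path in process_agent_request.
from langchain.agents import AgentType, initialize_agent
from langchain_community.llms.ollama import Ollama
from langchain_core.tools import StructuredTool
from langchain_core.utils.function_calling import format_tool_to_openai_function


def echo(text: str) -> str:
    """Return the input text unchanged (placeholder tool)."""
    return text


tools = [StructuredTool.from_function(echo, name="Echo", description="Echoes its input.")]

llm = Ollama(model="llama3:instruct")  # assumes this model is available locally
llm = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])

agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True,
    handle_parsing_errors=True,
)

print(agent.run("Use the Echo tool on the word 'ceylon'."))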
1 change: 0 additions & 1 deletion bindings/ceylon/src/agent/workspace.rs
@@ -53,7 +53,6 @@ impl Workspace {
            let agent = agent.clone();
            let task = rt.spawn(async move {
                agent.start(topic, url, _inputs).await;
-
            });
            tasks.push(task);
        }
52 changes: 7 additions & 45 deletions bindings/ceylon/tests/llm_agen_test.py
@@ -1,57 +1,19 @@
import asyncio

-from duckduckgo_search import DDGS
-from langchain_community.chat_models import ChatOllama
+from langchain_community.chat_models import ChatOllama, ChatOpenAI
from langchain_core.tools import StructuredTool

from ceylon import AgentRunner
from ceylon.llm.llm_agent import LLMAgent
-
-
-def search_query(keywords: str, ):
-    """
-    Searches the given keywords on DuckDuckGo and returns the search results.
-    Parameters:
-    keywords (str): The keywords to search for. This should be a string containing the search terms.
-    Returns:
-    list: A list of dictionaries, where each dictionary contains the following keys:
-        - title (str): The title of the search result.
-        - href (str): The URL of the search result.
-        - body (str): A brief description of the search result.
-    """
-    print(f"Searching for {keywords}")
-    results = DDGS().text(keywords, safesearch='off', timelimit='y', max_results=10)
-    return results
-
-
-def publish_content(content: str, name: str):
-    """
-    Publishes the given content.
-    Parameters:
-    content (str): The content to be published. This should be a string containing the text or data that needs to be published.
-    name (str): The name of the file to be created. This should be a string containing the name of the file to be created.
-    Returns:
-    None
-    """
-    print(f"Publishing content")
-    name = f"content-{name}.txt"
-
-    try:
-        # Open the file in write mode
-        with open(name, "a", encoding="utf-8") as f:
-            f.write(content)
-        return f"Published {content} in {name}"
-    except Exception as e:
-        return f"An error occurred: {e}"
+from ceylon.tools.file_publisher_tool import FilePublisherTool
+from ceylon.tools.search_tool import SearchTool


async def main():
    runner = AgentRunner(workspace_name="ceylon-ai")
-    llm_lib = ChatOllama(model="phi3:instruct")
-    # llm_lib = ChatOpenAI(model="gpt-4o")
+    # llm_lib = ChatOllama(model="llama3:instruct")
+    llm_lib = ChatOpenAI(model="gpt-4o")
    runner.register_agent(LLMAgent(
        name="writer",
        position="Assistant Writer",
@@ -86,7 +48,7 @@ async def main():
"Must summarize output without source references."
],
tools=[
StructuredTool.from_function(search_query)
SearchTool()
]
))
#
@@ -114,7 +76,7 @@ async def main():
"Publish it as finalized and polished content",
],
tools=[
StructuredTool.from_function(publish_content)
FilePublisherTool()
]
))

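A hedged sketch of the test's new wiring, using ceylon's packaged SearchTool and FilePublisherTool in place of the hand-rolled StructuredTool wrappers. The agent names, instructions, responsibilities, and the final runner call are placeholders here, since those parts of the file are truncated out of the hunks above:

import asyncio

from langchain_community.chat_models import ChatOllama

from ceylon import AgentRunner
from ceylon.llm.llm_agent import LLMAgent
from ceylon.tools.file_publisher_tool import FilePublisherTool
from ceylon.tools.search_tool import SearchTool


async def main():
    runner = AgentRunner(workspace_name="ceylon-ai")
    llm_lib = ChatOllama(model="llama3:instruct")  # local alternative to the gpt-4o line in the test

    runner.register_agent(LLMAgent(
        name="researcher",
        position="Researcher",       # placeholder values; the real test's
        instructions=[],             # instructions and responsibilities are
        responsibilities=[],         # not shown in this diff
        llm=llm_lib,
        tools=[SearchTool()],
    ))
    runner.register_agent(LLMAgent(
        name="publisher",
        position="Publisher",
        instructions=[],
        responsibilities=[],
        llm=llm_lib,
        tools=[FilePublisherTool()],
    ))
    # The actual test registers more agents and starts the runner; that part is not shown here.


if __name__ == "__main__":
    asyncio.run(main())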
