Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(agent): Add langchain + crew.ai agent examples - hot_weather & cold_weather #121

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -167,3 +167,5 @@ cython_debug/
/examples/**/agent_store.json
/bee-hive/agent_store.json


.vscode
7,173 changes: 6,927 additions & 246 deletions bee-hive/poetry.lock

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions bee-hive/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,12 @@ python = ">= 3.11, < 3.13"
pyyaml = "^6.0.2"
openai = "^1.56.2"
python-dotenv = "^1.0.1"
langchain-ollama = "^0.2.2"
langchain-core = "^0.3.30"
langchain-community = "^0.3.14"
duckduckgo-search = "^7.2.1"
crewai = "^0.95.0"
crewai-tools = "^0.25.8"

[build-system]
requires = ["poetry-core"]
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
#!/usr/bin/env python
"""
This script uses the crew.ai framework to find activities suitable for cold or wet weather.

Dependencies:
- crewai
- langchain_community

Usage:
- Ensure ollama is available on localhost:11434 and the llama3.1 model is available.
- Run the script to get a list of 5 activities to do in cold weather in San Francisco.
"""

from crewai import Agent, Crew, Task, Process, LLM
from crewai.project import CrewBase, agent, task, crew
from crewai.tools import tool

# Many ways of using tools - using custom tool from langchain
from langchain_community.tools import DuckDuckGoSearchRun

@CrewBase
class ColdWeatherCrew:
    """
    Crew wrapper that assembles an agent, a task, and a sequential crew
    for suggesting activities suited to cold or wet weather.
    """

    # LLM served by a locally running ollama instance.
    llm = LLM(model="ollama/llama3.1", base_url="http://localhost:11434")

    @tool("DuckDuckGo")
    def ddg_search(question: str) -> str:
        """
        crew.ai tool that runs a web search through the DuckDuckGo
        search engine (wrapping the langchain community tool).

        Args:
            question (str): Query string forwarded to DuckDuckGo.

        Returns:
            str: Raw result text returned by the search engine.
        """
        engine = DuckDuckGoSearchRun()
        return engine.run(question)

    @agent
    def activity_planner_agent(self) -> Agent:
        """
        Builds the activity-planner agent from its YAML configuration.

        Returns:
            Agent: Planner agent wired up with the DuckDuckGo search tool
            and the locally running ollama LLM.
        """
        planner = Agent(
            config=self.agents_config["activity_planner_agent"],
            # DuckDuckGo search is the agent's only tool
            tools=[self.ddg_search],
            # hardcoded to the local ollama model defined on the class
            llm=self.llm,
            verbose=True,
        )
        return planner

    @task
    def activity_finder_task(self) -> Task:
        """
        Builds the cold-weather activity-finder task from its YAML configuration.

        Returns:
            Task: Task object configured from ``tasks_config``.
        """
        finder = Task(config=self.tasks_config["activity_finder_task"], verbose=True)
        return finder

    @crew
    def activity_crew(self) -> Crew:
        """
        Assembles the crew from the registered agents and tasks.

        Returns:
            Crew: Crew that runs its tasks with a sequential process.
        """
        return Crew(
            agents=self.agents,
            tasks=self.tasks,
            process=Process.sequential,
            verbose=True,
        )

# Entry point for ad-hoc testing of the crew.
if __name__ == "__main__":
    print("Running crew...")
    # {location} is interpolated into the task description/expected output.
    ColdWeatherCrew().activity_crew().kickoff(inputs={"location": "San Francisco"})
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Agent definitions for the cold-weather crew. Loaded by CrewBase as
# ``agents_config``; the top-level key must match the lookup in
# ``ColdWeatherCrew.activity_planner_agent``. Indentation restored so the
# file is valid YAML (role/goal/backstory are fields of the agent mapping).
activity_planner_agent:
  role: "Activity Planner"
  goal: Aid user in finding activities to do in cold or wet weather
  backstory: I am an AI agent that can help you find activities to do in cold or wet weather. I can provide you with a list of activities suitable for cold or wet weather in San Francisco.
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Task definitions for the cold-weather crew. Loaded by CrewBase as
# ``tasks_config``; the top-level key must match the lookup in
# ``ColdWeatherCrew.activity_finder_task``. Indentation restored so the
# folded block scalars (">") and the mapping are valid YAML.
# {location} is filled in from the kickoff inputs.
activity_finder_task:
  description: >
    Find a list of 5 activities to do in the cold or wet weather in {location}.
    Make sure the information is current as of 2025.
  expected_output: >
    A list of 5 activities to do in the cold or wet weather in {location} with
    the name of the activity, a brief description, and the address of the
    location.
  agent: activity_planner_agent
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
#!/usr/bin/env python
"""
This script defines a hot weather agent that uses the LangChain framework to find activities to do in hot weather.

Dependencies:
- langchain
- langchain_ollama
- langchain_core
- langchain_community
- duckduckgo-search

Usage:
- Ensure ollama is available on localhost:11434 and the llama3.1 model is available.
- Run the script to get a list of 5 activities to do in hot weather in San Francisco.
"""

import os
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables.base import Runnable
from langchain.agents import AgentExecutor, create_react_agent, Tool
from langchain_ollama.chat_models import ChatOllama
from langchain_community.tools import DuckDuckGoSearchRun

class HotWeatherAgent:
    """
    Manages a ReAct-style LangChain agent that finds activities to do in
    hot weather, using DuckDuckGo search as its only tool and a locally
    running ollama model as its LLM.
    """

    # Some tools are already known - in which case ref by name and
    # 'from langchain_community.agent_toolkits.load_tools import load_tools'
    search = DuckDuckGoSearchRun()
    tools = [
        Tool(
            name="DuckDuckGo Search",
            func=search.run,
            description="useful for when you need to answer questions about current events",
        )
    ]
    # llama3.1 served by a local ollama instance. The base URL is passed
    # directly to ChatOllama (the LLAMAFILE_SERVER_BASE_URL env var used
    # previously is a llamafile setting and is not read by ChatOllama).
    # The model name now matches the llama3.1 model documented above
    # (it was inconsistently set to granite3.1-dense:8b). Tools are NOT
    # passed to the chat model: for a ReAct agent they are supplied to
    # create_react_agent / AgentExecutor below.
    llm = ChatOllama(model="llama3.1", base_url="http://localhost:11434")

    # Assigned in __init__; runs the ReAct think/act/observe loop.
    agent_executor: Runnable

    def __init__(self) -> None:
        """
        Initializes the HotWeatherAgent with a ReAct prompt, the class-level
        LLM, and the DuckDuckGo search tool (no conversational memory).
        """

        # this is a prompt template which many examples use - but we could explicitly define our own if needed
        # In this case it's using a react style approach which iterates to get a good result
        prompt: PromptTemplate = PromptTemplate.from_template("""
        Answer the following questions as best you can. Make it clear when you have a Final Answer,

        You have access to the following tools:
        {tools}

        Use the following format:

        Question: the input question you must answer
        Thought: you should always think about what to do
        Action: the action to take, should be one of [{tool_names}]
        Action Input: the input to the action
        Observation: the result of the action
        ... (this Thought/Action/Action Input/Observation can repeat 3 times)
        Final Answer: the final answer to the original input question

        These keywords must never be translated and transformed:
        - Action:
        - Thought:
        - Action Input:
        - Observation:
        - Final Answer:
        - Question:

        because they are part of the thinking process instead of the output.

        If a Final Answer is given, do not follow any actions after that.

        Begin!

        Question: {input}
        Thought:{agent_scratchpad}
        """)

        # create the agent & executor (not using any memory);
        # handle_parsing_errors lets the executor recover when the model
        # deviates from the ReAct output format
        self.agent = create_react_agent(self.llm, prompt=prompt, tools=self.tools)
        self.agent_executor = AgentExecutor(
            agent=self.agent, tools=self.tools, verbose=True, handle_parsing_errors=True
        )

    def run(self, question: str) -> str:
        """
        Runs the agent to find activities to do in hot weather.

        Args:
            question (str): The question to be answered by the agent.

        Returns:
            str: The agent's final answer, or an error string when the
            executor response lacks the expected 'output' key.
        """

        response = self.agent_executor.invoke({"input": question})
        if "output" in response:
            return response["output"]
        return "Error: The response does not contain the expected 'output' key."

# run the agent
if __name__ == "__main__":
# Sample
EXAMPLE_QUESTION = "Provide a list of 5 activities to do in the hot weather in San Francisco"
agent = HotWeatherAgent()
result = agent.run(EXAMPLE_QUESTION)
print(result)