diff --git a/Dockerfile b/Dockerfile
index 00a3f2e..c8b451b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -55,5 +55,6 @@ RUN apt-get clean && apt-get purge
 USER mitodl
 
+EXPOSE 8888
 EXPOSE 8001
 ENV PORT 8001
diff --git a/ai_agents/__init__.py b/ai_chatbots/__init__.py
similarity index 100%
rename from ai_agents/__init__.py
rename to ai_chatbots/__init__.py
diff --git a/ai_chatbots/api.py b/ai_chatbots/api.py
new file mode 100644
index 0000000..d1698b9
--- /dev/null
+++ b/ai_chatbots/api.py
@@ -0,0 +1,49 @@
+"""AI-specific functions for ai_chatbots."""
+
+from typing import Optional
+
+from django.conf import settings
+from llama_index.core.agent import AgentRunner
+from llama_index.core.llms.llm import LLM
+
+from ai_chatbots.constants import AgentClassEnum, LLMClassEnum
+from ai_chatbots.proxies import AIProxy
+
+
+def get_llm(model_name: Optional[str] = None, proxy: Optional[AIProxy] = None) -> LLM:
+    """
+    Get the LLM for the given model name,
+    incorporating a proxy if passed.
+
+    Args:
+        model_name: The name of the model
+        proxy: The proxy to use
+
+    Returns:
+        The LLM
+
+    """
+    if not model_name:
+        model_name = settings.AI_MODEL
+    try:
+        llm_class = LLMClassEnum[settings.AI_PROVIDER].value
+        return llm_class(
+            model=model_name,
+            **(proxy.get_api_kwargs() if proxy else {}),
+            additional_kwargs=(proxy.get_additional_kwargs() if proxy else {}),
+        )
+    except KeyError as ke:
+        msg = f"{settings.AI_PROVIDER} not supported"
+        raise NotImplementedError(msg) from ke
+    except Exception as ex:
+        msg = f"Error instantiating LLM: {model_name}"
+        raise ValueError(msg) from ex
+
+
+def get_agent() -> type[AgentRunner]:
+    """Get the appropriate chatbot agent class for the AI provider"""
+    try:
+        return AgentClassEnum[settings.AI_PROVIDER].value
+    except KeyError as ke:
+        msg = f"{settings.AI_PROVIDER} not supported"
+        raise NotImplementedError(msg) from ke
diff --git a/ai_agents/apps.py b/ai_chatbots/apps.py
similarity index 79%
rename from ai_agents/apps.py
rename to ai_chatbots/apps.py
index ec60906..0451560 100644
--- a/ai_agents/apps.py
+++ b/ai_chatbots/apps.py
@@ -6,4 +6,4 @@ class AiChatConfig(AppConfig):
 
     """AI Chat AppConfig"""
 
-    name = "ai_agents"
+    name = "ai_chatbots"
diff --git a/ai_agents/agents.py b/ai_chatbots/chatbots.py
similarity index 82%
rename from ai_agents/agents.py
rename to ai_chatbots/chatbots.py
index 67b4103..076752d 100644
--- a/ai_agents/agents.py
+++ b/ai_chatbots/chatbots.py
@@ -10,24 +10,23 @@
 from django.conf import settings
 from django.core.cache import caches
 from django.utils.module_loading import import_string
-from llama_index.agent.openai import OpenAIAgent
 from llama_index.core.agent import AgentRunner
 from llama_index.core.base.llms.types import ChatMessage
 from llama_index.core.constants import DEFAULT_TEMPERATURE
 from llama_index.core.tools import FunctionTool, ToolMetadata
-from llama_index.llms.openai import OpenAI
 from openai import BadRequestError
 from pydantic import Field
 
-from ai_agents.constants import AIModelAPI, LearningResourceType, OfferedBy
-from ai_agents.utils import enum_zip
+from ai_chatbots.api import get_agent, get_llm
+from ai_chatbots.constants import LearningResourceType, OfferedBy
+from ai_chatbots.utils import enum_zip
 
 log = logging.getLogger(__name__)
 
 
-class BaseChatAgent(ABC):
+class BaseChatbot(ABC):
     """
-    Base service class for an AI chat agent
+    Base AI chatbot class
 
     Llamaindex was chosen to implement this because it provides
     a far easier framework than native OpenAI or LiteLLM to
@@ -42,10 +41,9 @@ class BaseChatAgent(ABC):
     https://docs.litellm.ai/docs/completion/function_call
     """
 
-    INSTRUCTIONS = "Provide instructions for the AI assistant"
+    INSTRUCTIONS = "You are a friendly chatbot; answer the user's questions."
 
     # For LiteLLM tracking purposes
-    JOB_ID = "BASECHAT_JOB"
     TASK_NAME = "BASECHAT_TASK"
 
     CACHE_PREFIX = "base_ai_"
@@ -53,20 +51,21 @@ def __init__(
         self,
         user_id: str,
         *,
-        name: str = "AI Chat Agent",
+        name: str = "MIT Open Learning Chatbot",
         model: Optional[str] = None,
         temperature: Optional[float] = None,
         instructions: Optional[str] = None,
     ):
-        """Initialize the AI chat agent service"""
+        """Initialize the AI chatbot"""
         self.user_id = user_id
         self.assistant_name = name
-        self.ai = settings.AI_MODEL_API
         self.model = model or settings.AI_MODEL
         self.temperature = temperature or DEFAULT_TEMPERATURE
         self.instructions = instructions or self.INSTRUCTIONS
         if settings.AI_PROXY_CLASS and settings.AI_PROXY_URL:
-            self.proxy = import_string(f"ai_agents.proxy.{settings.AI_PROXY_CLASS}")()
+            self.proxy = import_string(f"ai_chatbots.proxies.{settings.AI_PROXY_CLASS}")(
+                user_id=user_id, task_id=self.TASK_NAME
+            )
         else:
             self.proxy = None
         self.agent = None
@@ -104,22 +103,14 @@ def save_chat_history(self) -> None:
             self.cache_key, json.dumps(chat_history), timeout=settings.AI_CACHE_TIMEOUT
         )
 
+    @abstractmethod
     def create_agent(self) -> AgentRunner:
         """Create an AgentRunner for the relevant AI source"""
-        if self.ai == AIModelAPI.openai.value:
-            return self.create_openai_agent()
-        else:
-            error = f"AI source {self.ai} is not supported"
-            raise NotImplementedError(error)
 
     def create_tools(self):
         """Create any tools required by the agent"""
         return []
 
-    @abstractmethod
-    def create_openai_agent(self) -> OpenAIAgent:
-        """Create an OpenAI agent"""
-
     def clear_chat_history(self) -> None:
         """Clear the chat history from the cache"""
         self.agent.chat_history.clear()
@@ -170,13 +161,36 @@ def get_completion(self, message: str, *, debug: bool = settings.AI_DEBUG) -> st
         self.save_chat_history()
 
 
-class RecommendationAgent(BaseChatAgent):
-    """Service class for the AI search function agent"""
+class FunctionCallingChatbot(BaseChatbot):
+    """Function calling chatbot, using a FunctionCallingAgent"""
+
+    TASK_NAME = "FUNCTION_CALL_TASK"
 
-    JOB_ID = "SEARCH_JOB"
-    TASK_NAME = "SEARCH_TASK"
+    def create_agent(self) -> AgentRunner:
+        """
+        Create a function calling agent
+        """
+        llm = get_llm(self.model, self.proxy)
+        self.agent = get_agent().from_tools(
+            tools=self.create_tools(),
+            llm=llm,
+            verbose=True,
+            system_prompt=self.instructions,
+        )
+        if self.save_history:
+            self.get_or_create_chat_history_cache()
+        return self.agent
+
+
+class ResourceRecommendationBot(FunctionCallingChatbot):
+    """
+    Chatbot that searches for learning resources in the MIT Learn catalog,
+    then recommends the best results to the user based on their query.
+    """
 
-    INSTRUCTIONS = f"""You are an assistant helping users find courses from a catalog
+    TASK_NAME = "RECOMMENDATION_TASK"
+
+    INSTRUCTIONS = """You are an assistant helping users find courses from a catalog
 of learning resources. Users can ask about specific topics, levels, or
 recommendations based on their interests or goals.
@@ -205,8 +219,7 @@ class RecommendationAgent(BaseChatAgent):
 as the value for this parameter.
 
 offered_by: If a user asks for resources "offered by" or "from" an institution,
-you should include this parameter based on the following
-dictionary: {OfferedBy.as_dict()} DO NOT USE THE offered_by FILTER OTHERWISE.
+you should include this parameter.
+DO NOT USE THE offered_by FILTER OTHERWISE.
 
 certification: true if the user is interested in resources that offer certificates,
 false if the user does not want resources with a certificate offered.
 Do not use this filter
@@ -248,21 +261,6 @@ class RecommendationAgent(BaseChatAgent):
 Expected Output: Maybe ask whether the user wants to learn how to program,
 or just use AI in their discipline - does this person want to study machine
 learning? More info needed. Then perform a relevant search and send back the
 best results.
-
-And here are some recommended search parameters to apply for sample user prompts:
-
-User: "I am interested in learning advanced AI techniques"
-Search parameters: {{"q": "AI techniques"}}
-
-User: "I am curious about AI applications for business"
-Search parameters: {{"q": "AI business"}}
-
-User: "I want free basic courses about climate change from OpenCourseware"
-Search parameters: {{"q": "climate change", "free": true, "resource_type": ["course"],
-"offered_by": "ocw"}}
-
-User: "I want to learn some advanced mathematics"
-Search parameters: {{"q": "mathematics"}}
 """
 
     class SearchToolSchema(pydantic.BaseModel):
@@ -272,7 +270,7 @@ class SearchToolSchema(pydantic.BaseModel):
         q: The search query string
         resource_type: Filter by type of resource (course, program, etc)
         free: Filter for free resources only
-        certificate: Filter for resources offering certificates
+        certification: Filter for resources offering certificates
         offered_by: Filter by institution offering the resource
@@ -291,7 +289,7 @@ class SearchToolSchema(pydantic.BaseModel):
             default=None,
             description="Whether the resource is free to access, true|false",
         )
-        certificate: Optional[bool] = Field(
+        certification: Optional[bool] = Field(
             default=None,
             description=(
                 "Whether the resource offers a certificate upon completion, true|false"
@@ -309,7 +307,7 @@ class SearchToolSchema(pydantic.BaseModel):
                     "q": "machine learning",
                     "resource_type": ["course"],
                     "free": True,
-                    "certificate": False,
+                    "certification": False,
                     "offered_by": "MIT",
                 }
             ]
@@ -325,7 +323,7 @@ def __init__(
         self,
         temperature: Optional[float] = None,
         instructions: Optional[str] = None,
     ):
-        """Initialize the AI search agent service"""
+        """Initialize the chatbot"""
         super().__init__(
             user_id,
             name=name,
@@ -335,7 +333,7 @@ def __init__(
         )
         self.search_parameters = []
         self.search_results = []
-        self.create_agent()
+        super().create_agent()
 
     def search_courses(self, q: str, **kwargs) -> str:
         """
@@ -392,33 +390,6 @@ def search_courses(self, q: str, **kwargs) -> str:
             log.exception("Error querying MIT API")
             return json.dumps({"error": str(e)})
 
-    def create_openai_agent(self) -> OpenAIAgent:
-        """
-        Create an OpenAI-specific llamaindex agent for function calling
-
-        Using `OpenAI` instead of a more universal `LiteLLM` because
-        the `LiteLLM` class as implemented by llamaindex does not
-        support function calling. ie:
-        agent = FunctionCallingAgentWorker.from_tools(....
-        > AssertionError: llm must be an instance of FunctionCallingLLM
-        """
-        llm = OpenAI(
-            model=self.model,
-            **(self.proxy.get_api_kwargs() if self.proxy else {}),
-            additional_kwargs=(
-                self.proxy.get_additional_kwargs(self) if self.proxy else {}
-            ),
-        )
-        self.agent = OpenAIAgent.from_tools(
-            tools=self.create_tools(),
-            llm=llm,
-            verbose=True,
-            system_prompt=self.instructions,
-        )
-        if self.save_history:
-            self.get_or_create_chat_history_cache()
-        return self.agent
-
     def create_tools(self):
         """Create tools required by the agent"""
         return [self.create_search_tool()]
diff --git a/ai_agents/agents_test.py b/ai_chatbots/chatbots_test.py
similarity index 63%
rename from ai_agents/agents_test.py
rename to ai_chatbots/chatbots_test.py
index 308df7d..3c3c6d3 100644
--- a/ai_agents/agents_test.py
+++ b/ai_chatbots/chatbots_test.py
@@ -7,7 +7,8 @@
 from django.conf import settings
 from llama_index.core.constants import DEFAULT_TEMPERATURE
 
-from ai_agents.agents import RecommendationAgent
+from ai_chatbots.chatbots import ResourceRecommendationBot
+from ai_chatbots.constants import LLMClassEnum
 from main.test_utils import assert_json_equal
 
 
@@ -35,41 +36,38 @@ def search_results():
         (None, None, None),
     ],
 )
-def test_search_agent_service_initialization_defaults(model, temperature, instructions):
-    """Test the RecommendationAgent class instantiation."""
-    name = "My search agent"
+def test_chatbot_initialization_defaults(model, temperature, instructions):
+    """Test the ResourceRecommendationBot class instantiation."""
+    name = "My search bot"
 
-    search_agent = RecommendationAgent(
+    chatbot = ResourceRecommendationBot(
         "user",
         name=name,
         model=model,
         temperature=temperature,
         instructions=instructions,
     )
-    assert search_agent.model == (model if model else settings.AI_MODEL)
-    assert search_agent.temperature == (
-        temperature if temperature else DEFAULT_TEMPERATURE
-    )
-    assert search_agent.instructions == (
-        instructions if instructions else search_agent.instructions
-    )
-    assert search_agent.agent.__class__.__name__ == "OpenAIAgent"
-    assert search_agent.agent.agent_worker._llm.model == (  # noqa: SLF001
-        model if model else settings.AI_MODEL
-    )
+    assert chatbot.model == (model if model else settings.AI_MODEL)
+    assert chatbot.temperature == (temperature if temperature else DEFAULT_TEMPERATURE)
+    assert chatbot.instructions == (
+        instructions if instructions else chatbot.instructions
+    )
+    worker_llm = chatbot.agent.agent_worker._llm  # noqa: SLF001
+    assert worker_llm.__class__ == LLMClassEnum.openai.value
+    assert worker_llm.model == (model if model else settings.AI_MODEL)
 
 
 def test_clear_chat_history(client, user, chat_history):
     """Test that the ResourceRecommendationBot clears chat_history."""
-    search_agent = RecommendationAgent(user.username)
-    search_agent.agent.chat_history.extend(chat_history)
-    assert len(search_agent.agent.chat_history) == 4
-    search_agent.clear_chat_history()
-    assert search_agent.agent.chat_history == []
+    chatbot = ResourceRecommendationBot(user.username)
+    chatbot.agent.chat_history.extend(chat_history)
+    assert len(chatbot.agent.chat_history) == 4
+    chatbot.clear_chat_history()
+    assert chatbot.agent.chat_history == []
 
 
 @pytest.mark.django_db
-def test_search_agent_tool(settings, mocker, search_results):
+def test_chatbot_tool(settings, mocker, search_results):
     """The chatbot's search tool should be created and function correctly."""
     settings.AI_MIT_SEARCH_LIMIT = 5
     retained_attributes = [
@@ -90,10 +88,10 @@
         expected_results.append(simple_result)
 
     mock_post = mocker.patch(
-        "ai_agents.agents.requests.get",
+        "ai_chatbots.chatbots.requests.get",
         return_value=mocker.Mock(json=mocker.Mock(return_value=search_results)),
     )
-    search_agent = RecommendationAgent("anonymous", name="test agent")
+    chatbot = ResourceRecommendationBot("anonymous", name="test agent")
     search_parameters = {
         "q": "physics",
         "resource_type": ["course", "program"],
         "free": False,
         "certification": True,
         "offered_by": "xpro",
         "limit": 5,
     }
-    tool = search_agent.create_tools()[0]
+    tool = chatbot.create_tools()[0]
     results = tool._fn(**search_parameters)  # noqa: SLF001
     mock_post.assert_called_once_with(
         settings.AI_MIT_SEARCH_URL, params=search_parameters, timeout=30
     )
@@ -119,7 +117,7 @@ def test_get_completion(settings, mocker, debug, search_results):
         "metadata": {
             "search_parameters": {"q": "physics"},
             "search_results": search_results.get("results"),
-            "system_prompt": RecommendationAgent.INSTRUCTIONS,
+            "system_prompt": ResourceRecommendationBot.INSTRUCTIONS,
         }
     }
     comment_metadata = f"\n\n<!-- {json.dumps(metadata)} -->\n\n".encode()
     if debug:
         expected_return_value.append(comment_metadata)
     mocker.patch(
-        "ai_agents.agents.OpenAIAgent.stream_chat",
+        "ai_chatbots.constants.OpenAIAgent.stream_chat",
         return_value=mocker.Mock(response_gen=iter(expected_return_value)),
     )
-    search_agent = RecommendationAgent("anonymous", name="test agent")
-    search_agent.search_parameters = metadata["metadata"]["search_parameters"]
-    search_agent.search_results = metadata["metadata"]["search_results"]
-    search_agent.instructions = metadata["metadata"]["system_prompt"]
-    search_agent.search_parameters = {"q": "physics"}
-    search_agent.search_results = search_results
+    chatbot = ResourceRecommendationBot("anonymous", name="test agent")
+    chatbot.search_parameters = metadata["metadata"]["search_parameters"]
+    chatbot.search_results = metadata["metadata"]["search_results"]
+    chatbot.instructions = metadata["metadata"]["system_prompt"]
+    chatbot.search_parameters = {"q": "physics"}
+    chatbot.search_results = search_results
     results = "".join(
         [
             str(chunk)
-            for chunk in search_agent.get_completion(
-                "I want to learn physics", debug=debug
-            )
+            for chunk in chatbot.get_completion("I want to learn physics", debug=debug)
         ]
     )
-    search_agent.agent.stream_chat.assert_called_once_with("I want to learn physics")
+    chatbot.agent.stream_chat.assert_called_once_with("I want to learn physics")
     assert "".join([str(value) for value in expected_return_value]) in results
     if debug:
         assert '\n\n
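
Note: ai_chatbots/constants.py is referenced throughout this diff (AgentClassEnum,
LLMClassEnum, and the patched ai_chatbots.constants.OpenAIAgent), but its hunk is
not included above. The following is a minimal sketch of what those enums plausibly
look like, inferred from the imports removed from chatbots.py and the assertions in
chatbots_test.py; the exact module contents are an assumption.

# Hypothetical sketch of ai_chatbots/constants.py (not part of this diff).
# Maps each supported AI provider name (settings.AI_PROVIDER) to the
# llama_index classes that api.get_llm() and api.get_agent() look up.
from enum import Enum

from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.openai import OpenAI


class LLMClassEnum(Enum):
    """LLM class to instantiate for each AI provider."""

    openai = OpenAI


class AgentClassEnum(Enum):
    """Agent class whose from_tools() builds the chatbot's AgentRunner."""

    openai = OpenAIAgent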
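
Under the same assumptions, the refactored classes are driven the way
chatbots_test.py exercises them, for example:

# Hypothetical usage, mirroring chatbots_test.py: stream a completion.
from ai_chatbots.chatbots import ResourceRecommendationBot

chatbot = ResourceRecommendationBot("anonymous", name="test agent")
for chunk in chatbot.get_completion("I want to learn physics"):
    print(chunk, end="")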