-
Notifications
You must be signed in to change notification settings - Fork 16.1k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
39 changed files
with
5,059 additions
and
0 deletions.
There are no files selected for viewing
88 changes: 88 additions & 0 deletions
88
...community/langchain_community/agent_toolkits/conversational_retrieval/openai_functions.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,88 @@ | ||
from __future__ import annotations | ||
|
||
from typing import Any, List, Optional, TYPE_CHECKING | ||
|
||
from langchain_core.language_models import BaseLanguageModel | ||
from langchain_core.memory import BaseMemory | ||
from langchain_core.messages import SystemMessage | ||
from langchain_core.prompts.chat import MessagesPlaceholder | ||
from langchain_core.tools import BaseTool | ||
|
||
if TYPE_CHECKING: | ||
from langchain.agents.agent import AgentExecutor | ||
|
||
|
||
def _get_default_system_message() -> SystemMessage:
    """Build the fallback system prompt used when the caller supplies none."""
    default_text = (
        "Do your best to answer the questions. "
        "Feel free to use any tools available to look up "
        "relevant information, only if necessary"
    )
    return SystemMessage(content=default_text)
|
||
def create_conversational_retrieval_agent(
    llm: BaseLanguageModel,
    tools: List[BaseTool],
    remember_intermediate_steps: bool = True,
    memory_key: str = "chat_history",
    system_message: Optional[SystemMessage] = None,
    verbose: bool = False,
    max_token_limit: int = 2000,
    **kwargs: Any,
) -> AgentExecutor:
    """Create a conversational retrieval agent backed by OpenAI functions.

    Args:
        llm: The language model to use; expected to be a ChatOpenAI model.
        tools: Tools the agent may invoke.
        remember_intermediate_steps: If True, prior action/observation pairs
            from earlier questions are kept in memory. Useful for follow-up
            questions at the cost of extra tokens.
        memory_key: Name of the memory key in the prompt.
        system_message: System message for the prompt; a basic default is
            used when omitted.
        verbose: Whether the resulting AgentExecutor is verbose.
        max_token_limit: Maximum number of tokens retained in memory.
        **kwargs: Forwarded to the AgentExecutor constructor.

    Returns:
        An appropriately initialized AgentExecutor.
    """
    # Deferred imports: langchain proper is an optional dependency here.
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.openai_functions_agent.agent_token_buffer_memory import (
        AgentTokenBufferMemory,
    )
    from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
    from langchain.memory.token_buffer import ConversationTokenBufferMemory

    # Pick a memory implementation: the token-buffer agent memory keeps
    # intermediate steps, the conversation buffer keeps only messages.
    memory: BaseMemory
    if remember_intermediate_steps:
        memory = AgentTokenBufferMemory(
            memory_key=memory_key, llm=llm, max_token_limit=max_token_limit
        )
    else:
        memory = ConversationTokenBufferMemory(
            memory_key=memory_key,
            return_messages=True,
            output_key="output",
            llm=llm,
            max_token_limit=max_token_limit,
        )

    chosen_message = system_message if system_message else _get_default_system_message()
    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=chosen_message,
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
    )
    functions_agent = OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt)
    return AgentExecutor(
        agent=functions_agent,
        tools=tools,
        memory=memory,
        verbose=verbose,
        return_intermediate_steps=remember_intermediate_steps,
        **kwargs,
    )
53 changes: 53 additions & 0 deletions
53
.scripts/community_split/libs/community/langchain_community/agent_toolkits/json/base.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,53 @@ | ||
"""Json agent.""" | ||
from __future__ import annotations | ||
from typing import Any, Dict, List, Optional, TYPE_CHECKING | ||
|
||
from langchain_core.callbacks import BaseCallbackManager | ||
from langchain_core.language_models import BaseLanguageModel | ||
|
||
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX | ||
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit | ||
|
||
if TYPE_CHECKING: | ||
from langchain.agents.agent import AgentExecutor | ||
|
||
|
||
def create_json_agent(
    llm: BaseLanguageModel,
    toolkit: JsonToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = JSON_PREFIX,
    suffix: str = JSON_SUFFIX,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    verbose: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct a json agent from an LLM and tools.

    Args:
        llm: Language model backing the agent.
        toolkit: Toolkit supplying the JSON exploration tools.
        callback_manager: Optional callback manager shared by chain and executor.
        prefix: Prompt prefix, defaults to JSON_PREFIX.
        suffix: Prompt suffix, defaults to JSON_SUFFIX.
        format_instructions: Optional override for the agent's format
            instructions; when None the ZeroShotAgent default is used.
        input_variables: Optional prompt input variable names.
        verbose: Whether the executor is verbose.
        agent_executor_kwargs: Extra kwargs for the AgentExecutor.
        **kwargs: Forwarded to the ZeroShotAgent constructor.
    """
    # Deferred imports: langchain proper is an optional dependency here.
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain

    tools = toolkit.get_tools()
    # Only forward format_instructions when explicitly provided so that
    # ZeroShotAgent.create_prompt falls back to its own default otherwise.
    extra_prompt_kwargs: Dict[str, Any] = {}
    if format_instructions is not None:
        extra_prompt_kwargs["format_instructions"] = format_instructions
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        input_variables=input_variables,
        **extra_prompt_kwargs,
    )
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    mrkl_agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[tool.name for tool in tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=mrkl_agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        **(agent_executor_kwargs or {}),
    )
57 changes: 57 additions & 0 deletions
57
.scripts/community_split/libs/community/langchain_community/agent_toolkits/nla/tool.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,57 @@ | ||
"""Tool for interacting with a single API with natural language definition.""" | ||
|
||
from __future__ import annotations | ||
from typing import Any, Optional, TYPE_CHECKING | ||
|
||
from langchain_core.language_models import BaseLanguageModel | ||
from langchain_core.tools import Tool | ||
|
||
from langchain_community.tools.openapi.utils.api_models import APIOperation | ||
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec | ||
from langchain_community.utilities.requests import Requests | ||
|
||
if TYPE_CHECKING: | ||
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain | ||
|
||
|
||
class NLATool(Tool):
    """Natural Language API Tool."""

    @classmethod
    def from_open_api_endpoint_chain(
        cls, chain: OpenAPIEndpointChain, api_title: str
    ) -> "NLATool":
        """Convert an endpoint chain to an API endpoint tool."""
        # Tool names may not contain spaces, so sanitize the API title first.
        safe_title = api_title.replace(" ", "_")
        tool_name = f"{safe_title}.{chain.api_operation.operation_id}"
        tool_description = (
            f"I'm an AI from {api_title}. Instruct what you want,"
            " and I'll assist via an API with description:"
            f" {chain.api_operation.description}"
        )
        return cls(name=tool_name, func=chain.run, description=tool_description)

    @classmethod
    def from_llm_and_method(
        cls,
        llm: BaseLanguageModel,
        path: str,
        method: str,
        spec: OpenAPISpec,
        requests: Optional[Requests] = None,
        verbose: bool = False,
        return_intermediate_steps: bool = False,
        **kwargs: Any,
    ) -> "NLATool":
        """Instantiate the tool from the specified path and method."""
        operation = APIOperation.from_openapi_spec(spec, path, method)
        endpoint_chain = OpenAPIEndpointChain.from_api_operation(
            operation,
            llm,
            requests=requests,
            verbose=verbose,
            return_intermediate_steps=return_intermediate_steps,
            **kwargs,
        )
        # Delegate to the chain-based constructor with the spec's title.
        return cls.from_open_api_endpoint_chain(endpoint_chain, spec.info.title)
77 changes: 77 additions & 0 deletions
77
.scripts/community_split/libs/community/langchain_community/agent_toolkits/openapi/base.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,77 @@ | ||
"""OpenAPI spec agent.""" | ||
from __future__ import annotations | ||
from typing import Any, Dict, List, Optional, TYPE_CHECKING | ||
|
||
from langchain_core.callbacks import BaseCallbackManager | ||
from langchain_core.language_models import BaseLanguageModel | ||
|
||
from langchain_community.agent_toolkits.openapi.prompt import ( | ||
OPENAPI_PREFIX, | ||
OPENAPI_SUFFIX, | ||
) | ||
from langchain_community.agent_toolkits.openapi.toolkit import OpenAPIToolkit | ||
|
||
if TYPE_CHECKING: | ||
from langchain.agents.agent import AgentExecutor | ||
|
||
|
||
def create_openapi_agent(
    llm: BaseLanguageModel,
    toolkit: OpenAPIToolkit,
    callback_manager: Optional[BaseCallbackManager] = None,
    prefix: str = OPENAPI_PREFIX,
    suffix: str = OPENAPI_SUFFIX,
    format_instructions: Optional[str] = None,
    input_variables: Optional[List[str]] = None,
    max_iterations: Optional[int] = 15,
    max_execution_time: Optional[float] = None,
    early_stopping_method: str = "force",
    verbose: bool = False,
    return_intermediate_steps: bool = False,
    agent_executor_kwargs: Optional[Dict[str, Any]] = None,
    **kwargs: Any,
) -> AgentExecutor:
    """Construct an OpenAPI agent from an LLM and tools.

    *Security Note*: When creating an OpenAPI agent, check the permissions
        and capabilities of the underlying toolkit. For example, the default
        implementation of OpenAPIToolkit uses the RequestsToolkit, which
        contains tools to make arbitrary network requests against any URL
        (e.g., GET, POST, PATCH, PUT, DELETE). Control who can use this
        toolkit and what network access it has.

        See https://python.langchain.com/docs/security for more information.
    """
    # Deferred imports: langchain proper is an optional dependency here.
    from langchain.agents.agent import AgentExecutor
    from langchain.agents.mrkl.base import ZeroShotAgent
    from langchain.chains.llm import LLMChain

    tools = toolkit.get_tools()
    # Only forward format_instructions when explicitly provided so that
    # ZeroShotAgent.create_prompt falls back to its own default otherwise.
    extra_prompt_kwargs: Dict[str, Any] = {}
    if format_instructions is not None:
        extra_prompt_kwargs["format_instructions"] = format_instructions
    prompt = ZeroShotAgent.create_prompt(
        tools,
        prefix=prefix,
        suffix=suffix,
        input_variables=input_variables,
        **extra_prompt_kwargs,
    )
    chain = LLMChain(
        llm=llm,
        prompt=prompt,
        callback_manager=callback_manager,
    )
    mrkl_agent = ZeroShotAgent(
        llm_chain=chain,
        allowed_tools=[tool.name for tool in tools],
        **kwargs,
    )
    return AgentExecutor.from_agent_and_tools(
        agent=mrkl_agent,
        tools=tools,
        callback_manager=callback_manager,
        verbose=verbose,
        return_intermediate_steps=return_intermediate_steps,
        max_iterations=max_iterations,
        max_execution_time=max_execution_time,
        early_stopping_method=early_stopping_method,
        **(agent_executor_kwargs or {}),
    )
Oops, something went wrong.