diff --git a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py index 0b9f63e5e8b8c..c48661873c8eb 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/retrievers.py @@ -44,7 +44,7 @@ class __ModuleName__Retriever(BaseRetriever): retriever.invoke(query) - .. code-block:: python + .. code-block:: none # TODO: Example output. @@ -67,7 +67,7 @@ class __ModuleName__Retriever(BaseRetriever): llm = ChatOpenAI(model="gpt-3.5-turbo-0125") def format_docs(docs): - return "\n\n".join(doc.page_content for doc in docs) + return "\\n\\n".join(doc.page_content for doc in docs) chain = ( {"context": retriever | format_docs, "question": RunnablePassthrough()} @@ -78,7 +78,7 @@ def format_docs(docs): chain.invoke("...") - .. code-block:: python + .. code-block:: none # TODO: Example output. diff --git a/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py b/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py index 067ed6d1d54e4..5a80398891167 100644 --- a/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py +++ b/libs/cli/langchain_cli/integration_template/integration_template/toolkits.py @@ -41,7 +41,7 @@ class __ModuleName__Toolkit(BaseToolKit): toolkit.get_tools() - .. code-block:: python + .. code-block:: none # TODO: Example output. @@ -61,7 +61,7 @@ class __ModuleName__Toolkit(BaseToolKit): for event in events: event["messages"][-1].pretty_print() - .. code-block:: python + .. code-block:: none # TODO: Example output. 
diff --git a/libs/community/langchain_community/agent_toolkits/github/toolkit.py b/libs/community/langchain_community/agent_toolkits/github/toolkit.py index 014dc5f321a72..1712f8628698f 100644 --- a/libs/community/langchain_community/agent_toolkits/github/toolkit.py +++ b/libs/community/langchain_community/agent_toolkits/github/toolkit.py @@ -164,9 +164,105 @@ class GitHubToolkit(BaseToolkit): See [Security](https://python.langchain.com/docs/security) for more information. + Setup: + See detailed installation instructions here: + https://python.langchain.com/v0.2/docs/integrations/tools/github/#installation + + You will need to install ``pygithub`` and set the following environment + variables: + + .. code-block:: bash + + pip install -U pygithub + export GITHUB_APP_ID="your-app-id" + export GITHUB_APP_PRIVATE_KEY="path-to-private-key" + export GITHUB_REPOSITORY="your-github-repository" + + Instantiate: + .. code-block:: python + + from langchain_community.agent_toolkits.github.toolkit import GitHubToolkit + from langchain_community.utilities.github import GitHubAPIWrapper + + github = GitHubAPIWrapper() + toolkit = GitHubToolkit.from_github_api_wrapper(github) + + Tools: + .. code-block:: python + + tools = toolkit.get_tools() + for tool in tools: + print(tool.name) + + .. code-block:: none + + Get Issues + Get Issue + Comment on Issue + List open pull requests (PRs) + Get Pull Request + Overview of files included in PR + Create Pull Request + List Pull Requests' Files + Create File + Read File + Update File + Delete File + Overview of existing files in Main branch + Overview of files in current working branch + List branches in this repository + Set active branch + Create a new branch + Get files from a directory + Search issues and pull requests + Search code + Create review request + + Use within an agent: + .. 
code-block:: python + + from langchain_openai import ChatOpenAI + from langgraph.prebuilt import create_react_agent + + # Select example tool + tools = [tool for tool in toolkit.get_tools() if tool.name == "Get Issue"] + assert len(tools) == 1 + tools[0].name = "get_issue" + + llm = ChatOpenAI(model="gpt-4o-mini") + agent_executor = create_react_agent(llm, tools) + + example_query = "What is the title of issue 24888?" + + events = agent_executor.stream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + for event in events: + event["messages"][-1].pretty_print() + + .. code-block:: none + + ================================[1m Human Message [0m================================= + + What is the title of issue 24888? + ==================================[1m Ai Message [0m================================== + Tool Calls: + get_issue (call_iSYJVaM7uchfNHOMJoVPQsOi) + Call ID: call_iSYJVaM7uchfNHOMJoVPQsOi + Args: + issue_number: 24888 + =================================[1m Tool Message [0m================================= + Name: get_issue + + {"number": 24888, "title": "Standardize KV-Store Docs", "body": "..." + ==================================[1m Ai Message [0m================================== + + The title of issue 24888 is "Standardize KV-Store Docs". + Parameters: tools: List[BaseTool]. The tools in the toolkit. Default is an empty list. - """ + """ # noqa: E501 tools: List[BaseTool] = [] diff --git a/libs/community/langchain_community/agent_toolkits/gmail/toolkit.py b/libs/community/langchain_community/agent_toolkits/gmail/toolkit.py index 8d8f4c9fac8bc..a225d1d7d55ac 100644 --- a/libs/community/langchain_community/agent_toolkits/gmail/toolkit.py +++ b/libs/community/langchain_community/agent_toolkits/gmail/toolkit.py @@ -39,9 +39,81 @@ class GmailToolkit(BaseToolkit): See https://python.langchain.com/docs/security for more information. + Setup: + You will need a Google credentials.json file to use this toolkit. 
+ See instructions here: https://python.langchain.com/v0.2/docs/integrations/tools/gmail/#setup + + Key init args: + api_resource: Optional. The Google API resource. Default is None. + + Instantiate: + .. code-block:: python + + from langchain_google_community import GmailToolkit + + toolkit = GmailToolkit() + + Tools: + .. code-block:: python + + toolkit.get_tools() + + .. code-block:: none + + [GmailCreateDraft(api_resource=), + GmailSendMessage(api_resource=), + GmailSearch(api_resource=), + GmailGetMessage(api_resource=), + GmailGetThread(api_resource=)] + + Use within an agent: + .. code-block:: python + + from langchain_openai import ChatOpenAI + from langgraph.prebuilt import create_react_agent + + llm = ChatOpenAI(model="gpt-4o-mini") + + agent_executor = create_react_agent(llm, tools) + + example_query = "Draft an email to fake@fake.com thanking them for coffee." + + events = agent_executor.stream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + for event in events: + event["messages"][-1].pretty_print() + + .. code-block:: none + + ================================[1m Human Message [0m================================= + + Draft an email to fake@fake.com thanking them for coffee. + ==================================[1m Ai Message [0m================================== + Tool Calls: + create_gmail_draft (call_slGkYKZKA6h3Mf1CraUBzs6M) + Call ID: call_slGkYKZKA6h3Mf1CraUBzs6M + Args: + message: Dear Fake, + + I wanted to take a moment to thank you for the coffee yesterday. It was a pleasure catching up with you. Let's do it again soon! + + Best regards, + [Your Name] + to: ['fake@fake.com'] + subject: Thank You for the Coffee + =================================[1m Tool Message [0m================================= + Name: create_gmail_draft + + Draft created. 
Draft Id: r-7233782721440261513 + ==================================[1m Ai Message [0m================================== + + I have drafted an email to fake@fake.com thanking them for the coffee. You can review and send it from your email draft with the subject "Thank You for the Coffee". + Parameters: api_resource: Optional. The Google API resource. Default is None. - """ + """ # noqa: E501 api_resource: Resource = Field(default_factory=build_resource_service) diff --git a/libs/community/langchain_community/agent_toolkits/openapi/toolkit.py b/libs/community/langchain_community/agent_toolkits/openapi/toolkit.py index 124612ae54d14..a756da90df9df 100644 --- a/libs/community/langchain_community/agent_toolkits/openapi/toolkit.py +++ b/libs/community/langchain_community/agent_toolkits/openapi/toolkit.py @@ -38,7 +38,125 @@ class RequestsToolkit(BaseToolkit): what network access it has. See https://python.langchain.com/docs/security for more information. - """ + + Setup: + Install ``langchain-community``. + + .. code-block:: bash + + pip install -U langchain-community + + Key init args: + requests_wrapper: langchain_community.utilities.requests.GenericRequestsWrapper + wrapper for executing requests. + allow_dangerous_requests: bool + Defaults to False. Must "opt-in" to using dangerous requests by setting to True. + + Instantiate: + .. code-block:: python + + from langchain_community.agent_toolkits.openapi.toolkit import RequestsToolkit + from langchain_community.utilities.requests import TextRequestsWrapper + + toolkit = RequestsToolkit( + requests_wrapper=TextRequestsWrapper(headers={}), + allow_dangerous_requests=ALLOW_DANGEROUS_REQUEST, + ) + + Tools: + .. code-block:: python + + tools = toolkit.get_tools() + tools + + .. 
code-block:: none + + [RequestsGetTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True), + RequestsPostTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True), + RequestsPatchTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True), + RequestsPutTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True), + RequestsDeleteTool(requests_wrapper=TextRequestsWrapper(headers={}, aiosession=None, auth=None, response_content_type='text', verify=True), allow_dangerous_requests=True)] + + Use within an agent: + .. code-block:: python + + from langchain_openai import ChatOpenAI + from langgraph.prebuilt import create_react_agent + + + api_spec = \"\"\" + openapi: 3.0.0 + info: + title: JSONPlaceholder API + version: 1.0.0 + servers: + - url: https://jsonplaceholder.typicode.com + paths: + /posts: + get: + summary: Get posts + parameters: &id001 + - name: _limit + in: query + required: false + schema: + type: integer + example: 2 + description: Limit the number of results + \"\"\" + + system_message = \"\"\" + You have access to an API to help answer user queries. + Here is documentation on the API: + {api_spec} + \"\"\".format(api_spec=api_spec) + + llm = ChatOpenAI(model="gpt-4o-mini") + agent_executor = create_react_agent(llm, tools, state_modifier=system_message) + + example_query = "Fetch the top two posts. What are their titles?" + + events = agent_executor.stream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + for event in events: + event["messages"][-1].pretty_print() + + .. 
code-block:: none + + ================================[1m Human Message [0m================================= + + Fetch the top two posts. What are their titles? + ==================================[1m Ai Message [0m================================== + Tool Calls: + requests_get (call_RV2SOyzCnV5h2sm4WPgG8fND) + Call ID: call_RV2SOyzCnV5h2sm4WPgG8fND + Args: + url: https://jsonplaceholder.typicode.com/posts?_limit=2 + =================================[1m Tool Message [0m================================= + Name: requests_get + + [ + { + "userId": 1, + "id": 1, + "title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit", + "body": "quia et suscipit..." + }, + { + "userId": 1, + "id": 2, + "title": "qui est esse", + "body": "est rerum tempore vitae..." + } + ] + ==================================[1m Ai Message [0m================================== + + The titles of the top two posts are: + 1. "sunt aut facere repellat provident occaecati excepturi optio reprehenderit" + 2. "qui est esse" + """ # noqa: E501 requests_wrapper: TextRequestsWrapper """The requests wrapper.""" diff --git a/libs/community/langchain_community/agent_toolkits/slack/toolkit.py b/libs/community/langchain_community/agent_toolkits/slack/toolkit.py index 6e082fb28f5e4..874ef080ff92b 100644 --- a/libs/community/langchain_community/agent_toolkits/slack/toolkit.py +++ b/libs/community/langchain_community/agent_toolkits/slack/toolkit.py @@ -21,7 +21,73 @@ class SlackToolkit(BaseToolkit): Parameters: client: The Slack client. - """ + + Setup: + Install ``slack_sdk`` and set environment variable ``SLACK_USER_TOKEN``. + + .. code-block:: bash + + pip install -U slack_sdk + export SLACK_USER_TOKEN="your-user-token" + + Key init args: + client: slack_sdk.WebClient + The Slack client. + + Instantiate: + .. code-block:: python + + from langchain_community.agent_toolkits import SlackToolkit + + toolkit = SlackToolkit() + + Tools: + .. 
code-block:: python + + tools = toolkit.get_tools() + tools + + .. code-block:: none + + [SlackGetChannel(client=), + SlackGetMessage(client=), + SlackScheduleMessage(client=), + SlackSendMessage(client=)] + + Use within an agent: + .. code-block:: python + + from langchain_openai import ChatOpenAI + from langgraph.prebuilt import create_react_agent + + llm = ChatOpenAI(model="gpt-4o-mini") + agent_executor = create_react_agent(llm, tools) + + example_query = "When was the #general channel created?" + + events = agent_executor.stream( + {"messages": [("user", example_query)]}, + stream_mode="values", + ) + for event in events: + message = event["messages"][-1] + if message.type != "tool": # mask sensitive information + event["messages"][-1].pretty_print() + + .. code-block:: none + + ================================[1m Human Message [0m================================= + + When was the #general channel created? + ==================================[1m Ai Message [0m================================== + Tool Calls: + get_channelid_name_dict (call_NXDkALjoOx97uF1v0CoZTqtJ) + Call ID: call_NXDkALjoOx97uF1v0CoZTqtJ + Args: + ==================================[1m Ai Message [0m================================== + + The #general channel was created on timestamp 1671043305. + """ # noqa: E501 client: WebClient = Field(default_factory=login) diff --git a/libs/community/langchain_community/retrievers/arxiv.py b/libs/community/langchain_community/retrievers/arxiv.py index 633d22b1a240b..3d59e949d593e 100644 --- a/libs/community/langchain_community/retrievers/arxiv.py +++ b/libs/community/langchain_community/retrievers/arxiv.py @@ -10,9 +10,76 @@ class ArxivRetriever(BaseRetriever, ArxivAPIWrapper): """`Arxiv` retriever. - It wraps load() to get_relevant_documents(). - It uses all ArxivAPIWrapper arguments without any change. - """ + Setup: + Install ``arxiv``: + + .. 
code-block:: bash + + pip install -U arxiv + + Key init args: + load_max_docs: int + maximum number of documents to load + get_full_documents: bool + whether to return full document text or snippets + + Instantiate: + .. code-block:: python + + from langchain_community.retrievers import ArxivRetriever + + retriever = ArxivRetriever( + load_max_docs=2, + get_full_documents=True, + ) + + Usage: + .. code-block:: python + + docs = retriever.invoke("What is the ImageBind model?") + docs[0].metadata + + .. code-block:: none + + {'Entry ID': 'http://arxiv.org/abs/2305.05665v2', + 'Published': datetime.date(2023, 5, 31), + 'Title': 'ImageBind: One Embedding Space To Bind Them All', + 'Authors': 'Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, Ishan Misra'} + + Use within a chain: + .. code-block:: python + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_openai import ChatOpenAI + + prompt = ChatPromptTemplate.from_template( + \"\"\"Answer the question based only on the context provided. + + Context: {context} + + Question: {question}\"\"\" + ) + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + + def format_docs(docs): + return "\\n\\n".join(doc.page_content for doc in docs) + + chain = ( + {"context": retriever | format_docs, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + chain.invoke("What is the ImageBind model?") + + .. code-block:: none + + 'The ImageBind model is an approach to learn a joint embedding across six different modalities - images, text, audio, depth, thermal, and IMU data...' 
+ """ # noqa: E501 get_full_documents: bool = False diff --git a/libs/community/langchain_community/retrievers/azure_ai_search.py b/libs/community/langchain_community/retrievers/azure_ai_search.py index a10df0f568ec8..e2ab7f74f90b4 100644 --- a/libs/community/langchain_community/retrievers/azure_ai_search.py +++ b/libs/community/langchain_community/retrievers/azure_ai_search.py @@ -19,7 +19,71 @@ class AzureAISearchRetriever(BaseRetriever): - """`Azure AI Search` service retriever.""" + """`Azure AI Search` service retriever. + + Setup: + See here for more detail: https://python.langchain.com/v0.2/docs/integrations/retrievers/azure_ai_search/ + + We will need to install the below dependencies and set the required + environment variables: + + .. code-block:: bash + + pip install -U langchain-community azure-identity azure-search-documents + export AZURE_AI_SEARCH_SERVICE_NAME="" + export AZURE_AI_SEARCH_INDEX_NAME="" + export AZURE_AI_SEARCH_API_KEY="" + + Key init args: + content_key: str + top_k: int + index_name: str + + Instantiate: + .. code-block:: python + + from langchain_community.retrievers import AzureAISearchRetriever + + retriever = AzureAISearchRetriever( + content_key="content", top_k=1, index_name="langchain-vector-demo" + ) + + Usage: + .. code-block:: python + + retriever.invoke("here is my unstructured query string") + + Use within a chain: + .. code-block:: python + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_openai import AzureChatOpenAI + + prompt = ChatPromptTemplate.from_template( + \"\"\"Answer the question based only on the context provided. 
+ + Context: {context} + + Question: {question}\"\"\" + ) + + llm = AzureChatOpenAI(azure_deployment="gpt-35-turbo") + + def format_docs(docs): + return "\\n\\n".join(doc.page_content for doc in docs) + + chain = ( + {"context": retriever | format_docs, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + chain.invoke("...") + + """ # noqa: E501 service_name: str = "" """Name of Azure AI Search service""" diff --git a/libs/community/langchain_community/retrievers/bedrock.py b/libs/community/langchain_community/retrievers/bedrock.py index 85718c79c93a2..a2a05f77496e5 100644 --- a/libs/community/langchain_community/retrievers/bedrock.py +++ b/libs/community/langchain_community/retrievers/bedrock.py @@ -19,11 +19,18 @@ class RetrievalConfig(BaseModel, extra="allow"): # type: ignore[call-arg] class AmazonKnowledgeBasesRetriever(BaseRetriever): - """`Amazon Bedrock Knowledge Bases` retrieval. + """Amazon Bedrock Knowledge Bases retriever. See https://aws.amazon.com/bedrock/knowledge-bases for more info. - Args: + Setup: + Install ``langchain-aws``: + + .. code-block:: bash + + pip install -U langchain-aws + + Key init args: knowledge_base_id: Knowledge Base ID. region_name: The aws region e.g., `us-west-2`. Fallback to AWS_DEFAULT_REGION env variable or region specified in @@ -35,7 +42,7 @@ class AmazonKnowledgeBasesRetriever(BaseRetriever): client: boto3 client for bedrock agent runtime. retrieval_config: Configuration for retrieval. - Example: + Instantiate: .. code-block:: python from langchain_community.retrievers import AmazonKnowledgeBasesRetriever @@ -48,7 +55,48 @@ class AmazonKnowledgeBasesRetriever(BaseRetriever): } }, ) - """ + + Usage: + .. code-block:: python + + query = "..." + + retriever.invoke(query) + + Use within a chain: + .. 
code-block:: python + + from langchain_aws import ChatBedrockConverse + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_openai import ChatOpenAI + + prompt = ChatPromptTemplate.from_template( + \"\"\"Answer the question based only on the context provided. + + Context: {context} + + Question: {question}\"\"\" + ) + + llm = ChatBedrockConverse( + model_id="anthropic.claude-3-5-sonnet-20240620-v1:0" + ) + + def format_docs(docs): + return "\\n\\n".join(doc.page_content for doc in docs) + + chain = ( + {"context": retriever | format_docs, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + chain.invoke("...") + + """ # noqa: E501 knowledge_base_id: str region_name: Optional[str] = None diff --git a/libs/community/langchain_community/retrievers/milvus.py b/libs/community/langchain_community/retrievers/milvus.py index 3c12e7b150d6e..7e8cd3fdb3ab0 100644 --- a/libs/community/langchain_community/retrievers/milvus.py +++ b/libs/community/langchain_community/retrievers/milvus.py @@ -15,7 +15,73 @@ class MilvusRetriever(BaseRetriever): - """`Milvus API` retriever.""" + """Milvus API retriever. + + See detailed instructions here: https://python.langchain.com/v0.2/docs/integrations/retrievers/milvus_hybrid_search/ + + Setup: + Install ``langchain-milvus`` and other dependencies: + + .. code-block:: bash + + pip install -U pymilvus[model] langchain-milvus + + Key init args: + collection: Milvus Collection + + Instantiate: + .. code-block:: python + + retriever = MilvusCollectionHybridSearchRetriever(collection=collection) + + Usage: + .. code-block:: python + + query = "What are the story about ventures?" + + retriever.invoke(query) + + .. 
code-block:: none + + [Document(page_content="In 'The Lost Expedition' by Caspian Grey...", metadata={'doc_id': '449281835035545843'}), + Document(page_content="In 'The Phantom Pilgrim' by Rowan Welles...", metadata={'doc_id': '449281835035545845'}), + Document(page_content="In 'The Dreamwalker's Journey' by Lyra Snow..", metadata={'doc_id': '449281835035545846'})] + + Use within a chain: + .. code-block:: python + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_openai import ChatOpenAI + + prompt = ChatPromptTemplate.from_template( + \"\"\"Answer the question based only on the context provided. + + Context: {context} + + Question: {question}\"\"\" + ) + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + + def format_docs(docs): + return "\\n\\n".join(doc.page_content for doc in docs) + + chain = ( + {"context": retriever | format_docs, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + chain.invoke("What novels has Lila written and what are their contents?") + + .. code-block:: none + + "Lila Rose has written 'The Memory Thief,' which follows a charismatic thief..." + + """ # noqa: E501 embedding_function: Embeddings collection_name: str = "LangChainCollection" diff --git a/libs/community/langchain_community/retrievers/wikipedia.py b/libs/community/langchain_community/retrievers/wikipedia.py index cd7e38180cea3..570d7a9aa75e8 100644 --- a/libs/community/langchain_community/retrievers/wikipedia.py +++ b/libs/community/langchain_community/retrievers/wikipedia.py @@ -10,9 +10,66 @@ class WikipediaRetriever(BaseRetriever, WikipediaAPIWrapper): """`Wikipedia API` retriever. - It wraps load() to get_relevant_documents(). - It uses all WikipediaAPIWrapper arguments without any change. - """ + Setup: + Install the ``wikipedia`` dependency: + + .. 
code-block:: bash + + pip install -U wikipedia + + Instantiate: + .. code-block:: python + + from langchain_community.retrievers import WikipediaRetriever + + retriever = WikipediaRetriever() + + Usage: + .. code-block:: python + + docs = retriever.invoke("TOKYO GHOUL") + print(docs[0].page_content[:100]) + + .. code-block:: none + + Tokyo Ghoul (Japanese: 東京喰種(トーキョーグール), Hepburn: Tōkyō Gūru) is a Japanese dark fantasy + + Use within a chain: + .. code-block:: python + + from langchain_core.output_parsers import StrOutputParser + from langchain_core.prompts import ChatPromptTemplate + from langchain_core.runnables import RunnablePassthrough + from langchain_openai import ChatOpenAI + + prompt = ChatPromptTemplate.from_template( + \"\"\"Answer the question based only on the context provided. + + Context: {context} + + Question: {question}\"\"\" + ) + + llm = ChatOpenAI(model="gpt-3.5-turbo-0125") + + def format_docs(docs): + return "\\n\\n".join(doc.page_content for doc in docs) + + chain = ( + {"context": retriever | format_docs, "question": RunnablePassthrough()} + | prompt + | llm + | StrOutputParser() + ) + + chain.invoke( + "Who is the main character in `Tokyo Ghoul` and does he transform into a ghoul?" + ) + + .. code-block:: none + + 'The main character in Tokyo Ghoul is Ken Kaneki, who transforms into a ghoul after receiving an organ transplant from a ghoul named Rize.' + """ # noqa: E501 def _get_relevant_documents( self, query: str, *, run_manager: CallbackManagerForRetrieverRun