diff --git a/.github/workflows/validate_new_notebooks.yml b/.github/workflows/validate_new_notebooks.yml index cce0fc05b67f..bb6bffebc062 100644 --- a/.github/workflows/validate_new_notebooks.yml +++ b/.github/workflows/validate_new_notebooks.yml @@ -36,23 +36,23 @@ jobs: - name: Get changed files id: changed-files uses: tj-actions/changed-files@v44 - - name: Check for new or modified notebooks + - name: Check for new or modified notebooks in docs/core_docs id: check_notebooks run: | - notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '\.ipynb$' || true) + notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '^docs/core_docs/.*\.ipynb$' || true) echo "Affected notebooks: $notebooks" echo "has_affected_notebooks=$([ -n "$notebooks" ] && echo 'true' || echo 'false')" >> $GITHUB_OUTPUT - name: Build examples if: steps.check_notebooks.outputs.has_affected_notebooks == 'true' run: yarn turbo:command build --filter=examples - - name: Validate affected notebooks + - name: Validate affected notebooks in docs/core_docs if: steps.check_notebooks.outputs.has_affected_notebooks == 'true' run: | - notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '\.ipynb$' || true) + notebooks=$(echo '${{ steps.changed-files.outputs.all_changed_files }}' | tr ' ' '\n' | grep '^docs/core_docs/.*\.ipynb$' || true) if [ -n "$notebooks" ]; then for notebook in $notebooks; do yarn notebook:validate "$notebook" done else - echo "No notebooks to validate." + echo "No notebooks in docs/core_docs to validate." fi \ No newline at end of file diff --git a/docs/core_docs/.gitignore b/docs/core_docs/.gitignore index aa23525d4033..df8abd01615f 100644 --- a/docs/core_docs/.gitignore +++ b/docs/core_docs/.gitignore @@ -34,26 +34,6 @@ yarn-error.log* /.quarto/ # AUTO_GENERATED_DOCS -docs/tutorials/rag.md -docs/tutorials/rag.mdx -docs/tutorials/query_analysis.md -docs/tutorials/query_analysis.mdx -docs/tutorials/qa_chat_history.md -docs/tutorials/qa_chat_history.mdx -docs/tutorials/pdf_qa.md -docs/tutorials/pdf_qa.mdx -docs/tutorials/local_rag.md -docs/tutorials/local_rag.mdx -docs/tutorials/llm_chain.md -docs/tutorials/llm_chain.mdx -docs/tutorials/graph.md -docs/tutorials/graph.mdx -docs/tutorials/extraction.md -docs/tutorials/extraction.mdx -docs/tutorials/classification.md -docs/tutorials/classification.mdx -docs/tutorials/chatbot.md -docs/tutorials/chatbot.mdx docs/how_to/trim_messages.md docs/how_to/trim_messages.mdx docs/how_to/tools_prompting.md @@ -208,5 +188,27 @@ docs/how_to/assign.md docs/how_to/assign.mdx docs/how_to/agent_executor.md docs/how_to/agent_executor.mdx +docs/tutorials/rag.md +docs/tutorials/rag.mdx +docs/tutorials/query_analysis.md +docs/tutorials/query_analysis.mdx +docs/tutorials/qa_chat_history.md +docs/tutorials/qa_chat_history.mdx +docs/tutorials/pdf_qa.md +docs/tutorials/pdf_qa.mdx +docs/tutorials/local_rag.md +docs/tutorials/local_rag.mdx +docs/tutorials/llm_chain.md +docs/tutorials/llm_chain.mdx +docs/tutorials/graph.md +docs/tutorials/graph.mdx +docs/tutorials/extraction.md +docs/tutorials/extraction.mdx +docs/tutorials/classification.md +docs/tutorials/classification.mdx +docs/tutorials/chatbot.md +docs/tutorials/chatbot.mdx docs/integrations/llms/mistral.md -docs/integrations/llms/mistral.mdx \ No newline at end of file +docs/integrations/llms/mistral.mdx +docs/integrations/chat/mistral.md +docs/integrations/chat/mistral.mdx \ No newline at end of file diff --git 
a/docs/core_docs/docs/integrations/chat/azure.ipynb b/docs/core_docs/docs/integrations/chat/azure.ipynb new file mode 100644 index 000000000000..dbae00d112e2 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/azure.ipynb @@ -0,0 +1,399 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Azure OpenAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# AzureChatOpenAI\n", + "\n", + "This will help you get started with AzureChatOpenAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all AzureChatOpenAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/v0.2/docs/integrations/chat/azure) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [AzureChatOpenAI](https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html) | [@langchain/openai](https://api.js.langchain.com/modules/langchain_openai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/openai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/openai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond.\n", + "\n", + "LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node).\n", + "\n", + "You can learn more about Azure OpenAI and its differences from the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview).\n", + "\n", + "### Credentials\n", + "\n", + "If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started.\n", + "\n", + "You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal).\n", + "\n", + "Once you have your instance running, make sure you have the name of your instance and its key. You can find the key in the Azure Portal, under the \"Keys and Endpoint\" section of your instance. 
Then, if using Node.js, you can set your credentials as environment variables:\n", + "\n", + "```bash\n", + "AZURE_OPENAI_API_INSTANCE_NAME=\n", + "AZURE_OPENAI_API_DEPLOYMENT_NAME=\n", + "AZURE_OPENAI_API_KEY=\n", + "AZURE_OPENAI_API_VERSION=\"2024-02-01\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain AzureChatOpenAI integration lives in the `@langchain/openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\" \n", + "\n", + "const llm = new AzureChatOpenAI({\n", + " model: \"gpt-4o\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY, // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME\n", + " azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME, // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION, // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWKByvVrzWMxSn8joRZAklHoB32\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9qrWR7WiNjZ3leSG4Wd77cnKEVivv\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Using Azure Managed Identity\n", + "\n", + "If you're using Azure Managed Identity, you can configure the credentials like this:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "d7f47b2a", + "metadata": {}, + "outputs": [], + "source": [ + "import {\n", + " DefaultAzureCredential,\n", + " getBearerTokenProvider,\n", + "} from \"@azure/identity\";\n", + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const credentials = new DefaultAzureCredential();\n", + "const azureADTokenProvider = getBearerTokenProvider(\n", + " credentials,\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ");\n", + "\n", + "const llmWithManagedIdentity = new AzureChatOpenAI({\n", + " azureADTokenProvider,\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + "});" + ] + }, + { + "cell_type": "markdown", + "id": "6a889856", + "metadata": {}, + "source": [ + "## Using a different domain\n", + "\n", + "If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable.\n", + "For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "ace7f876", + "metadata": {}, + "outputs": 
[], + "source": [ + "import { AzureChatOpenAI } from \"@langchain/openai\";\n", + "\n", + "const llmWithDifferentDomain = new AzureChatOpenAI({\n", + " temperature: 0.9,\n", + " azureOpenAIApiKey: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY\n", + " azureOpenAIApiDeploymentName: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME\n", + " azureOpenAIApiVersion: \"\", // In Node.js defaults to process.env.AZURE_OPENAI_API_VERSION\n", + " azureOpenAIBasePath:\n", + " \"https://westeurope.api.microsoft.com/openai/deployments\", // In Node.js defaults to process.env.AZURE_OPENAI_BASE_PATH\n", + "});\n" + ] + }, + { + "cell_type": "markdown", + "id": "0ac0310c", + "metadata": {}, + "source": [ + "## Migration from Azure OpenAI SDK\n", + "\n", + "If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps:\n", + "\n", + "1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "\n", + " @langchain/openai\n", + "\n", + "\n", + "```\n", + "\n", + "```bash\n", + "npm uninstall @langchain/azure-openai\n", + "```\n", + "\n", + " \n", + "2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package:\n", + " ```typescript\n", + " import { AzureChatOpenAI } from \"@langchain/openai\";\n", + " ```\n", + "3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters:\n", + "\n", + " ```typescript\n", + " const model = new AzureChatOpenAI({\n", + " azureOpenAIApiKey: \"\",\n", + " azureOpenAIApiInstanceName: \"\",\n", + " azureOpenAIApiDeploymentName: \"\",\n", + " azureOpenAIApiVersion: \"\",\n", + " });\n", + " ```\n", + "\n", + " Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version.\n", + "\n", + " - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details.\n", + "\n", + " - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version.\n" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all AzureChatOpenAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_openai.AzureChatOpenAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/azure.mdx b/docs/core_docs/docs/integrations/chat/azure.mdx deleted file mode 100644 index 912bd1e72fdd..000000000000 --- a/docs/core_docs/docs/integrations/chat/azure.mdx +++ 
/dev/null @@ -1,116 +0,0 @@ ---- -sidebar_label: Azure OpenAI -keywords: [AzureChatOpenAI] ---- - -import CodeBlock from "@theme/CodeBlock"; - -# Azure OpenAI - -[Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) is a cloud service to help you quickly develop generative AI experiences with a diverse set of prebuilt and curated models from OpenAI, Meta and beyond. - -LangChain.js supports integration with [Azure OpenAI](https://azure.microsoft.com/products/ai-services/openai-service/) using the new Azure integration in the [OpenAI SDK](https://github.com/openai/openai-node). - -You can learn more about Azure OpenAI and its difference with the OpenAI API on [this page](https://learn.microsoft.com/azure/ai-services/openai/overview). If you don't have an Azure account, you can [create a free account](https://azure.microsoft.com/free/) to get started. - -:::info - -Previously, LangChain.js supported integration with Azure OpenAI using the dedicated [Azure OpenAI SDK](https://github.com/Azure/azure-sdk-for-js/tree/main/sdk/openai/openai). This SDK is now deprecated in favor of the new Azure integration in the OpenAI SDK, which allows to access the latest OpenAI models and features the same day they are released, and allows seemless transition between the OpenAI API and Azure OpenAI. - -If you are using Azure OpenAI with the deprecated SDK, see the [migration guide](#migration-from-azure-openai-sdk) to update to the new API. - -::: - -## Setup - -You'll first need to install the [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install -S @langchain/openai -``` - -You'll also need to have an Azure OpenAI instance deployed. You can deploy a version on Azure Portal following [this guide](https://learn.microsoft.com/azure/ai-services/openai/how-to/create-resource?pivots=web-portal). - -Once you have your instance running, make sure you have the name of your instance and key. You can find the key in the Azure Portal, under the "Keys and Endpoint" section of your instance. - -If you're using Node.js, you can define the following environment variables to use the service: - -```bash -AZURE_OPENAI_API_INSTANCE_NAME= -AZURE_OPENAI_API_DEPLOYMENT_NAME= -AZURE_OPENAI_API_KEY= -AZURE_OPENAI_API_VERSION="2024-02-01" -``` - -Alternatively, you can pass the values directly to the `AzureOpenAI` constructor: - -import AzureOpenAI from "@examples/models/chat/integration_azure_openai.ts"; - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -{AzureOpenAI} - -:::info - -You can find the list of supported API versions in the [Azure OpenAI documentation](https://learn.microsoft.com/azure/ai-services/openai/reference). - -::: - -### Using Azure Managed Identity - -If you're using Azure Managed Identity, you can configure the credentials like this: - -import AzureOpenAIManagedIdentity from "@examples/models/chat/integration_azure_openai_managed_identity.ts"; - -{AzureOpenAIManagedIdentity} - -### Using a different domain - -If your instance is hosted under a domain other than the default `openai.azure.com`, you'll need to use the alternate `AZURE_OPENAI_BASE_PATH` environment variable. 
-For example, here's how you would connect to the domain `https://westeurope.api.microsoft.com/openai/deployments/{DEPLOYMENT_NAME}`: - -import AzureOpenAIBasePath from "@examples/models/chat/integration_azure_openai_base_path.ts"; - -{AzureOpenAIBasePath} - -## Usage example - -import Example from "@examples/models/chat/integration_azure_chat_openai.ts"; - -{Example} - -## Migration from Azure OpenAI SDK - -If you are using the deprecated Azure OpenAI SDK with the `@langchain/azure-openai` package, you can update your code to use the new Azure integration following these steps: - -1. Install the new `@langchain/openai` package and remove the previous `@langchain/azure-openai` package: - ```bash npm2yarn - npm install @langchain/openai - npm uninstall @langchain/azure-openai - ``` -2. Update your imports to use the new `AzureChatOpenAI` class from the `@langchain/openai` package: - ```typescript - import { AzureChatOpenAI } from "@langchain/openai"; - ``` -3. Update your code to use the new `AzureChatOpenAI` class and pass the required parameters: - - ```typescript - const model = new AzureChatOpenAI({ - azureOpenAIApiKey: "", - azureOpenAIApiInstanceName: "", - azureOpenAIApiDeploymentName: "", - azureOpenAIApiVersion: "", - }); - ``` - - Notice that the constructor now requires the `azureOpenAIApiInstanceName` parameter instead of the `azureOpenAIEndpoint` parameter, and adds the `azureOpenAIApiVersion` parameter to specify the API version. - - - If you were using Azure Managed Identity, you now need to use the `azureADTokenProvider` parameter to the constructor instead of `credentials`, see the [Azure Managed Identity](#using-azure-managed-identity) section for more details. - - - If you were using environment variables, you now have to set the `AZURE_OPENAI_API_INSTANCE_NAME` environment variable instead of `AZURE_OPENAI_API_ENDPOINT`, and add the `AZURE_OPENAI_API_VERSION` environment variable to specify the API version. diff --git a/docs/core_docs/docs/integrations/chat/fireworks.ipynb b/docs/core_docs/docs/integrations/chat/fireworks.ipynb new file mode 100644 index 000000000000..aaaf3e03e75a --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/fireworks.ipynb @@ -0,0 +1,284 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Fireworks\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatFireworks\n", + "\n", + "This will help you get started with `ChatFireworks` [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatFireworks` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/fireworks) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatFireworks](https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_fireworks.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatFireworks` models you'll need to create a Fireworks account, get an API key, and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [the Fireworks website](https://fireworks.ai/login) to sign up to Fireworks and generate an API key. 
Once you've done this set the `FIREWORKS_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export FIREWORKS_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain `ChatFireworks` integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatFireworks } from \"@langchain/community/chat_models/fireworks\" \n", + "\n", + "const llm = new ChatFireworks({\n", + " model: \"accounts/fireworks/models/llama-v3p1-70b-instruct\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rBYHbb6QYRrKyr2tMhO9pH4AYXR4\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rBYM3KSIhHOuTXpBvA5oFyk8RSaN\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats:\n", + "\n", + "- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility).\n", + "- Generation using multiple prompts is not supported." 
+ ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatFireworks features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_fireworks.ChatFireworks.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/fireworks.mdx b/docs/core_docs/docs/integrations/chat/fireworks.mdx deleted file mode 100644 index 2db7cb178435..000000000000 --- a/docs/core_docs/docs/integrations/chat/fireworks.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -sidebar_label: Fireworks ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatFireworks - -You can use models provided by Fireworks AI as follows: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -import Fireworks from "@examples/models/chat/integration_fireworks.ts"; - -{Fireworks} - -Behind the scenes, Fireworks AI uses the OpenAI SDK and OpenAI compatible API, with some caveats: - -- Certain properties are not supported by the Fireworks API, see [here](https://readme.fireworks.ai/docs/openai-compatibility#api-compatibility). -- Generation using multiple prompts is not supported. diff --git a/docs/core_docs/docs/integrations/chat/groq.mdx b/docs/core_docs/docs/integrations/chat/groq.mdx deleted file mode 100644 index 794a06b75a4e..000000000000 --- a/docs/core_docs/docs/integrations/chat/groq.mdx +++ /dev/null @@ -1,74 +0,0 @@ ---- -sidebar_label: Groq ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatGroq - -## Setup - -In order to use the Groq API you'll need an API key. You can sign up for a Groq account and create an API key [here](https://wow.groq.com/). - -You'll first need to install the [`@langchain/groq`](https://www.npmjs.com/package/@langchain/groq) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/groq -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -## Usage - -import ChatGroqExample from "@examples/models/chat/chat_groq.ts"; - -{ChatGroqExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/2ba59207-1383-4e42-b6a6-c1ddcfcd5710/r) -::: - -## Tool calling - -Groq chat models support calling multiple functions to get all required data to answer a question. -Here's an example: - -import GroqTools from "@examples/models/chat/integration_groq_tool_calls.ts"; - -{GroqTools} - -### `.withStructuredOutput({ ... })` - -:::info -The `.withStructuredOutput` method is in beta. It is actively being worked on, so the API may change. -::: - -You can also use the `.withStructuredOutput({ ... })` method to coerce `ChatGroq` into returning a structured output. 
- -The method allows for passing in either a Zod object, or a valid JSON schema (like what is returned from [`zodToJsonSchema`](https://www.npmjs.com/package/zod-to-json-schema)). - -Using the method is simple. Just define your LLM and call `.withStructuredOutput({ ... })` on it, passing the desired schema. - -Here is an example using a Zod schema and the `functionCalling` mode (default mode): - -import WSAZodExample from "@examples/models/chat/integration_groq_wsa_zod.ts"; - -{WSAZodExample} - -## Streaming - -Groq's API also supports streaming token responses. The example below demonstrates how to use this feature. - -import ChatStreamGroqExample from "@examples/models/chat/chat_stream_groq.ts"; - -{ChatStreamGroqExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/72832eb5-b9ae-4ce0-baa2-c2e95eca61a7/r) -::: diff --git a/docs/core_docs/docs/integrations/chat/mistral.ipynb b/docs/core_docs/docs/integrations/chat/mistral.ipynb new file mode 100644 index 000000000000..f3f61fec8bff --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/mistral.ipynb @@ -0,0 +1,663 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: MistralAI\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatMistralAI\n", + "\n", + "This will help you get started with ChatMistralAI [chat models](/docs/concepts/#chat-models). For detailed documentation of all ChatMistralAI features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/mistralai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatMistralAI](https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html) | [@langchain/mistralai](https://api.js.langchain.com/modules/langchain_mistralai.html) | ❌ | ❌ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/mistralai?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/mistralai?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ❌ | ❌ | ❌ | ✅ | ✅ | ❌ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatMistralAI` models you'll need to create a Mistral AI account, get an API key, and install the `@langchain/mistralai` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head [here](https://console.mistral.ai/) to sign up to Mistral AI and generate an API key. 
Once you've done this set the `MISTRAL_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export MISTRAL_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatMistralAI integration lives in the `@langchain/mistralai` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + "@langchain/mistralai\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatMistralAI } from \"@langchain/mistralai\" \n", + "\n", + "const llm = new ChatMistralAI({\n", + " model: \"mistral-small\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation\n", + "\n", + "When sending chat messages to Mistral, there are a few requirements to follow:\n", + "\n", + "- The first message can _*not*_ be an assistant (ai) message.\n", + "- Messages _*must*_ alternate between user and assistant (ai) messages.\n", + "- Messages can _*not*_ end with an assistant (ai) or system message." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Sure, I'd be happy to help you translate that sentence into French! The English sentence \\\"I love programming\\\" translates to \\\"J'aime programmer\\\" in French. Let me know if you have any other questions or need further assistance!\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 52,\n", + " \"promptTokens\": 32,\n", + " \"totalTokens\": 84\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 32,\n", + " \"output_tokens\": 52,\n", + " \"total_tokens\": 84\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Sure, I'd be happy to help you translate that sentence into French! The English sentence \"I love programming\" translates to \"J'aime programmer\" in French. 
Let me know if you have any other questions or need further assistance!\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"content\": \"Ich liebe Programmierung. (German translation)\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 12,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 38\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 12,\n", + " \"total_tokens\": 38\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling\n", + "\n", + "Mistral's API now supports tool calling and JSON mode!\n", + "The examples below demonstrate how to use them, along with how to use the `withStructuredOutput` method to easily compose structured output LLM calls." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "98d9034c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { operation: 'add', number1: 2, number2: 2 },\n", + " type: 'tool_call',\n", + " id: 'Tn8X3UCSP'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatMistralAI } from \"@langchain/mistralai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "\n", + "const calculatorSchema = z.object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + "});\n", + "\n", + "const calculatorTool = tool((input) => {\n", + " return JSON.stringify(input);\n", + "}, {\n", + " name: \"calculator\",\n", + " description: \"A simple calculator tool\",\n", + " schema: calculatorSchema,\n", + "});\n", + "\n", + "// Bind the tool to the model\n", + "const modelWithTool = new ChatMistralAI({\n", + " model: \"mistral-large-latest\",\n", + "}).bind({\n", + " tools: [calculatorTool],\n", + "});\n", + "\n", + "\n", + "const calcToolPrompt = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt, model, and output parser together\n", + "const chainWithCalcTool = calcToolPrompt.pipe(modelWithTool);\n", + "\n", + "const calcToolRes = await chainWithCalcTool.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.log(calcToolRes.tool_calls);" + ] + }, + { + "cell_type": "markdown", + "id": "e6ff9f76", + "metadata": {}, + "source": [ + "### `.withStructuredOutput({ ... })`\n", + "\n", + "Using the `.withStructuredOutput` method, you can easily make the LLM return structured output, given only a Zod or JSON schema:\n", + "\n", + "```{=mdx}\n", + "\n", + ":::note\n", + "The Mistral tool calling API requires descriptions for each tool field. 
If descriptions are not supplied, the API will error.\n", + ":::\n", + "\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "a8638d82", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'add', number1: 2, number2: 2 }\n" + ] + } + ], + "source": [ + "import { ChatMistralAI } from \"@langchain/mistralai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { z } from \"zod\";\n", + "\n", + "const calculatorSchemaForWSO = z\n", + " .object({\n", + " operation: z\n", + " .enum([\"add\", \"subtract\", \"multiply\", \"divide\"])\n", + " .describe(\"The type of operation to execute.\"),\n", + " number1: z.number().describe(\"The first number to operate on.\"),\n", + " number2: z.number().describe(\"The second number to operate on.\"),\n", + " })\n", + " .describe(\"A simple calculator tool\");\n", + "\n", + "const llmForWSO = new ChatMistralAI({\n", + " model: \"mistral-large-latest\",\n", + "})\n", + "\n", + "// Pass the schema and tool name to the withStructuredOutput method\n", + "const modelWithStructuredOutput = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n", + " name: \"calculator\",\n", + "});\n", + "\n", + "const promptForWSO = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const chainWSO = promptForWSO.pipe(modelWithStructuredOutput);\n", + "\n", + "const responseWSO = await chainWSO.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.log(responseWSO);" + ] + }, + { + "cell_type": "markdown", + "id": "38d8a048", + "metadata": {}, + "source": [ + "You can supply a \"name\" field to give the LLM additional context around what you are trying to generate. You can also pass `includeRaw: true` to get the raw message back from the model." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9786b41a", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{\n", + " raw: AIMessage {\n", + " lc_serializable: true,\n", + " lc_kwargs: {\n", + " content: '',\n", + " tool_calls: [\n", + " {\n", + " name: 'calculator',\n", + " args: { operation: 'add', number1: 2, number2: 2 },\n", + " type: 'tool_call',\n", + " id: 'w48T6Nc3d'\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " additional_kwargs: {\n", + " tool_calls: [\n", + " {\n", + " id: 'w48T6Nc3d',\n", + " function: {\n", + " name: 'calculator',\n", + " arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n", + " },\n", + " type: 'function'\n", + " }\n", + " ]\n", + " },\n", + " usage_metadata: { input_tokens: 205, output_tokens: 34, total_tokens: 239 },\n", + " response_metadata: {}\n", + " },\n", + " lc_namespace: [ 'langchain_core', 'messages' ],\n", + " content: '',\n", + " name: undefined,\n", + " additional_kwargs: {\n", + " tool_calls: [\n", + " {\n", + " id: 'w48T6Nc3d',\n", + " function: {\n", + " name: 'calculator',\n", + " arguments: '{\"operation\": \"add\", \"number1\": 2, \"number2\": 2}'\n", + " },\n", + " type: 'function'\n", + " }\n", + " ]\n", + " },\n", + " response_metadata: {\n", + " tokenUsage: { completionTokens: 34, promptTokens: 205, totalTokens: 239 },\n", + " finish_reason: 'tool_calls'\n", + " },\n", + " id: undefined,\n", + " tool_calls: [\n", + " {\n", + " name: 'calculator',\n", + " args: { operation: 'add', number1: 2, number2: 2 },\n", + " type: 'tool_call',\n", + " id: 'w48T6Nc3d'\n", + " }\n", + " ],\n", + " invalid_tool_calls: [],\n", + " usage_metadata: { input_tokens: 205, output_tokens: 34, total_tokens: 239 }\n", + " },\n", + " parsed: { operation: 'add', number1: 2, number2: 2 }\n", + "}\n" + ] + } + ], + "source": [ + "const includeRawModel = llmForWSO.withStructuredOutput(calculatorSchemaForWSO, {\n", + " name: \"calculator\",\n", + " includeRaw: true,\n", + "});\n", + "const includeRawChain = promptForWSO.pipe(includeRawModel);\n", + "\n", + "const includeRawResponse = await includeRawChain.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.dir(includeRawResponse, { depth: null });" + ] + }, + { + "cell_type": "markdown", + "id": "6b7b374f", + "metadata": {}, + "source": [ + "### Using JSON schema:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "9f1dc9bd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ operation: 'add', number1: 2, number2: 2 }\n" + ] + } + ], + "source": [ + "import { ChatMistralAI } from \"@langchain/mistralai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const calculatorJsonSchema = {\n", + " type: \"object\",\n", + " properties: {\n", + " operation: {\n", + " type: \"string\",\n", + " enum: [\"add\", \"subtract\", \"multiply\", \"divide\"],\n", + " description: \"The type of operation to execute.\",\n", + " },\n", + " number1: { type: \"number\", description: \"The first number to operate on.\" },\n", + " number2: {\n", + " type: \"number\",\n", + " description: \"The second number to operate on.\",\n", + " },\n", + " },\n", + " required: [\"operation\", \"number1\", \"number2\"],\n", + " description: \"A simple calculator tool\",\n", + "};\n", + "\n", + "const llmForJsonSchema = new ChatMistralAI({\n", + " model: \"mistral-large-latest\",\n", + "});\n", + "\n", + "// Pass the schema and 
tool name to the withStructuredOutput method\n", + "const modelWithJsonSchemaTool = llmForJsonSchema.withStructuredOutput(calculatorJsonSchema);\n", + "\n", + "const promptForJsonSchema = ChatPromptTemplate.fromMessages([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant who always needs to use a calculator.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + "]);\n", + "\n", + "// Chain your prompt and model together\n", + "const chainWithJsonSchema = promptForJsonSchema.pipe(modelWithJsonSchemaTool);\n", + "\n", + "const responseFromJsonSchema = await chainWithJsonSchema.invoke({\n", + " input: \"What is 2 + 2?\",\n", + "});\n", + "console.log(responseFromJsonSchema);\n" + ] + }, + { + "cell_type": "markdown", + "id": "3c8bc1d4", + "metadata": {}, + "source": [ + "### Tool calling agent\n", + "\n", + "The larger Mistral models not only support tool calling, but can also be used in the Tool Calling agent.\n", + "Here's an example:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "76bd0061", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "It's 28 °C in Paris.\n" + ] + } + ], + "source": [ + "import { z } from \"zod\";\n", + "import { ChatMistralAI } from \"@langchain/mistralai\";\n", + "import { tool } from \"@langchain/core/tools\";\n", + "import { AgentExecutor, createToolCallingAgent } from \"langchain/agents\";\n", + "\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "\n", + "const llmForAgent = new ChatMistralAI({\n", + " temperature: 0,\n", + " model: \"mistral-large-latest\",\n", + "});\n", + "\n", + "// Prompt template must have \"input\" and \"agent_scratchpad\" input variables\n", + "const agentPrompt = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a helpful assistant\"],\n", + " [\"placeholder\", \"{chat_history}\"],\n", + " [\"human\", \"{input}\"],\n", + " [\"placeholder\", \"{agent_scratchpad}\"],\n", + "]);\n", + "\n", + "// Mocked tool\n", + "const currentWeatherToolForAgent = tool(async () => \"28 °C\", {\n", + " name: \"get_current_weather\",\n", + " description: \"Get the current weather in a given location\",\n", + " schema: z.object({\n", + " location: z.string().describe(\"The city and state, e.g. 
San Francisco, CA\"),\n", + " }),\n", + "});\n", + "\n", + "const agent = createToolCallingAgent({\n", + " llm: llmForAgent,\n", + " tools: [currentWeatherToolForAgent],\n", + " prompt: agentPrompt,\n", + "});\n", + "\n", + "const agentExecutor = new AgentExecutor({\n", + " agent,\n", + " tools: [currentWeatherToolForAgent],\n", + "});\n", + "\n", + "const agentInput = \"What's the weather like in Paris?\";\n", + "const agentRes = await agentExecutor.invoke({ input: agentInput });\n", + "\n", + "console.log(agentRes.output);\n" + ] + }, + { + "cell_type": "markdown", + "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all ChatMistralAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_mistralai.ChatMistralAI.html" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/mistral.mdx b/docs/core_docs/docs/integrations/chat/mistral.mdx deleted file mode 100644 index 2566ee06c223..000000000000 --- a/docs/core_docs/docs/integrations/chat/mistral.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -sidebar_label: Mistral AI ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatMistralAI - -[Mistral AI](https://mistral.ai/) is a research organization and hosting platform for LLMs. -The LangChain implementation of Mistral's models uses their hosted generation API, making it easier to access their models without needing to run them locally. - -:::tip -Want to run Mistral's models locally? Check out our [Ollama integration](/docs/integrations/chat/ollama). -::: - -## Models - -Mistral's API offers access to two of their open source, and proprietary models. -See [this page](https://docs.mistral.ai/getting-started/models/) for an up to date list. - -## Setup - -In order to use the Mistral API you'll need an API key. You can sign up for a Mistral account and create an API key [here](https://console.mistral.ai/). - -You'll first need to install the [`@langchain/mistralai`](https://www.npmjs.com/package/@langchain/mistralai) package: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/mistralai -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -## Usage - -When sending chat messages to mistral, there are a few requirements to follow: - -- The first message can _*not*_ be an assistant (ai) message. -- Messages _*must*_ alternate between user and assistant (ai) messages. -- Messages can _*not*_ end with an assistant (ai) or system message. - -import ChatMistralAIExample from "@examples/models/chat/chat_mistralai.ts"; - -{ChatMistralAIExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/d69d0db9-f29e-45aa-a40d-b53f6273d7d0/r) -::: - -### Streaming - -Mistral's API also supports streaming token responses. The example below demonstrates how to use this feature. 
- -import ChatStreamMistralAIExample from "@examples/models/chat/chat_stream_mistralai.ts"; - -{ChatStreamMistralAIExample} - -:::info -You can see a LangSmith trace of this example [here](https://smith.langchain.com/public/061d90f2-ac7e-44c5-8790-8b23299f9217/r) -::: - -### Tool calling - -Mistral's API now supports tool calling and JSON mode! -The examples below demonstrates how to use them, along with how to use the `withStructuredOutput` method to easily compose structured output LLM calls. - -import ToolCalling from "@examples/models/chat/chat_mistralai_tools.ts"; - -{ToolCalling} - -### `.withStructuredOutput({ ... })` - -:::info -The `.withStructuredOutput` method is in beta. It is actively being worked on, so the API may change. -::: - -Using the `.withStructuredOutput` method, you can easily make the LLM return structured output, given only a Zod or JSON schema: - -:::note -The Mistral tool calling API requires descriptions for each tool field. If descriptions are not supplied, the API will error. -::: - -import WSAExample from "@examples/models/chat/chat_mistralai_wsa.ts"; - -{WSAExample} - -### Using JSON schema: - -import WSAJSONExample from "@examples/models/chat/chat_mistralai_wsa_json.ts"; - -{WSAJSONExample} - -### Tool calling agent - -The larger Mistral models not only support tool calling, but can also be used in the Tool Calling agent. -Here's an example: - -import AgentsExample from "@examples/models/chat/chat_mistralai_agents.ts"; - -{AgentsExample} diff --git a/docs/core_docs/docs/integrations/chat/togetherai.ipynb b/docs/core_docs/docs/integrations/chat/togetherai.ipynb new file mode 100644 index 000000000000..8ed09f8d41c6 --- /dev/null +++ b/docs/core_docs/docs/integrations/chat/togetherai.ipynb @@ -0,0 +1,343 @@ +{ + "cells": [ + { + "cell_type": "raw", + "id": "afaf8039", + "metadata": { + "vscode": { + "languageId": "raw" + } + }, + "source": [ + "---\n", + "sidebar_label: Together\n", + "---" + ] + }, + { + "cell_type": "markdown", + "id": "e49f1e0d", + "metadata": {}, + "source": [ + "# ChatTogetherAI\n", + "\n", + "This will help you get started with `ChatTogetherAI` [chat models](/docs/concepts/#chat-models). 
For detailed documentation of all `ChatTogetherAI` features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/togetherai) | Package downloads | Package latest |\n", + "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", + "| [ChatTogetherAI](https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html) | [@langchain/community](https://api.js.langchain.com/modules/langchain_community_chat_models_togetherai.html) | ❌ | ✅ | ✅ | ![NPM - Downloads](https://img.shields.io/npm/dm/@langchain/community?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/@langchain/community?style=flat-square&label=%20&) |\n", + "\n", + "### Model features\n", + "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", + "| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n", + "| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access `ChatTogetherAI` models you'll need to create a Together account, get an API key [here](https://api.together.xyz/), and install the `@langchain/community` integration package.\n", + "\n", + "### Credentials\n", + "\n", + "Head to [api.together.ai](https://api.together.ai/) to sign up to TogetherAI and generate an API key. 
Once you've done this set the `TOGETHER_AI_API_KEY` environment variable:\n", + "\n", + "```bash\n", + "export TOGETHER_AI_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain ChatTogetherAI integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "\n", + "\n", + "\n", + " @langchain/community\n", + "\n", + "\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a38cde65-254d-4219-a441-068766c0d4b5", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and generate chat completions:" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "cb09c344-1836-4e0c-acf8-11d13ac1dbae", + "metadata": {}, + "outputs": [], + "source": [ + "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\"\n", + "\n", + "const llm = new ChatTogetherAI({\n", + " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", + " temperature: 0,\n", + " maxTokens: undefined,\n", + " timeout: undefined,\n", + " maxRetries: 2,\n", + " // other params...\n", + "})" + ] + }, + { + "cell_type": "markdown", + "id": "2b4f3e15", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "62e0dbc3", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9qEDPZ6iLCk6jt3XTzVDDH6pcI\",\n", + " \"content\": \"J'adore la programmation.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 8,\n", + " \"promptTokens\": 31,\n", + " \"totalTokens\": 39\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 31,\n", + " \"output_tokens\": 8,\n", + " \"total_tokens\": 39\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "const aiMsg = await llm.invoke([\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates English to French. 
Translate the user sentence.\",\n", + " ],\n", + " [\"human\", \"I love programming.\"],\n", + "])\n", + "aiMsg" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "d86145b3-bfef-46e8-b227-4dda5c9c2705", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "J'adore la programmation.\n" + ] + } + ], + "source": [ + "console.log(aiMsg.content)" + ] + }, + { + "cell_type": "markdown", + "id": "18e2bfc0-7e78-4528-a73f-499ac150dca8", + "metadata": {}, + "source": [ + "## Chaining\n", + "\n", + "We can [chain](/docs/how_to/sequence/) our model with a prompt template like so:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "e197d1d7-a070-4c96-9f8a-a0e86d046e0b", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "AIMessage {\n", + " \"id\": \"chatcmpl-9rT9wolZWfJ3xovORxnkdf1rcPbbY\",\n", + " \"content\": \"Ich liebe das Programmieren.\",\n", + " \"additional_kwargs\": {},\n", + " \"response_metadata\": {\n", + " \"tokenUsage\": {\n", + " \"completionTokens\": 6,\n", + " \"promptTokens\": 26,\n", + " \"totalTokens\": 32\n", + " },\n", + " \"finish_reason\": \"stop\"\n", + " },\n", + " \"tool_calls\": [],\n", + " \"invalid_tool_calls\": [],\n", + " \"usage_metadata\": {\n", + " \"input_tokens\": 26,\n", + " \"output_tokens\": 6,\n", + " \"total_tokens\": 32\n", + " }\n", + "}\n" + ] + } + ], + "source": [ + "import { ChatPromptTemplate } from \"@langchain/core/prompts\"\n", + "\n", + "const prompt = ChatPromptTemplate.fromMessages(\n", + " [\n", + " [\n", + " \"system\",\n", + " \"You are a helpful assistant that translates {input_language} to {output_language}.\",\n", + " ],\n", + " [\"human\", \"{input}\"],\n", + " ]\n", + ")\n", + "\n", + "const chain = prompt.pipe(llm);\n", + "await chain.invoke(\n", + " {\n", + " input_language: \"English\",\n", + " output_language: \"German\",\n", + " input: \"I love programming.\",\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "d1ee55bc-ffc8-4cfa-801c-993953a08cfd", + "metadata": {}, + "source": [ + "## Tool calling & JSON mode\n", + "\n", + "The TogetherAI chat supports JSON mode and calling tools.\n", + "\n", + "### Tool calling" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "8de584a8", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[\n", + " {\n", + " name: 'calculator',\n", + " args: { input: '2 + 3' },\n", + " type: 'tool_call',\n", + " id: 'call_nhtnmganqJPAG9I1cN8ULI9R'\n", + " }\n", + "]\n" + ] + } + ], + "source": [ + "import { ChatTogetherAI } from \"@langchain/community/chat_models/togetherai\";\n", + "import { ChatPromptTemplate } from \"@langchain/core/prompts\";\n", + "import { convertToOpenAITool } from \"@langchain/core/utils/function_calling\";\n", + "import { Calculator } from \"@langchain/community/tools/calculator\";\n", + "\n", + "// Use a pre-built tool\n", + "const calculatorTool = convertToOpenAITool(new Calculator());\n", + "\n", + "const modelWithCalculator = new ChatTogetherAI({\n", + " temperature: 0,\n", + " // This is the default env variable name it will look for if none is passed.\n", + " apiKey: process.env.TOGETHER_AI_API_KEY,\n", + " // Together JSON mode/tool calling only supports a select number of models\n", + " model: \"mistralai/Mixtral-8x7B-Instruct-v0.1\",\n", + "}).bind({\n", + " // Bind the tool to the model.\n", + " tools: [calculatorTool],\n", + " tool_choice: calculatorTool, // Specify what 
tool the model should use\n", + "});\n", + "\n", + "const promptForTools = ChatPromptTemplate.fromMessages([\n", + " [\"system\", \"You are a super not-so-smart mathematician.\"],\n", + " [\"human\", \"Help me out, how can I add {math}?\"],\n", + "]);\n", + "\n", + "// Use LCEL to chain the prompt to the model.\n", + "const responseWithTool = await promptForTools.pipe(modelWithCalculator).invoke({\n", + " math: \"2 plus 3\",\n", + "});\n", + "\n", + "console.dir(responseWithTool.tool_calls, { depth: null });" ] }, { "cell_type": "markdown", "id": "3a5bb5ca-c3ae-4a58-be67-2cd18574b9a3", "metadata": {}, "source": [ "Behind the scenes, TogetherAI uses the OpenAI SDK and an OpenAI-compatible API, with some caveats:\n", + "\n", + "- Certain properties are not supported by the TogetherAI API, see [here](https://docs.together.ai/reference/chat-completions).\n", + "\n", + "## API reference\n", + "\n", + "For detailed documentation of all ChatTogetherAI features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_chat_models_togetherai.ChatTogetherAI.html" ] } ], "metadata": { "kernelspec": { "display_name": "TypeScript", "language": "typescript", "name": "tslab" }, "language_info": { "codemirror_mode": { "mode": "typescript", "name": "javascript", "typescript": true }, "file_extension": ".ts", "mimetype": "text/typescript", "name": "typescript", "version": "3.7.2" } }, "nbformat": 4, "nbformat_minor": 5 +} diff --git a/docs/core_docs/docs/integrations/chat/togetherai.mdx b/docs/core_docs/docs/integrations/chat/togetherai.mdx deleted file mode 100644 index f938be05c8c4..000000000000 --- a/docs/core_docs/docs/integrations/chat/togetherai.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -sidebar_label: TogetherAI ---- - -import CodeBlock from "@theme/CodeBlock"; - -# ChatTogetherAI - -## Setup - -1. Create a TogetherAI account and get your API key [here](https://api.together.xyz/). -2. Export or set your API key inline. The ChatTogetherAI class defaults to `process.env.TOGETHER_AI_API_KEY`. - -```bash -export TOGETHER_AI_API_KEY=your-api-key -``` - -You can use models provided by TogetherAI as follows: - -import IntegrationInstallTooltip from "@mdx_components/integration_install_tooltip.mdx"; - - - -```bash npm2yarn -npm install @langchain/community -``` - -import UnifiedModelParamsTooltip from "@mdx_components/unified_model_params_tooltip.mdx"; - - - -import TogetherAI from "@examples/models/chat/integration_togetherai.ts"; - -{TogetherAI} - -## Tool calling & JSON mode - -The TogetherAI chat supports JSON mode and calling tools. - -### Tool calling - -import TogetherToolsExample from "@examples/models/chat/integration_togetherai_tools.ts"; - -{TogetherToolsExample} - -:::tip -See a LangSmith trace of the above example [here](https://smith.langchain.com/public/5082ea20-c2de-410f-80e2-dbdfbf4d8adb/r). -::: - -### JSON mode - -To use JSON mode you must include the string "JSON" inside the prompt. -Typical conventions include telling the model to use JSON, eg: `Respond to the user in JSON format`. - -import TogetherJSONModeExample from "@examples/models/chat/integration_togetherai_json.ts"; - -{TogetherJSONModeExample} - -:::tip -See a LangSmith trace of the above example [here](https://smith.langchain.com/public/3864aebb-5096-4b5f-b096-e54ddd1ec3d2/r). 
-::: - -Behind the scenes, TogetherAI uses the OpenAI SDK and OpenAI compatible API, with some caveats: - -- Certain properties are not supported by the TogetherAI API, see [here](https://docs.together.ai/reference/chat-completions). diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb new file mode 100644 index 000000000000..b12e3a8e5a00 --- /dev/null +++ b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.ipynb @@ -0,0 +1,304 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: CheerioWebBaseLoader\n", + "---" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cheerio\n", + "\n", + "This notebook provides a quick overview for getting started with [CheerioWebBaseLoader](/docs/integrations/document_loaders/). For detailed documentation of all CheerioWebBaseLoader features and configurations head to the [API reference](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html).\n", + "\n", + "## Overview\n", + "### Integration details\n", + "\n", + "This example goes over how to load data from webpages using Cheerio. One document will be created for each webpage.\n", + "\n", + "Cheerio is a fast and lightweight library that allows you to parse and traverse HTML documents using a jQuery-like syntax. You can use Cheerio to extract data from web pages, without having to render them in a browser.\n", + "\n", + "However, Cheerio does not simulate a web browser, so it cannot execute JavaScript code on the page. This means that it cannot extract data from dynamic web pages that require JavaScript to render. 
To do that, you can use the [`PlaywrightWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_playwright) or [`PuppeteerWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_puppeteer) instead.\n", + "\n", + "| Class | Package | Local | Serializable | PY support |\n", + "| :--- | :--- | :---: | :---: | :---: |\n", + "| [CheerioWebBaseLoader](https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html) | @langchain/community | ✅ | ✅ | ❌ | \n", + "### Loader features\n", + "| Source | Web Support | Node Support |\n", + "| :---: | :---: | :---: | \n", + "| CheerioWebBaseLoader | ✅ | ✅ | \n", + "\n", + "## Setup\n", + "\n", + "To access the `CheerioWebBaseLoader` document loader you'll need to install the `@langchain/community` integration package, along with the `cheerio` peer dependency.\n", + "\n", + "### Credentials\n", + "\n", + "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", + "\n", + "```bash\n", + "# export LANGCHAIN_TRACING_V2=\"true\"\n", + "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", + "```\n", + "\n", + "### Installation\n", + "\n", + "The LangChain CheerioWebBaseLoader integration lives in the `@langchain/community` package:\n", + "\n", + "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", + "\n", + "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n", + "\n", + "<Npm2Yarn>\n", + "  @langchain/community cheerio\n", + "</Npm2Yarn>\n", + "\n", + "```" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation\n", + "\n", + "Now we can instantiate our model object and load documents:" ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\"\n", + "\n", + "const loader = new CheerioWebBaseLoader(\"https://news.ycombinator.com/item?id=34817881\", {\n", + " // optional params: ...\n", + "})" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load" ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Document {\n", + " pageContent: '\\n' +\n", + " ' \\n' +\n", + " ' Hacker News\\n' +\n", + " ' new | past | comments | ask | show | jobs | submit \\n' +\n", + " ' login\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " '\\n' +\n", + " ' \\n' +\n", + " ' What Lights the Universe’s Standard Candles? (quantamagazine.org)\\n' +\n", + " ' 75 points by Amorymeltzer on Feb 17, 2023 | hide | past | favorite | 6 comments \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' delta_p_delta_x on Feb 17, 2023 \\n' +\n", + " ' | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Astrophysical and cosmological simulations are often insightful. 
They're also very cross-disciplinary; besides the obvious astrophysics, there's networking and sysadmin, parallel computing and algorithm theory (so that the simulation programs are actually fast but still accurate), systems design, and even a bit of graphic design for the visualisations.Some of my favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/- SWIFT: https://swift.dur.ac.uk/- CO5BOLD: https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)- AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the simulations in the article, too.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' froeb on Feb 18, 2023 \\n' +\n", + " ' | parent | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Supernova simulations are especially interesting too. I have heard them described as the only time in physics when all 4 of the fundamental forces are important. The explosion can be quite finicky too. If I remember right, you can't get supernova to explode properly in 1D simulations, only in higher dimensions. This was a mystery until the realization that turbulence is necessary for supernova to trigger--there is no turbulent flow in 1D.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andrewflnr on Feb 17, 2023 \\n' +\n", + " ' | prev | next [–] \\n' +\n", + " ' \\n' +\n", + " \" Whoa. I didn't know the accretion theory of Ia supernovae was dead, much less that it had been since 2011.\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andreareina on Feb 17, 2023 \\n' +\n", + " ' | prev | next [–] \\n' +\n", + " ' \\n' +\n", + " ' This seems to be the paper https://academic.oup.com/mnras/article/517/4/5260/6779709\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' andreareina on Feb 17, 2023 \\n' +\n", + " ' | prev [–] \\n' +\n", + " ' \\n' +\n", + " \" Wouldn't double detonation show up as variance in the brightness?\\n\" +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' yencabulator on Feb 18, 2023 \\n' +\n", + " ' | parent [–] \\n' +\n", + " ' \\n' +\n", + " ' Or widening of the peak. 
If one type Ia supernova goes 1,2,3,2,1, the sum of two could go 1+0=1\\n' +\n", + " ' 2+1=3\\n' +\n", + " ' 3+2=5\\n' +\n", + " ' 2+3=5\\n' +\n", + " ' 1+2=3\\n' +\n", + " ' 0+1=1\\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " ' \\n' +\n", + " '\\n' +\n", + " '\\n' +\n", + " 'Guidelines | FAQ | Lists | API | Security | Legal | Apply to YC | Contact\\n' +\n", + " 'Search: \\n' +\n", + " ' \\n' +\n", + " ' \\n',\n", + " metadata: { source: 'https://news.ycombinator.com/item?id=34817881' },\n", + " id: undefined\n", + "}\n" + ] + } + ], + "source": [ + "const docs = await loader.load()\n", + "docs[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "{ source: 'https://news.ycombinator.com/item?id=34817881' }\n" + ] + } + ], + "source": [ + "console.log(docs[0].metadata)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Additional configurations\n", + "\n", + "`CheerioWebBaseLoader` supports additional configuration when instantiating the loader. Here is an example of how to use it with the `selector` field passed, making it only load content from the provided HTML class names:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Some of my favourite simulation projects:- IllustrisTNG: https://www.tng-project.org/- SWIFT: https://swift.dur.ac.uk/- CO5BOLD: https://www.astro.uu.se/~bf/co5bold_main.html (which produced these animations of a red-giant star: https://www.astro.uu.se/~bf/movie/AGBmovie.html)- AbacusSummit: https://abacussummit.readthedocs.io/en/latest/And I can add the simulations in the article, too.\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n" + ] + } + ], + "source": [ + "import { CheerioWebBaseLoader } from \"@langchain/community/document_loaders/web/cheerio\"\n", + "\n", + "const loaderWithSelector = new CheerioWebBaseLoader(\"https://news.ycombinator.com/item?id=34817881\", {\n", + " selector: \"p\",\n", + "});\n", + "\n", + "const docsWithSelector = await loaderWithSelector.load();\n", + "docsWithSelector[0].pageContent;" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference\n", + "\n", + "For detailed documentation of all CheerioWebBaseLoader features and configurations head to the API reference: https://api.js.langchain.com/classes/langchain_community_document_loaders_web_cheerio.CheerioWebBaseLoader.html" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "TypeScript", + "language": "typescript", + "name": "tslab" + }, + "language_info": { + "codemirror_mode": { + "mode": "typescript", + "name": "javascript", + "typescript": true + }, + "file_extension": ".ts", + "mimetype": "text/typescript", + "name": "typescript", + "version": "3.7.2" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx b/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx deleted file mode 100644 index a33912f424ae..000000000000 --- a/docs/core_docs/docs/integrations/document_loaders/web_loaders/web_cheerio.mdx +++ /dev/null @@ -1,46 +0,0 @@ ---- -sidebar_position: 1 -sidebar_label: Cheerio 
-hide_table_of_contents: true ---- - -# Webpages, with Cheerio - -This example goes over how to load data from webpages using Cheerio. One document will be created for each webpage. - -Cheerio is a fast and lightweight library that allows you to parse and traverse HTML documents using a jQuery-like syntax. You can use Cheerio to extract data from web pages, without having to render them in a browser. - -However, Cheerio does not simulate a web browser, so it cannot execute JavaScript code on the page. This means that it cannot extract data from dynamic web pages that require JavaScript to render. To do that, you can use the [`PlaywrightWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_playwright) or [`PuppeteerWebBaseLoader`](/docs/integrations/document_loaders/web_loaders/web_puppeteer) instead. - -## Setup - -```bash npm2yarn -npm install cheerio -``` - -## Usage - -```typescript -import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; - -const loader = new CheerioWebBaseLoader( - "https://news.ycombinator.com/item?id=34817881" -); - -const docs = await loader.load(); -``` - -## Usage, with a custom selector - -```typescript -import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; - -const loader = new CheerioWebBaseLoader( - "https://news.ycombinator.com/item?id=34817881", - { - selector: "p.athing", - } -); - -const docs = await loader.load(); -``` diff --git a/docs/core_docs/scripts/validate_notebook.ts b/docs/core_docs/scripts/validate_notebook.ts index 6f26d940be3a..776684ebc73f 100644 --- a/docs/core_docs/scripts/validate_notebook.ts +++ b/docs/core_docs/scripts/validate_notebook.ts @@ -15,15 +15,22 @@ export function extract(filepath: string) { // Deduplicate imports const importDeclarations = sourceFile.getImportDeclarations(); - const uniqueImports = new Map<string, Set<string>>(); + const uniqueImports = new Map< + string, + { default?: string; named: Set<string> } + >(); importDeclarations.forEach((importDecl) => { const moduleSpecifier = importDecl.getModuleSpecifierValue(); if (!uniqueImports.has(moduleSpecifier)) { - uniqueImports.set(moduleSpecifier, new Set()); + uniqueImports.set(moduleSpecifier, { named: new Set() }); + } + const defaultImport = importDecl.getDefaultImport(); + if (defaultImport) { + uniqueImports.get(moduleSpecifier)!.default = defaultImport.getText(); } importDecl.getNamedImports().forEach((namedImport) => { - uniqueImports.get(moduleSpecifier)!.add(namedImport.getText()); + uniqueImports.get(moduleSpecifier)!.named.add(namedImport.getText()); }); }); @@ -31,12 +38,15 @@ export function extract(filepath: string) { importDeclarations.forEach((importDecl) => importDecl.remove()); // Add deduplicated imports at the top - uniqueImports.forEach((namedImports, moduleSpecifier) => { - sourceFile.addImportDeclaration({ - moduleSpecifier, - namedImports: Array.from(namedImports), - }); - }); + uniqueImports.forEach( + ({ default: defaultImport, named }, moduleSpecifier) => { + sourceFile.addImportDeclaration({ + moduleSpecifier, + defaultImport, + namedImports: Array.from(named), + }); + } + ); return sourceFile.getFullText(); } diff --git a/langchain-core/src/tools/index.ts b/langchain-core/src/tools/index.ts index c9e2c98402ae..a2c488e58bca 100644 --- a/langchain-core/src/tools/index.ts +++ b/langchain-core/src/tools/index.ts @@ -20,6 +20,7 @@ import { ZodObjectAny } from "../types/zod.js"; import { MessageContent } from "../messages/base.js"; import { AsyncLocalStorageProviderSingleton } from 
"../singletons/index.js"; import { _isToolCall, ToolInputParsingException } from "./utils.js"; +import { isZodSchema } from "../utils/types/is_zod_schema.js"; export { ToolInputParsingException }; @@ -319,16 +320,19 @@ export interface DynamicToolInput extends BaseDynamicToolInput { * Interface for the input parameters of the DynamicStructuredTool class. */ export interface DynamicStructuredToolInput< - T extends ZodObjectAny = ZodObjectAny + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T extends ZodObjectAny | Record = ZodObjectAny > extends BaseDynamicToolInput { func: ( input: BaseDynamicToolInput["responseFormat"] extends "content_and_artifact" ? ToolCall - : z.infer, + : T extends ZodObjectAny + ? z.infer + : T, runManager?: CallbackManagerForToolRun, config?: RunnableConfig ) => Promise; - schema: T; + schema: T extends ZodObjectAny ? T : T; } /** @@ -382,10 +386,14 @@ export class DynamicTool extends Tool { * description, designed to work with structured data. It extends the * StructuredTool class and overrides the _call method to execute the * provided function when the tool is called. + * + * Schema can be passed as Zod or JSON schema. The tool will not validate + * input if JSON schema is passed. */ export class DynamicStructuredTool< - T extends ZodObjectAny = ZodObjectAny -> extends StructuredTool { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T extends ZodObjectAny | Record = ZodObjectAny +> extends StructuredTool { static lc_name() { return "DynamicStructuredTool"; } @@ -396,7 +404,7 @@ export class DynamicStructuredTool< func: DynamicStructuredToolInput["func"]; - schema: T; + schema: T extends ZodObjectAny ? T : ZodObjectAny; constructor(fields: DynamicStructuredToolInput) { super(fields); @@ -404,14 +412,16 @@ export class DynamicStructuredTool< this.description = fields.description; this.func = fields.func; this.returnDirect = fields.returnDirect ?? this.returnDirect; - this.schema = fields.schema; + this.schema = ( + isZodSchema(fields.schema) ? fields.schema : z.object({}) + ) as T extends ZodObjectAny ? T : ZodObjectAny; } /** * @deprecated Use .invoke() instead. Will be removed in 0.3.0. */ async call( - arg: z.output | ToolCall, + arg: (T extends ZodObjectAny ? z.output : T) | ToolCall, configArg?: RunnableConfig | Callbacks, /** @deprecated */ tags?: string[] @@ -424,11 +434,12 @@ export class DynamicStructuredTool< } protected _call( - arg: z.output | ToolCall, + arg: (T extends ZodObjectAny ? z.output : T) | ToolCall, runManager?: CallbackManagerForToolRun, parentConfig?: RunnableConfig ): Promise { - return this.func(arg, runManager, parentConfig); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + return this.func(arg as any, runManager, parentConfig); } } @@ -447,10 +458,16 @@ export abstract class BaseToolkit { /** * Parameters for the tool function. - * @template {ZodObjectAny | z.ZodString = ZodObjectAny} RunInput The input schema for the tool. Either any Zod object, or a Zod string. + * Schema can be provided as Zod or JSON schema. + * If you pass JSON schema, tool inputs will not be validated. + * @template {ZodObjectAny | z.ZodString | Record = ZodObjectAny} RunInput The input schema for the tool. Either any Zod object, a Zod string, or JSON schema. 
*/ interface ToolWrapperParams< - RunInput extends ZodObjectAny | z.ZodString = ZodObjectAny + RunInput extends + | ZodObjectAny + | z.ZodString + // eslint-disable-next-line @typescript-eslint/no-explicit-any + | Record<string, any> = ZodObjectAny > extends ToolParams { /** * The name of the tool. If using with an LLM, this @@ -483,8 +500,11 @@ interface ToolWrapperParams< /** * Creates a new StructuredTool instance with the provided function, name, description, and schema. * + * Schema can be provided as Zod or JSON schema. + * If you pass JSON schema, tool inputs will not be validated. + * * @function - * @template {ZodObjectAny | z.ZodString = ZodObjectAny} T The input schema for the tool. Either any Zod object, or a Zod string. + * @template {ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny} T The input schema for the tool. Either any Zod object, a Zod string, or JSON schema instance. * * @param {RunnableFunc<z.output<T>, ToolReturnType>} func - The function to invoke when the tool is called. * @param {ToolWrapperParams<T>} fields - An object containing the following properties: @@ -494,18 +514,27 @@ interface ToolWrapperParams< * * @returns {DynamicStructuredTool<T>} A new StructuredTool instance. */ -export function tool<T extends z.ZodString>( +export function tool<T extends z.ZodString>( func: RunnableFunc<z.output<T>, ToolReturnType>, fields: ToolWrapperParams<T> ): DynamicTool; -export function tool<T extends ZodObjectAny>( +export function tool<T extends ZodObjectAny>( func: RunnableFunc<z.output<T>, ToolReturnType>, fields: ToolWrapperParams<T> ): DynamicStructuredTool<T>; -export function tool<T extends ZodObjectAny | z.ZodString = ZodObjectAny>( - func: RunnableFunc<z.output<T>, ToolReturnType>, +// eslint-disable-next-line @typescript-eslint/no-explicit-any +export function tool<T extends Record<string, any>>( + func: RunnableFunc<T, ToolReturnType>, + fields: ToolWrapperParams<T> +): DynamicStructuredTool<T>; + +export function tool< + // eslint-disable-next-line @typescript-eslint/no-explicit-any + T extends ZodObjectAny | z.ZodString | Record<string, any> = ZodObjectAny +>( + func: RunnableFunc<T extends ZodObjectAny ? z.output<T> : T, ToolReturnType>, fields: ToolWrapperParams<T> ): | DynamicStructuredTool<T extends ZodObjectAny ? T : ZodObjectAny> @@ -518,7 +547,9 @@ export function tool( fields.description ?? fields.schema?.description ?? `${fields.name} tool`, - func, + // TS doesn't restrict the type here based on the guard above + // eslint-disable-next-line @typescript-eslint/no-explicit-any + func: func as any, }); } @@ -528,7 +559,8 @@ export function tool( return new DynamicStructuredTool({ ...fields, description, - schema: fields.schema as T extends ZodObjectAny ? 
T : ZodObjectAny, + // eslint-disable-next-line @typescript-eslint/no-explicit-any + schema: fields.schema as any, // TODO: Consider moving into DynamicStructuredTool constructor func: async (input, runManager, config) => { return new Promise((resolve, reject) => { @@ -539,7 +571,9 @@ export function tool( childConfig, async () => { try { - resolve(func(input, childConfig)); + // TS doesn't restrict the type here based on the guard above + // eslint-disable-next-line @typescript-eslint/no-explicit-any + resolve(func(input as any, childConfig)); } catch (e) { reject(e); } diff --git a/langchain-core/src/tools/tests/tools.test.ts b/langchain-core/src/tools/tests/tools.test.ts index bf577a4a1dc9..4c38800b3489 100644 --- a/langchain-core/src/tools/tests/tools.test.ts +++ b/langchain-core/src/tools/tests/tools.test.ts @@ -1,6 +1,6 @@ import { test, expect } from "@jest/globals"; import { z } from "zod"; -import { tool } from "../index.js"; +import { DynamicStructuredTool, tool } from "../index.js"; import { ToolMessage } from "../../messages/tool.js"; test("Tool should error if responseFormat is content_and_artifact but the function doesn't return a tuple", async () => { @@ -115,3 +115,100 @@ test("Tool can accept single string input", async () => { const result = await stringTool.invoke("b"); expect(result).toBe("ba"); }); + +test("Tool declared with JSON schema", async () => { + const weatherSchema = { + type: "object", + properties: { + location: { + type: "string", + description: "A place", + }, + }, + required: ["location"], + }; + const weatherTool = tool( + (_) => { + return "Sunny"; + }, + { + name: "weather", + schema: weatherSchema, + } + ); + + const weatherTool2 = new DynamicStructuredTool({ + name: "weather", + description: "get the weather", + func: async (_) => { + return "Sunny"; + }, + schema: weatherSchema, + }); + // No validation on JSON schema tools + await weatherTool.invoke({ + somethingSilly: true, + }); + await weatherTool2.invoke({ + somethingSilly: true, + }); +}); + +test("Tool input typing is enforced", async () => { + const weatherSchema = z.object({ + location: z.string(), + }); + + const weatherTool = tool( + (_) => { + return "Sunny"; + }, + { + name: "weather", + schema: weatherSchema, + } + ); + + const weatherTool2 = new DynamicStructuredTool({ + name: "weather", + description: "get the weather", + func: async (_) => { + return "Sunny"; + }, + schema: weatherSchema, + }); + + const weatherTool3 = tool( + async (_) => { + return "Sunny"; + }, + { + name: "weather", + description: "get the weather", + schema: z.string(), + } + ); + + await expect(async () => { + await weatherTool.invoke({ + // @ts-expect-error Invalid argument + badval: "someval", + }); + }).rejects.toThrow(); + const res = await weatherTool.invoke({ + location: "somewhere", + }); + expect(res).toEqual("Sunny"); + await expect(async () => { + await weatherTool2.invoke({ + // @ts-expect-error Invalid argument + badval: "someval", + }); + }).rejects.toThrow(); + const res2 = await weatherTool2.invoke({ + location: "someval", + }); + expect(res2).toEqual("Sunny"); + const res3 = await weatherTool3.invoke("blah"); + expect(res3).toEqual("Sunny"); +}); diff --git a/langchain/src/tools/sql.ts b/langchain/src/tools/sql.ts index 09584fe980e3..6a139af3d5c2 100644 --- a/langchain/src/tools/sql.ts +++ b/langchain/src/tools/sql.ts @@ -156,7 +156,7 @@ export class QueryCheckerTool extends Tool { template = ` {query} -Double check the sqlite query above for common mistakes, including: +Double check the SQL 
query above for common mistakes, including: - Using NOT IN with NULL values - Using UNION when UNION ALL should have been used - Using BETWEEN for exclusive ranges diff --git a/libs/langchain-community/src/llms/layerup_security.ts b/libs/langchain-community/src/llms/layerup_security.ts index e60676094892..5d84e7188add 100644 --- a/libs/langchain-community/src/llms/layerup_security.ts +++ b/libs/langchain-community/src/llms/layerup_security.ts @@ -1,7 +1,7 @@ import { LLM, BaseLLM, - type BaseLLMParams, + type BaseLLMCallOptions, } from "@langchain/core/language_models/llms"; import { GuardrailResponse, @@ -9,7 +9,7 @@ import { LLMMessage, } from "@layerup/layerup-security"; -export interface LayerupSecurityOptions extends BaseLLMParams { +export interface LayerupSecurityOptions extends BaseLLMCallOptions { llm: BaseLLM; layerupApiKey?: string; layerupApiBaseUrl?: string; @@ -101,7 +101,7 @@ export class LayerupSecurity extends LLM { return "layerup_security"; } - async _call(input: string, options?: BaseLLMParams): Promise<string> { + async _call(input: string, options?: BaseLLMCallOptions): Promise<string> { // Since LangChain LLMs only support string inputs, we will wrap each call to Layerup in a single-message // array of messages, then extract the string element when we need to access it. let messages: LLMMessage[] = [ diff --git a/libs/langchain-community/src/llms/tests/layerup_security.test.ts b/libs/langchain-community/src/llms/tests/layerup_security.test.ts index 670a56ca200b..883dca6fd0cb 100644 --- a/libs/langchain-community/src/llms/tests/layerup_security.test.ts +++ b/libs/langchain-community/src/llms/tests/layerup_security.test.ts @@ -1,5 +1,8 @@ import { test } from "@jest/globals"; -import { LLM, type BaseLLMParams } from "@langchain/core/language_models/llms"; +import { + LLM, + type BaseLLMCallOptions, +} from "@langchain/core/language_models/llms"; import { GuardrailResponse } from "@layerup/layerup-security/types.js"; import { LayerupSecurity, @@ -18,7 +21,7 @@ export class MockLLM extends LLM { return "mock_llm"; } - async _call(_input: string, _options?: BaseLLMParams): Promise<string> { + async _call(_input: string, _options?: BaseLLMCallOptions): Promise<string> { return "Hi Bob! How are you?"; } } diff --git a/libs/langchain-mistralai/src/chat_models.ts b/libs/langchain-mistralai/src/chat_models.ts index 4c14e304a9dd..2f8265174beb 100644 --- a/libs/langchain-mistralai/src/chat_models.ts +++ b/libs/langchain-mistralai/src/chat_models.ts @@ -77,7 +77,8 @@ interface TokenUsage { export type MistralAIToolChoice = "auto" | "any" | "none"; type MistralAIToolInput = { type: string; function: MistralAIFunction }; -interface MistralAICallOptions + +export interface ChatMistralAICallOptions extends Omit<BaseLanguageModelCallOptions, "stop"> { response_format?: { type: "text" | "json_object"; @@ -91,8 +92,6 @@ interface MistralAICallOptions streamUsage?: boolean; } -export interface ChatMistralAICallOptions extends MistralAICallOptions {} - /** * Input to chat model class. */ @@ -406,7 +405,7 @@ function _convertStructuredToolToMistralTool( * Integration with a chat model. 
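 *
 * @example
 * // Minimal usage sketch (illustrative, not part of this diff). Assumes the
 * // MISTRAL_API_KEY environment variable is set; the model name is an example.
 * import { ChatMistralAI } from "@langchain/mistralai";
 *
 * const model = new ChatMistralAI({
 *   model: "mistral-large-latest",
 *   temperature: 0,
 * });
 *
 * const message = await model.invoke("What is LangChain?");
 * console.log(message.content);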
*/ export class ChatMistralAI< - CallOptions extends MistralAICallOptions = MistralAICallOptions + CallOptions extends ChatMistralAICallOptions = ChatMistralAICallOptions > extends BaseChatModel<CallOptions> implements ChatMistralAIInput diff --git a/libs/langchain-scripts/package.json b/libs/langchain-scripts/package.json index 82048e1f88a6..1b9e36114282 100644 --- a/libs/langchain-scripts/package.json +++ b/libs/langchain-scripts/package.json @@ -44,6 +44,7 @@ "axios": "^1.6.7", "commander": "^11.1.0", "glob": "^10.3.10", + "lodash": "^4.17.21", "readline": "^1.3.0", "rimraf": "^5.0.1", "rollup": "^4.5.2", @@ -55,6 +56,7 @@ "@swc/core": "^1.3.90", "@swc/jest": "^0.2.29", "@tsconfig/recommended": "^1.0.3", + "@types/lodash": "^4", "@typescript-eslint/eslint-plugin": "^6.12.0", "@typescript-eslint/parser": "^6.12.0", "dotenv": "^16.3.1", diff --git a/libs/langchain-scripts/src/cli/docs/chat.ts b/libs/langchain-scripts/src/cli/docs/chat.ts index 6ac2e2b9f256..196caabddc19 100644 --- a/libs/langchain-scripts/src/cli/docs/chat.ts +++ b/libs/langchain-scripts/src/cli/docs/chat.ts @@ -12,6 +12,7 @@ const PACKAGE_NAME_SHORT_SNAKE_CASE_PLACEHOLDER = "__package_name_short_snake_case__"; const PACKAGE_NAME_SNAKE_CASE_PLACEHOLDER = "__package_name_snake_case__"; const PACKAGE_NAME_PRETTY_PLACEHOLDER = "__package_name_pretty__"; +const PACKAGE_IMPORT_PATH_PLACEHOLDER = "__import_path__"; const MODULE_NAME_PLACEHOLDER = "__ModuleName__"; // This should not be prefixed with `Chat` as it's used for API keys. const MODULE_NAME_ALL_CAPS_PLACEHOLDER = "__MODULE_NAME_ALL_CAPS__"; @@ -31,6 +32,7 @@ const PY_SUPPORT_PLACEHOLDER = "__py_support__"; const API_REF_BASE_PACKAGE_URL = `https://api.js.langchain.com/modules/langchain_${PACKAGE_NAME_PLACEHOLDER}.html`; const API_REF_BASE_MODULE_URL = `https://api.js.langchain.com/classes/langchain_${PACKAGE_NAME_PLACEHOLDER}.${MODULE_NAME_PLACEHOLDER}.html`; + const TEMPLATE_PATH = path.resolve("./src/cli/docs/templates/chat.ipynb"); const INTEGRATIONS_DOCS_PATH = path.resolve( "../../docs/core_docs/docs/integrations/chat" ); @@ -67,57 +69,57 @@ type ExtraFields = { async function promptExtraFields(): Promise<ExtraFields> { const hasToolCalling = await getUserInput( - "Does the tool support tool calling? (y/n) ", + "Does this integration support tool calling? (y/n) ", undefined, true ); const hasJsonMode = await getUserInput( - "Does the tool support JSON mode? (y/n) ", + "Does this integration support JSON mode? (y/n) ", undefined, true ); const hasImageInput = await getUserInput( - "Does the tool support image input? (y/n) ", + "Does this integration support image input? (y/n) ", undefined, true ); const hasAudioInput = await getUserInput( - "Does the tool support audio input? (y/n) ", + "Does this integration support audio input? (y/n) ", undefined, true ); const hasVideoInput = await getUserInput( - "Does the tool support video input? (y/n) ", + "Does this integration support video input? (y/n) ", undefined, true ); const hasTokenLevelStreaming = await getUserInput( - "Does the tool support token level streaming? (y/n) ", + "Does this integration support token level streaming? (y/n) ", undefined, true ); const hasTokenUsage = await getUserInput( - "Does the tool support token usage? (y/n) ", + "Does this integration support token usage? (y/n) ", undefined, true ); const hasLogprobs = await getUserInput( - "Does the tool support logprobs? (y/n) ", + "Does this integration support logprobs? (y/n) ", undefined, true ); const hasLocal = await getUserInput( - "Does the tool support local usage? 
(y/n) ", + "Does this integration support local usage? (y/n) ", undefined, true ); const hasSerializable = await getUserInput( - "Does the tool support serializable output? (y/n) ", + "Does this integration support serializable output? (y/n) ", undefined, true ); const hasPySupport = await getUserInput( - "Does the tool support Python support? (y/n) ", + "Does this integration have Python support? (y/n) ", undefined, true ); @@ -140,6 +142,7 @@ async function promptExtraFields(): Promise { export async function fillChatIntegrationDocTemplate(fields: { packageName: string; moduleName: string; + isCommunity: boolean; }) { // Ask the user if they'd like to fill in extra fields, if so, prompt them. let extraFields: ExtraFields | undefined; @@ -151,14 +154,27 @@ export async function fillChatIntegrationDocTemplate(fields: { extraFields = await promptExtraFields(); } - const formattedApiRefPackageUrl = API_REF_BASE_PACKAGE_URL.replace( - PACKAGE_NAME_PLACEHOLDER, - fields.packageName - ); - const formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace( - PACKAGE_NAME_PLACEHOLDER, - fields.packageName - ).replace(MODULE_NAME_PLACEHOLDER, fields.moduleName); + let formattedApiRefPackageUrl = ""; + let formattedApiRefModuleUrl = ""; + if (fields.isCommunity) { + formattedApiRefPackageUrl = API_REF_BASE_PACKAGE_URL.replace( + PACKAGE_NAME_PLACEHOLDER, + `community_chat_models_${fields.packageName}` + ); + formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace( + PACKAGE_NAME_PLACEHOLDER, + `community_chat_models_${fields.packageName}` + ).replace(MODULE_NAME_PLACEHOLDER, fields.moduleName); + } else { + formattedApiRefPackageUrl = API_REF_BASE_PACKAGE_URL.replace( + PACKAGE_NAME_PLACEHOLDER, + fields.packageName + ); + formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace( + PACKAGE_NAME_PLACEHOLDER, + fields.packageName + ).replace(MODULE_NAME_PLACEHOLDER, fields.moduleName); + } const success = await Promise.all([ fetchAPIRefUrl(formattedApiRefPackageUrl), @@ -170,8 +186,20 @@ export async function fillChatIntegrationDocTemplate(fields: { } const packageNameShortSnakeCase = fields.packageName.replaceAll("-", "_"); - const fullPackageNameSnakeCase = `langchain_${packageNameShortSnakeCase}`; - const packageNamePretty = `@langchain/${fields.packageName}`; + let fullPackageNameSnakeCase = ""; + let packageNamePretty = ""; + let fullPackageImportPath = ""; + + if (fields.isCommunity) { + fullPackageNameSnakeCase = `langchain_community_chat_models_${packageNameShortSnakeCase}`; + fullPackageImportPath = `@langchain/community/chat_models/${fields.packageName}`; + packageNamePretty = "@langchain/community"; + } else { + fullPackageNameSnakeCase = `langchain_${packageNameShortSnakeCase}`; + packageNamePretty = `@langchain/${fields.packageName}`; + fullPackageImportPath = packageNamePretty; + } + let moduleNameAllCaps = fields.moduleName.toUpperCase(); if (moduleNameAllCaps.startsWith("CHAT")) { moduleNameAllCaps = moduleNameAllCaps.replace("CHAT", ""); @@ -185,6 +213,7 @@ export async function fillChatIntegrationDocTemplate(fields: { packageNameShortSnakeCase ) .replaceAll(PACKAGE_NAME_PRETTY_PLACEHOLDER, packageNamePretty) + .replaceAll(PACKAGE_IMPORT_PATH_PLACEHOLDER, fullPackageImportPath) .replaceAll(MODULE_NAME_PLACEHOLDER, fields.moduleName) .replaceAll(MODULE_NAME_ALL_CAPS_PLACEHOLDER, moduleNameAllCaps) .replaceAll( diff --git a/libs/langchain-scripts/src/cli/docs/document_loaders.ts b/libs/langchain-scripts/src/cli/docs/document_loaders.ts new file mode 100644 index 
000000000000..359b94034e44 --- /dev/null +++ b/libs/langchain-scripts/src/cli/docs/document_loaders.ts @@ -0,0 +1,175 @@ +import * as path from "node:path"; +import * as fs from "node:fs"; +import _ from "lodash"; +import { + boldText, + getUserInput, + greenText, + redBackground, +} from "../utils/get-input.js"; + +const NODE_OR_WEB_PLACEHOLDER = "__fs_or_web__"; +const PACKAGE_NAME_PLACEHOLDER = "__package_name__"; +const MODULE_NAME_PLACEHOLDER = "__ModuleName__"; +const PACKAGE_NAME_SHORT_SNAKE_CASE_PLACEHOLDER = + "__package_name_short_snake_case__"; +const PACKAGE_NAME_SNAKE_CASE_PLACEHOLDER = "__package_name_snake_case__"; +const PACKAGE_IMPORT_PATH_PLACEHOLDER = "__import_path__"; + +// This should not be prefixed with `Chat` as it's used for API keys. +const MODULE_NAME_ALL_CAPS_PLACEHOLDER = "__MODULE_NAME_ALL_CAPS__"; + +const SERIALIZABLE_PLACEHOLDER = "__serializable__"; +const LOCAL_PLACEHOLDER = "__local__"; +const PY_SUPPORT_PLACEHOLDER = "__py_support__"; + +const WEB_SUPPORT_PLACEHOLDER = "__web_support__"; +const NODE_SUPPORT_PLACEHOLDER = "__fs_support__"; + +const API_REF_BASE_MODULE_URL = `https://api.js.langchain.com/classes/langchain_community_document_loaders_${NODE_OR_WEB_PLACEHOLDER}_${PACKAGE_NAME_PLACEHOLDER}.${MODULE_NAME_PLACEHOLDER}.html`; + +const TEMPLATE_PATH = path.resolve( + "./src/cli/docs/templates/document_loaders.ipynb" +); +const INTEGRATIONS_DOCS_PATH = path.resolve( + "../../docs/core_docs/docs/integrations/document_loaders" +); + +const fetchAPIRefUrl = async (url: string): Promise<boolean> => { + try { + const res = await fetch(url); + if (res.status !== 200) { + throw new Error(`API Reference URL ${url} not found.`); + } + return true; + } catch (_) { + return false; + } +}; + +type ExtraFields = { + nodeSupport: boolean; + webSupport: boolean; + serializable: boolean; + pySupport: boolean; + local: boolean; +}; + +async function promptExtraFields(): Promise<ExtraFields> { + const hasNodeSupport = await getUserInput( + "Does this integration support Node environments? (y/n) ", + undefined, + true + ); + const hasWebSupport = await getUserInput( + "Does this integration support web environments? (y/n) ", + undefined, + true + ); + const hasSerializable = await getUserInput( + "Does this integration support serializable output? (y/n) ", + undefined, + true + ); + const hasPySupport = await getUserInput( + "Does this integration have Python support? (y/n) ", + undefined, + true + ); + const hasLocalSupport = await getUserInput( + "Does this integration support running locally? (y/n) ", + undefined, + true + ); + + return { + nodeSupport: hasNodeSupport.toLowerCase() === "y", + webSupport: hasWebSupport.toLowerCase() === "y", + serializable: hasSerializable.toLowerCase() === "y", + pySupport: hasPySupport.toLowerCase() === "y", + local: hasLocalSupport.toLowerCase() === "y", + }; +} + +export async function fillDocLoaderIntegrationDocTemplate(fields: { + packageName: string; + moduleName: string; + webSupport?: boolean; + nodeSupport?: boolean; +}) { + // Ask the user if they'd like to fill in extra fields, if so, prompt them. + let extraFields: ExtraFields | undefined; + const shouldPromptExtraFields = await getUserInput( + "Would you like to fill out optional fields? 
(y/n) ", + "white_background" + ); + if (shouldPromptExtraFields.toLowerCase() === "y") { + extraFields = await promptExtraFields(); + } + + const formattedApiRefModuleUrl = API_REF_BASE_MODULE_URL.replace( + PACKAGE_NAME_PLACEHOLDER, + fields.packageName + ) + .replace(MODULE_NAME_PLACEHOLDER, fields.moduleName) + .replace(NODE_OR_WEB_PLACEHOLDER, extraFields?.webSupport ? "web" : "fs"); + + const success = await fetchAPIRefUrl(formattedApiRefModuleUrl); + if (!success) { + // Don't error out because this might be used before the package is released. + console.error("Invalid package or module name. API reference not found."); + } + + const packageNameShortSnakeCase = fields.packageName.replaceAll("-", "_"); + const fullPackageNameSnakeCase = `langchain_community_document_loaders_${ + extraFields?.webSupport ? "web" : "fs" + }_${packageNameShortSnakeCase}`; + const fullPackageImportPath = `@langchain/community/document_loaders/${ + extraFields?.webSupport ? "web" : "fs" + }/${fields.packageName}`; + + let moduleNameAllCaps = _.snakeCase(fields.moduleName).toUpperCase(); + if (moduleNameAllCaps.endsWith("DOCUMENT_LOADER")) { + moduleNameAllCaps = moduleNameAllCaps.replace("DOCUMENT_LOADER", ""); + } + + const docTemplate = (await fs.promises.readFile(TEMPLATE_PATH, "utf-8")) + .replaceAll(PACKAGE_NAME_PLACEHOLDER, fields.packageName) + .replaceAll(PACKAGE_NAME_SNAKE_CASE_PLACEHOLDER, fullPackageNameSnakeCase) + .replaceAll( + PACKAGE_NAME_SHORT_SNAKE_CASE_PLACEHOLDER, + packageNameShortSnakeCase + ) + .replaceAll(PACKAGE_IMPORT_PATH_PLACEHOLDER, fullPackageImportPath) + .replaceAll(MODULE_NAME_PLACEHOLDER, fields.moduleName) + .replaceAll(MODULE_NAME_ALL_CAPS_PLACEHOLDER, moduleNameAllCaps) + .replace(WEB_SUPPORT_PLACEHOLDER, extraFields?.webSupport ? "✅" : "❌") + .replace(NODE_SUPPORT_PLACEHOLDER, extraFields?.nodeSupport ? "✅" : "❌") + .replace(LOCAL_PLACEHOLDER, extraFields?.local ? "✅" : "❌") + .replace( + SERIALIZABLE_PLACEHOLDER, + extraFields?.serializable ? "✅" : "beta" + ) + .replace(PY_SUPPORT_PLACEHOLDER, extraFields?.pySupport ? "✅" : "❌"); + + const docPath = path.join( + INTEGRATIONS_DOCS_PATH, + extraFields?.webSupport ? "web_loaders" : "file_loaders", + `${packageNameShortSnakeCase}.ipynb` + ); + await fs.promises.writeFile(docPath, docTemplate); + const prettyDocPath = docPath.split("docs/core_docs/")[1]; + + const updatePythonDocUrlText = ` ${redBackground( + "- Update the Python documentation URL with the proper URL." + )}`; + const successText = `\nSuccessfully created new document loader integration doc at ${prettyDocPath}.`; + + console.log( + `${greenText(successText)}\n +${boldText("Next steps:")} +${extraFields?.pySupport ? updatePythonDocUrlText : ""} + - Run all code cells in the generated doc to record the outputs. 
+ - Add extra sections on integration-specific features.\n` + ); +} diff --git a/libs/langchain-scripts/src/cli/docs/index.ts b/libs/langchain-scripts/src/cli/docs/index.ts index 87543142e703..a7a89745e7a1 100644 --- a/libs/langchain-scripts/src/cli/docs/index.ts +++ b/libs/langchain-scripts/src/cli/docs/index.ts @@ -3,29 +3,32 @@ // --------------------------------------------- import { Command } from "commander"; import { fillChatIntegrationDocTemplate } from "./chat.js"; +import { fillDocLoaderIntegrationDocTemplate } from "./document_loaders.js"; type CLIInput = { package: string; module: string; type: string; + community: boolean; }; async function main() { const program = new Command(); program .description("Create a new integration doc.") - .option( - "--package <package>", - "Package name, eg openai. Should be value of @langchain/<package>" - ) + .option("--package <package>", "Package name, e.g. openai.") .option("--module <module>", "Module name, e.g ChatOpenAI") - .option("--type <type>", "Type of integration, e.g. 'chat'"); + .option("--type <type>", "Type of integration, e.g. 'chat'") + .option( + "--community", + "If the integration is a community integration. Will affect the fields populated in the template." + ); program.parse(); const options = program.opts(); - const { module: moduleName, type } = options; + const { module: moduleName, type, community: isCommunity } = options; let { package: packageName } = options; if (packageName.startsWith("@langchain/")) { @@ -34,11 +37,21 @@ async function main() { switch (type) { case "chat": - await fillChatIntegrationDocTemplate({ packageName, moduleName }); + await fillChatIntegrationDocTemplate({ + packageName, + moduleName, + isCommunity, + }); + break; + case "doc_loader": + await fillDocLoaderIntegrationDocTemplate({ + packageName, + moduleName, + }); break; default: console.error( - `Invalid type: ${type}.\nOnly 'chat' is supported at this time.` + `Invalid type: ${type}.\nOnly 'chat' and 'doc_loader' are supported at this time.` ); process.exit(1); } diff --git a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb index 99bcfdcf4562..1f6508d8a861 100644 --- a/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb +++ b/libs/langchain-scripts/src/cli/docs/templates/chat.ipynb @@ -24,9 +24,9 @@ "\n", "- TODO: Make sure Python integration doc link is correct, if applicable.\n", "\n", - "| Class | Package | Local | Serializable | [PY support](https:/python.langchain.com/v0.2/docs/integrations/chat/__package_name_short_snake_case__) | Package downloads | Package latest |\n", + "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/chat/__package_name_short_snake_case__) | Package downloads | Package latest |\n", "| :--- | :--- | :---: | :---: | :---: | :---: | :---: |\n", - "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | [__package_name_pretty__](https://api.js.langchain.com/modules/__package_name_snake_case__.html) | __local__ | __serializable__ | __py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name_pretty__?style=flat-square&label=%20) | ![NPM - Version](https://img.shields.io/npm/v/__package_name_pretty__?style=flat-square&label=%20) |\n", + "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | [__package_name_pretty__](https://api.js.langchain.com/modules/__package_name_snake_case__.html) | __local__ | __serializable__ | 
__py_support__ | ![NPM - Downloads](https://img.shields.io/npm/dm/__package_name_pretty__?style=flat-square&label=%20&) | ![NPM - Version](https://img.shields.io/npm/v/__package_name_pretty__?style=flat-square&label=%20&) |\n", "\n", "### Model features\n", "| [Tool calling](/docs/how_to/tool_calling) | [Structured output](/docs/how_to/structured_output/) | JSON mode | [Image input](/docs/how_to/multimodal_inputs/) | Audio input | Video input | [Token-level streaming](/docs/how_to/chat_streaming/) | [Token usage](/docs/how_to/chat_token_usage_tracking/) | [Logprobs](/docs/how_to/logprobs/) |\n", @@ -45,46 +45,30 @@ "\n", "Head to (TODO: link) to sign up to `__ModuleName__` and generate an API key. Once you've done this set the `__MODULE_NAME_ALL_CAPS___API_KEY` environment variable:\n", "\n", - "```{=mdx}\n", - "\n", "```bash\n", "export __MODULE_NAME_ALL_CAPS___API_KEY=\"your-api-key\"\n", "```\n", "\n", - "```" ] - }, - { - "cell_type": "markdown", - "id": "72ee0c4b-9764-423a-9dbf-95129e185210", - "metadata": {}, - "source": [ "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n", "\n", - "```{=mdx}\n", - "\n", "```bash\n", "# export LANGCHAIN_TRACING_V2=\"true\"\n", "# export LANGCHAIN_API_KEY=\"your-api-key\"\n", "```\n", "\n", - "```" ] - }, - { - "cell_type": "markdown", - "id": "0730d6a1-c893-4840-9817-5e5251676d5d", - "metadata": {}, - "source": [ "### Installation\n", "\n", "The LangChain __ModuleName__ integration lives in the `__package_name_pretty__` package:\n", "\n", "```{=mdx}\n", + "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n", + "import Npm2Yarn from \"@theme/Npm2Yarn\";\n", "\n", - "```bash npm2yarn\n", - "npm i __package_name_pretty__\n", - "```\n", + "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n", + "\n", + "<Npm2Yarn>\n", + "  __package_name_pretty__\n", + "</Npm2Yarn>\n", "\n", "```" ] @@ -112,7 +96,7 @@ }, "outputs": [], "source": [ - "import { __ModuleName__ } from \"__package_name_pretty__\" \n", + "import { __ModuleName__ } from \"__import_path__\"\n", "\n", "const llm = new __ModuleName__({\n", " model: \"model-name\",\n", diff --git a/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb b/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb new file mode 100644 index 000000000000..bc49c5c0801c --- /dev/null +++ b/libs/langchain-scripts/src/cli/docs/templates/document_loaders.ipynb @@ -0,0 +1,189 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "sidebar_label: __ModuleName__\n", + "---" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# __ModuleName__\n", + "\n", + "- TODO: Make sure API reference link is correct.\n", + "\n", + "This notebook provides a quick overview for getting started with [__ModuleName__](/docs/integrations/document_loaders/). 
+    "This notebook provides a quick overview for getting started with [__ModuleName__](/docs/integrations/document_loaders/). For detailed documentation of all __ModuleName__ features and configurations head to the [API reference](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html).\n",
+    "\n",
+    "- TODO: Add any other relevant links, like information about underlying API, etc.\n",
+    "\n",
+    "## Overview\n",
+    "### Integration details\n",
+    "\n",
+    "- TODO: Fill in table features.\n",
+    "- TODO: Remove PY support link if not relevant, otherwise ensure link is correct.\n",
+    "- TODO: Make sure API reference links are correct.\n",
+    "\n",
+    "| Class | Package | Local | Serializable | [PY support](https://python.langchain.com/docs/integrations/document_loaders/__package_name_short_snake_case__) |\n",
+    "| :--- | :--- | :---: | :---: | :---: |\n",
+    "| [__ModuleName__](https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html) | @langchain/community | __local__ | __serializable__ | __py_support__ |\n",
+    "\n",
+    "### Loader features\n",
+    "| Source | Web Support | Node Support |\n",
+    "| :---: | :---: | :---: |\n",
+    "| __ModuleName__ | __web_support__ | __fs_support__ |\n",
+    "\n",
+    "## Setup\n",
+    "\n",
+    "- TODO: Update with relevant info.\n",
+    "\n",
+    "To access the `__ModuleName__` document loader you'll need to install the `@langchain/community` integration package, create a **__ModuleName__** account, and get an API key.\n",
+    "\n",
+    "### Credentials\n",
+    "\n",
+    "- TODO: Update with relevant info.\n",
+    "\n",
+    "Head to (TODO: link) to sign up to __ModuleName__ and generate an API key. Once you've done this set the `__MODULE_NAME_ALL_CAPS___API_KEY` environment variable:\n",
+    "\n",
+    "```bash\n",
+    "export __MODULE_NAME_ALL_CAPS___API_KEY=\"your-api-key\"\n",
+    "```\n",
+    "\n",
+    "If you want to get automated tracing of your model calls you can also set your [LangSmith](https://docs.smith.langchain.com/) API key by uncommenting below:\n",
+    "\n",
+    "```bash\n",
+    "# export LANGCHAIN_TRACING_V2=\"true\"\n",
+    "# export LANGCHAIN_API_KEY=\"your-api-key\"\n",
+    "```\n",
+    "\n",
+    "### Installation\n",
+    "\n",
+    "The LangChain __ModuleName__ integration lives in the `@langchain/community` package:\n",
+    "\n",
+    "```{=mdx}\n",
+    "import IntegrationInstallTooltip from \"@mdx_components/integration_install_tooltip.mdx\";\n",
+    "import Npm2Yarn from \"@theme/Npm2Yarn\";\n",
+    "\n",
+    "<IntegrationInstallTooltip></IntegrationInstallTooltip>\n",
+    "\n",
+    "<Npm2Yarn>\n",
+    "  @langchain/community\n",
+    "</Npm2Yarn>\n",
+    "\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Instantiation\n",
+    "\n",
+    "Now we can instantiate our loader and load documents:\n",
+    "\n",
+    "- TODO: Update loader instantiation with relevant params."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "typescript"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "import { __ModuleName__ } from \"__import_path__\"\n",
+    "\n",
+    "const loader = new __ModuleName__({\n",
+    "  // required params = ...\n",
+    "  // optional params = ...\n",
+    "})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Load\n",
+    "\n",
+    "- TODO: Run cells to show loading capabilities"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "typescript"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "const docs = await loader.load()\n",
+    "docs[0]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "typescript"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "console.log(docs[0].metadata)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## TODO: Any functionality specific to this document loader\n",
+    "\n",
+    "E.g. using specific configs for different loading behavior. Delete if not relevant."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## API reference\n",
+    "\n",
+    "For detailed documentation of all __ModuleName__ features and configurations head to the API reference: https://api.js.langchain.com/classes/__package_name_snake_case__.__ModuleName__.html"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.11.9"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/yarn.lock b/yarn.lock
index 20617a68d1fb..f2c95da3ec90 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -12339,6 +12339,7 @@ __metadata:
     "@swc/core": ^1.3.90
     "@swc/jest": ^0.2.29
     "@tsconfig/recommended": ^1.0.3
+    "@types/lodash": ^4
    "@typescript-eslint/eslint-plugin": ^6.12.0
    "@typescript-eslint/parser": ^6.12.0
    axios: ^1.6.7
@@ -12354,6 +12355,7 @@ __metadata:
    glob: ^10.3.10
    jest: ^29.5.0
    jest-environment-node: ^29.6.4
+    lodash: ^4.17.21
    prettier: ^2.8.3
    readline: ^1.3.0
    release-it: ^15.10.1
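
For reference, the extended option parsing in `libs/langchain-scripts/src/cli/docs/index.ts` can be exercised in isolation. Below is a minimal sketch using commander's `{ from: "user" }` parse mode; the module name and values are hypothetical, and the yarn script that actually invokes this entrypoint is not shown in this diff:

```typescript
import { Command } from "commander";

// Re-declare the same option surface the CLI defines above.
const program = new Command()
  .option("--package <package>", "Package name, eg openai.")
  .option("--module <module>", "Module name, e.g ChatOpenAI")
  .option("--type <type>", "Type of integration, e.g. 'chat'")
  .option("--community", "Whether this is a community integration.");

// Simulate: --package community --module MyServiceLoader --type doc_loader --community
program.parse(
  ["--package", "community", "--module", "MyServiceLoader", "--type", "doc_loader", "--community"],
  { from: "user" }
);

console.log(program.opts());
// -> { package: 'community', module: 'MyServiceLoader', type: 'doc_loader', community: true }
```

Because `--community` declares no value placeholder, commander treats it as a boolean flag: it parses to `true` when present and is left undefined when omitted, which is how the destructured `community: isCommunity` reaches `fillChatIntegrationDocTemplate`.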
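
To make the placeholder substitution in the new `document_loaders.ipynb` template concrete, here is a sketch of what its instantiation and load cells expand to once the CLI fills in `__ModuleName__` and `__import_path__`. The loader name and import path below are hypothetical examples, not a real `@langchain/community` entrypoint:

```typescript
// Hypothetical expansion:
//   __ModuleName__  -> ExampleSiteLoader
//   __import_path__ -> "@langchain/community/document_loaders/web/example_site"
import { ExampleSiteLoader } from "@langchain/community/document_loaders/web/example_site";

const loader = new ExampleSiteLoader({
  // required params, e.g. an API key or source URL for the underlying service
});

// load() resolves to an array of LangChain Document objects,
// each with pageContent and metadata.
const docs = await loader.load();
console.log(docs[0].pageContent.slice(0, 100));
console.log(docs[0].metadata);
```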