diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 7a40881c7eb96..79a1e6cfeef17 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,6 +1,9 @@ blank_issues_enabled: true version: 2.1 contact_links: + - name: 🤔 Question or Problem + about: Ask a question or discuss a problem in GitHub Discussions. + url: https://github.com/langchain-ai/langchain/discussions - name: Discord url: https://discord.gg/6adMQxSpJS about: General community discussions diff --git a/.github/actions/poetry_setup/action.yml b/.github/actions/poetry_setup/action.yml index d1342465c34bd..4f7c606077807 100644 --- a/.github/actions/poetry_setup/action.yml +++ b/.github/actions/poetry_setup/action.yml @@ -26,7 +26,7 @@ inputs: runs: using: composite steps: - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 name: Setup python ${{ inputs.python-version }} with: python-version: ${{ inputs.python-version }} diff --git a/.github/scripts/check_diff.py b/.github/scripts/check_diff.py index a10dee9ec32b0..15192fe5daf1d 100644 --- a/.github/scripts/check_diff.py +++ b/.github/scripts/check_diff.py @@ -13,6 +13,10 @@ files = sys.argv[1:] dirs_to_run = set() + if len(files) == 300: + # max diff length is 300 files, so there are likely files missing + raise ValueError("Max diff reached. Please manually run CI on changed libs.") + for file in files: if any( file.startswith(dir_) @@ -48,4 +52,5 @@ dirs_to_run.update(LANGCHAIN_DIRS) else: pass - print(json.dumps(list(dirs_to_run))) + json_output = json.dumps(list(dirs_to_run)) + print(f"dirs-to-run={json_output}") diff --git a/.github/workflows/_integration_test.yml b/.github/workflows/_integration_test.yml index e3507c4880c33..e6c8296c59c19 100644 --- a/.github/workflows/_integration_test.yml +++ b/.github/workflows/_integration_test.yml @@ -37,6 +37,12 @@ jobs: shell: bash run: poetry install --with test,test_integration + - name: 'Authenticate to Google Cloud' + id: 'auth' + uses: google-github-actions/auth@v2 + with: + credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' + - name: Run integration tests shell: bash env: @@ -44,6 +50,7 @@ jobs: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }} TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: | make integration_tests diff --git a/.github/workflows/_release.yml b/.github/workflows/_release.yml index 649f9c7c4e213..46a847276ae51 100644 --- a/.github/workflows/_release.yml +++ b/.github/workflows/_release.yml @@ -1,5 +1,5 @@ name: release - +run-name: Release ${{ inputs.working-directory }} by @${{ github.actor }} on: workflow_call: inputs: @@ -149,6 +149,12 @@ jobs: run: make tests working-directory: ${{ inputs.working-directory }} + - name: 'Authenticate to Google Cloud' + id: 'auth' + uses: google-github-actions/auth@v2 + with: + credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' + - name: Run integration tests if: ${{ startsWith(inputs.working-directory, 'libs/partners/') }} env: @@ -156,9 +162,16 @@ jobs: ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }} MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }} TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} run: make integration_tests working-directory: ${{ inputs.working-directory }} + - name: Run unit tests with minimum dependency versions + if: ${{ (inputs.working-directory == 'libs/langchain') || (inputs.working-directory == 
'libs/community') || (inputs.working-directory == 'libs/experimental') }} + run: | + poetry run pip install -r _test_minimum_requirements.txt + make tests + working-directory: ${{ inputs.working-directory }} publish: needs: diff --git a/.github/workflows/check_diffs.yml b/.github/workflows/check_diffs.yml index c09b6cc54635b..83dd2d58638fa 100644 --- a/.github/workflows/check_diffs.yml +++ b/.github/workflows/check_diffs.yml @@ -5,11 +5,6 @@ on: push: branches: [master] pull_request: - paths: - - ".github/actions/**" - - ".github/tools/**" - - ".github/workflows/**" - - "libs/**" # If another push to the same PR or branch happens while this workflow is still running, # cancel the earlier run in favor of the next run. @@ -26,13 +21,14 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.10' - id: files uses: Ana06/get-changed-files@v2.2.0 - id: set-matrix - run: echo "dirs-to-run=$(python .github/scripts/check_diff.py ${{ steps.files.outputs.all }})" >> $GITHUB_OUTPUT + run: | + python .github/scripts/check_diff.py ${{ steps.files.outputs.all }} >> $GITHUB_OUTPUT outputs: dirs-to-run: ${{ steps.set-matrix.outputs.dirs-to-run }} ci: diff --git a/.github/workflows/scheduled_test.yml b/.github/workflows/scheduled_test.yml index ffacc28a999b8..0130f427d92b8 100644 --- a/.github/workflows/scheduled_test.yml +++ b/.github/workflows/scheduled_test.yml @@ -36,7 +36,7 @@ jobs: - name: 'Authenticate to Google Cloud' id: 'auth' - uses: 'google-github-actions/auth@v1' + uses: google-github-actions/auth@v2 with: credentials_json: '${{ secrets.GOOGLE_CREDENTIALS }}' diff --git a/.readthedocs.yaml b/.readthedocs.yaml index d3d92d5870e66..9b5eb5beb113c 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -10,7 +10,7 @@ build: tools: python: "3.11" commands: - - python -mvirtualenv $READTHEDOCS_VIRTUALENV_PATH + - python -m virtualenv $READTHEDOCS_VIRTUALENV_PATH - python -m pip install --upgrade --no-cache-dir pip setuptools - python -m pip install --upgrade --no-cache-dir sphinx readthedocs-sphinx-ext - python -m pip install ./libs/partners/* diff --git a/cookbook/LLaMA2_sql_chat.ipynb b/cookbook/LLaMA2_sql_chat.ipynb index 86e4d1ec99603..3b697f314de82 100644 --- a/cookbook/LLaMA2_sql_chat.ipynb +++ b/cookbook/LLaMA2_sql_chat.ipynb @@ -61,13 +61,13 @@ ], "source": [ "# Local\n", - "from langchain.chat_models import ChatOllama\n", + "from langchain_community.chat_models import ChatOllama\n", "\n", "llama2_chat = ChatOllama(model=\"llama2:13b-chat\")\n", "llama2_code = ChatOllama(model=\"codellama:7b-instruct\")\n", "\n", "# API\n", - "from langchain.llms import Replicate\n", + "from langchain_community.llms import Replicate\n", "\n", "# REPLICATE_API_TOKEN = getpass()\n", "# os.environ[\"REPLICATE_API_TOKEN\"] = REPLICATE_API_TOKEN\n", @@ -107,7 +107,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabase\n", + "from langchain_community.utilities import SQLDatabase\n", "\n", "db = SQLDatabase.from_uri(\"sqlite:///nba_roster.db\", sample_rows_in_table_info=0)\n", "\n", @@ -125,7 +125,7 @@ "id": "654b3577-baa2-4e12-a393-f40e5db49ac7", "metadata": {}, "source": [ - "## Query a SQL DB \n", + "## Query a SQL Database \n", "\n", "Follow the runnables workflow [here](https://python.langchain.com/docs/expression_language/cookbook/sql_db)." 
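For readers skimming the diff: the linked workflow boils down to injecting the live table schema into a prompt and piping it through the model. A minimal sketch, assuming the `db` and `llama2_code` objects from the earlier cells and the `template` string defined in the next cell; the system message and stop token here are assumptions:

```python
# Sketch of the runnables-based SQL chain (assumes `db`, `llama2_code`, and
# `template` from the surrounding cells; the stop token is an assumption).
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "Given an input question, convert it to a SQL query. No preamble."),
        ("human", template),
    ]
)

# Inject the live schema, render the prompt, call the model, and parse the text.
sql_chain = (
    RunnablePassthrough.assign(schema=lambda _: db.get_table_info())
    | prompt
    | llama2_code.bind(stop=["\nSQLResult:"])
    | StrOutputParser()
)

sql_chain.invoke({"question": "What team is Klay Thompson on?"})
```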
] @@ -149,8 +149,9 @@ ], "source": [ "# Prompt\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", + "# Update the template to match the SQL dialect in use (e.g., MySQL, Microsoft SQL Server)\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", "\n", @@ -277,7 +278,7 @@ "source": [ "# Prompt\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "\n", "template = \"\"\"Given an input question, convert it to a SQL query. No pre-amble. Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", diff --git a/cookbook/Multi_modal_RAG.ipynb b/cookbook/Multi_modal_RAG.ipynb index cbfccdfdd01f3..79f311328a143 100644 --- a/cookbook/Multi_modal_RAG.ipynb +++ b/cookbook/Multi_modal_RAG.ipynb @@ -101,7 +101,7 @@ "If you want to use the provided folder, then simply opt for a [pdf loader](https://python.langchain.com/docs/modules/data_connection/document_loaders/pdf) for the document:\n", "\n", "```\n", - "from langchain.document_loaders import PyPDFLoader\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "loader = PyPDFLoader(path + fname)\n", "docs = loader.load()\n", "tables = [] # Ignore w/ basic pdf loader\n", "```\n", @@ -198,9 +198,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "# Generate summaries of text elements\n", @@ -341,7 +341,7 @@ "Add raw docs and doc summaries to [Multi Vector Retriever](https://python.langchain.com/docs/modules/data_connection/retrievers/multi_vector#summary): \n", "\n", "* Store the raw texts, tables, and images in the `docstore`.\n", - "* Store the texts, table summaries, and image summaries in the `vectorstore` for semantic retrieval." + "* Store the texts, table summaries, and image summaries in the `vectorstore` for efficient semantic retrieval." 
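The docstore/vectorstore split described in the two bullets above is exactly what `MultiVectorRetriever` implements: summaries are embedded for search, and hits are resolved back to the raw content through a shared id. The notebook's `create_multi_vector_retriever` helper (in the hunk below) wires this up; as a compact reference, a minimal sketch with placeholder summaries and documents:

```python
import uuid

from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import InMemoryStore
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

# Summaries are embedded and searched; the raw content is what gets returned.
vectorstore = Chroma(collection_name="summaries", embedding_function=OpenAIEmbeddings())
store = InMemoryStore()
id_key = "doc_id"
retriever = MultiVectorRetriever(vectorstore=vectorstore, docstore=store, id_key=id_key)

summaries = ["summary of a table", "summary of an image"]  # placeholders
raw_contents = ["<full table text>", "<base64-encoded image>"]  # placeholders
doc_ids = [str(uuid.uuid4()) for _ in raw_contents]

# Index each summary under its document id; store raw content under the same id.
retriever.vectorstore.add_documents(
    [
        Document(page_content=summary, metadata={id_key: doc_ids[i]})
        for i, summary in enumerate(summaries)
    ]
)
retriever.docstore.mset(list(zip(doc_ids, raw_contents)))
```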
] }, { @@ -353,11 +353,11 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def create_multi_vector_retriever(\n", diff --git a/cookbook/Multi_modal_RAG_google.ipynb b/cookbook/Multi_modal_RAG_google.ipynb index 741ddefea2e62..e2b88b5317cab 100644 --- a/cookbook/Multi_modal_RAG_google.ipynb +++ b/cookbook/Multi_modal_RAG_google.ipynb @@ -93,7 +93,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import PyPDFLoader\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "\n", "loader = PyPDFLoader(\"./cj/cj.pdf\")\n", "docs = loader.load()\n", @@ -158,11 +158,11 @@ } ], "source": [ - "from langchain.chat_models import ChatVertexAI\n", - "from langchain.llms import VertexAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.schema.output_parser import StrOutputParser\n", + "from langchain_community.chat_models import ChatVertexAI\n", + "from langchain_community.llms import VertexAI\n", "from langchain_core.messages import AIMessage\n", + "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda\n", "\n", "\n", @@ -243,7 +243,7 @@ "import base64\n", "import os\n", "\n", - "from langchain.schema.messages import HumanMessage\n", + "from langchain_core.messages import HumanMessage\n", "\n", "\n", "def encode_image(image_path):\n", @@ -342,11 +342,11 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import VertexAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", - "from langchain.schema.document import Document\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import VertexAIEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_core.documents import Document\n", "\n", "\n", "def create_multi_vector_retriever(\n", @@ -440,7 +440,7 @@ "import re\n", "\n", "from IPython.display import HTML, display\n", - "from langchain.schema.runnable import RunnableLambda, RunnablePassthrough\n", + "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", "from PIL import Image\n", "\n", "\n", diff --git a/cookbook/Semi_Structured_RAG.ipynb b/cookbook/Semi_Structured_RAG.ipynb index 91820b69d2fd0..2429413558ee2 100644 --- a/cookbook/Semi_Structured_RAG.ipynb +++ b/cookbook/Semi_Structured_RAG.ipynb @@ -235,9 +235,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -318,11 +318,11 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores import Chroma\n", + "from 
langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb index 3c618e45d6728..82ce6faf7f3df 100644 --- a/cookbook/Semi_structured_and_multi_modal_RAG.ipynb +++ b/cookbook/Semi_structured_and_multi_modal_RAG.ipynb @@ -211,9 +211,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -373,11 +373,11 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# The vectorstore to use to index the child chunks\n", "vectorstore = Chroma(collection_name=\"summaries\", embedding_function=OpenAIEmbeddings())\n", diff --git a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb index 5d43158455f8f..19b9218ae76d9 100644 --- a/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb +++ b/cookbook/Semi_structured_multi_modal_RAG_LLaMA2.ipynb @@ -209,9 +209,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOllama\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_community.chat_models import ChatOllama\n", + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate" ] }, { @@ -376,10 +376,10 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import GPT4AllEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.embeddings import GPT4AllEmbeddings\n", + "from langchain_community.vectorstores import Chroma\n", "from langchain_core.documents import Document\n", "\n", "# The vectorstore to use to index the child chunks\n", diff --git a/cookbook/advanced_rag_eval.ipynb b/cookbook/advanced_rag_eval.ipynb index 2ca2048f5dbc9..45d424b452d4c 100644 --- a/cookbook/advanced_rag_eval.ipynb +++ b/cookbook/advanced_rag_eval.ipynb @@ -62,7 +62,7 @@ "path = \"/Users/rlm/Desktop/cpi/\"\n", "\n", "# Load\n", - "from langchain.document_loaders import PyPDFLoader\n", + "from langchain_community.document_loaders import PyPDFLoader\n", "\n", "loader = PyPDFLoader(path + \"cpi.pdf\")\n", "pdf_pages = loader.load()\n", @@ -132,8 +132,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", + 
"from langchain_openai import OpenAIEmbeddings\n", "\n", "baseline = Chroma.from_texts(\n", " texts=all_splits_pypdf_texts,\n", @@ -160,9 +160,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Prompt\n", "prompt_text = \"\"\"You are an assistant tasked with summarizing tables and text for retrieval. \\\n", diff --git a/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb b/cookbook/agent_vectorstore.ipynb similarity index 97% rename from docs/docs/modules/agents/how_to/agent_vectorstore.ipynb rename to cookbook/agent_vectorstore.ipynb index 7f14b74387bea..388e4702a3a6a 100644 --- a/docs/docs/modules/agents/how_to/agent_vectorstore.ipynb +++ b/cookbook/agent_vectorstore.ipynb @@ -13,7 +13,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9b22020a", "metadata": {}, @@ -29,10 +28,9 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "llm = OpenAI(temperature=0)" ] @@ -70,7 +68,7 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "loader = TextLoader(doc_path)\n", "documents = loader.load()\n", @@ -100,7 +98,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.document_loaders import WebBaseLoader" + "from langchain_community.document_loaders import WebBaseLoader" ] }, { @@ -146,7 +144,6 @@ "source": [] }, { - "attachments": {}, "cell_type": "markdown", "id": "c0a6c031", "metadata": {}, @@ -163,7 +160,7 @@ "source": [ "# Import things that are needed generically\n", "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { @@ -280,7 +277,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "787a9b5e", "metadata": {}, @@ -289,7 +285,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "9161ba91", "metadata": {}, @@ -411,7 +406,6 @@ ] }, { - "attachments": {}, "cell_type": "markdown", "id": "49a0cbbe", "metadata": {}, @@ -525,7 +519,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.10.1" } }, "nbformat": 4, diff --git a/cookbook/analyze_document.ipynb b/cookbook/analyze_document.ipynb index 9bfc43918a6c9..4b872d823a74e 100644 --- a/cookbook/analyze_document.ipynb +++ b/cookbook/analyze_document.ipynb @@ -29,7 +29,7 @@ "outputs": [], "source": [ "from langchain.chains import AnalyzeDocumentChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)" ] diff --git a/cookbook/autogpt/autogpt.ipynb b/cookbook/autogpt/autogpt.ipynb index 4410ac16c2c23..0d4930c4837c7 100644 --- a/cookbook/autogpt/autogpt.ipynb +++ b/cookbook/autogpt/autogpt.ipynb @@ -28,9 +28,9 @@ "outputs": [], "source": [ "from langchain.agents import Tool\n", - "from 
langchain.tools.file_management.read import ReadFileTool\n", - "from langchain.tools.file_management.write import WriteFileTool\n", - "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.tools.file_management.read import ReadFileTool\n", + "from langchain_community.tools.file_management.write import WriteFileTool\n", + "from langchain_community.utilities import SerpAPIWrapper\n", "\n", "search = SerpAPIWrapper()\n", "tools = [\n", @@ -62,8 +62,8 @@ "outputs": [], "source": [ "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -100,8 +100,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain_experimental.autonomous_agents import AutoGPT" + "from langchain_experimental.autonomous_agents import AutoGPT\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -167,7 +167,7 @@ }, "outputs": [], "source": [ - "from langchain.memory.chat_message_histories import FileChatMessageHistory\n", + "from langchain_community.chat_message_histories import FileChatMessageHistory\n", "\n", "agent = AutoGPT.from_llm_and_tools(\n", " ai_name=\"Tom\",\n", diff --git a/cookbook/autogpt/marathon_times.ipynb b/cookbook/autogpt/marathon_times.ipynb index 42f0607592096..44f2445e640b5 100644 --- a/cookbook/autogpt/marathon_times.ipynb +++ b/cookbook/autogpt/marathon_times.ipynb @@ -39,10 +39,10 @@ "\n", "import nest_asyncio\n", "import pandas as pd\n", - "from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore.document import Document\n", + "from langchain_community.agent_toolkits.pandas.base import create_pandas_dataframe_agent\n", "from langchain_experimental.autonomous_agents import AutoGPT\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Needed since Jupyter runs an async event loop\n", "nest_asyncio.apply()" @@ -93,8 +93,8 @@ "from typing import Optional\n", "\n", "from langchain.agents import tool\n", - "from langchain.tools.file_management.read import ReadFileTool\n", - "from langchain.tools.file_management.write import WriteFileTool\n", + "from langchain_community.tools.file_management.read import ReadFileTool\n", + "from langchain_community.tools.file_management.write import WriteFileTool\n", "\n", "ROOT_DIR = \"./data/\"\n", "\n", @@ -311,8 +311,8 @@ "# Memory\n", "import faiss\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()\n", "embedding_size = 1536\n", diff --git a/cookbook/baby_agi.ipynb b/cookbook/baby_agi.ipynb index a11c998e44caf..9545632a42fcd 100644 --- a/cookbook/baby_agi.ipynb +++ b/cookbook/baby_agi.ipynb @@ -31,9 +31,8 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", - "from langchain_experimental.autonomous_agents import BabyAGI" + "from langchain_experimental.autonomous_agents import BabyAGI\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { @@ -54,7 +53,7 @@ "outputs": [], "source": 
[ "from langchain.docstore import InMemoryDocstore\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS" ] }, { diff --git a/cookbook/baby_agi_with_agent.ipynb b/cookbook/baby_agi_with_agent.ipynb index 27fc1ec55a4cd..13476e53196c2 100644 --- a/cookbook/baby_agi_with_agent.ipynb +++ b/cookbook/baby_agi_with_agent.ipynb @@ -28,10 +28,9 @@ "from typing import Optional\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_experimental.autonomous_agents import BabyAGI" + "from langchain_experimental.autonomous_agents import BabyAGI\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { @@ -63,7 +62,7 @@ "%pip install faiss-cpu > /dev/null\n", "%pip install google-search-results > /dev/null\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS" ] }, { @@ -108,8 +107,8 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import OpenAI\n", "\n", "todo_prompt = PromptTemplate.from_template(\n", " \"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}\"\n", diff --git a/cookbook/camel_role_playing.ipynb b/cookbook/camel_role_playing.ipynb index 158b231c4497d..ab8f44adf99b0 100644 --- a/cookbook/camel_role_playing.ipynb +++ b/cookbook/camel_role_playing.ipynb @@ -36,7 +36,6 @@ "source": [ "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import (\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", @@ -46,7 +45,8 @@ " BaseMessage,\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/causal_program_aided_language_model.ipynb b/cookbook/causal_program_aided_language_model.ipynb index 2c2c1f3d83af7..0f1e5fb8c32b4 100644 --- a/cookbook/causal_program_aided_language_model.ipynb +++ b/cookbook/causal_program_aided_language_model.ipynb @@ -47,9 +47,9 @@ "outputs": [], "source": [ "from IPython.display import SVG\n", - "from langchain.llms import OpenAI\n", "from langchain_experimental.cpal.base import CPALChain\n", "from langchain_experimental.pal_chain import PALChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0, max_tokens=512)\n", "cpal_chain = CPALChain.from_univariate_prompt(llm=llm, verbose=True)\n", diff --git a/cookbook/code-analysis-deeplake.ipynb b/cookbook/code-analysis-deeplake.ipynb index 506650476639c..67c1ecbe39f3f 100644 --- a/cookbook/code-analysis-deeplake.ipynb +++ b/cookbook/code-analysis-deeplake.ipynb @@ -23,9 +23,9 @@ "metadata": {}, "source": [ "1. Prepare data:\n", - " 1. Upload all python project files using the `langchain.document_loaders.TextLoader`. We will call these files the **documents**.\n", + " 1. Upload all python project files using the `langchain_community.document_loaders.TextLoader`. We will call these files the **documents**.\n", " 2. 
Split all documents to chunks using the `langchain.text_splitter.CharacterTextSplitter`.\n", - " 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain.vectorstores.DeepLake`\n", + " 3. Embed chunks and upload them into the DeepLake using `langchain.embeddings.openai.OpenAIEmbeddings` and `langchain_community.vectorstores.DeepLake`\n", "2. Question-Answering:\n", " 1. Build a chain from `langchain.chat_models.ChatOpenAI` and `langchain.chains.ConversationalRetrievalChain`\n", " 2. Prepare questions.\n", @@ -166,7 +166,7 @@ } ], "source": [ - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "root_dir = \"../../../../../../libs\"\n", "\n", @@ -657,7 +657,7 @@ } ], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()\n", "embeddings" @@ -706,7 +706,7 @@ { "data": { "text/plain": [ - "" + "" ] }, "execution_count": 15, @@ -715,7 +715,7 @@ } ], "source": [ - "from langchain.vectorstores import DeepLake\n", + "from langchain_community.vectorstores import DeepLake\n", "\n", "username = \"\"\n", "\n", @@ -740,7 +740,7 @@ "metadata": {}, "outputs": [], "source": [ - "# from langchain.vectorstores import DeepLake\n", + "# from langchain_community.vectorstores import DeepLake\n", "\n", "# db = DeepLake.from_documents(\n", "# texts, embeddings, dataset_path=f\"hub://{}/langchain-code\", runtime={\"tensor_db\": True}\n", @@ -834,7 +834,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(\n", " model_name=\"gpt-3.5-turbo-0613\"\n", diff --git a/cookbook/custom_agent_with_plugin_retrieval.ipynb b/cookbook/custom_agent_with_plugin_retrieval.ipynb index 9b081065fd9f8..9131599da0fab 100644 --- a/cookbook/custom_agent_with_plugin_retrieval.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval.ipynb @@ -40,12 +40,12 @@ " AgentOutputParser,\n", " LLMSingleActionAgent,\n", ")\n", - "from langchain.agents.agent_toolkits import NLAToolkit\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.tools.plugin import AIPlugin" + "from langchain_community.agent_toolkits import NLAToolkit\n", + "from langchain_community.tools.plugin import AIPlugin\n", + "from langchain_openai import OpenAI" ] }, { @@ -114,9 +114,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb index 2937337d0da0e..30fc61712da6b 100644 --- a/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb +++ b/cookbook/custom_agent_with_plugin_retrieval_using_plugnplai.ipynb @@ -65,12 +65,12 @@ " AgentOutputParser,\n", " LLMSingleActionAgent,\n", ")\n", - "from langchain.agents.agent_toolkits import NLAToolkit\n", "from langchain.chains import LLMChain\n", - "from 
langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.tools.plugin import AIPlugin" + "from langchain_community.agent_toolkits import NLAToolkit\n", + "from langchain_community.tools.plugin import AIPlugin\n", + "from langchain_openai import OpenAI" ] }, { @@ -138,9 +138,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { diff --git a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb b/cookbook/custom_agent_with_tool_retrieval.ipynb similarity index 97% rename from docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb rename to cookbook/custom_agent_with_tool_retrieval.ipynb index dd7d041a07b50..7981a13716ba0 100644 --- a/docs/docs/modules/agents/how_to/custom_agent_with_tool_retrieval.ipynb +++ b/cookbook/custom_agent_with_tool_retrieval.ipynb @@ -7,8 +7,6 @@ "source": [ "# Custom agent with tool retrieval\n", "\n", - "This notebook builds off of [this notebook](/docs/modules/agents/how_to/custom_llm_agent) and assumes familiarity with how agents work.\n", - "\n", "The novel idea introduced in this notebook is using retrieval to select the set of tools to use to answer an agent query. This is useful when you have many, many tools to select from. You cannot put the descriptions of all the tools in the prompt (because of context length issues), so instead you dynamically select the N tools you actually want to consider at run time.\n", "\n", "In this notebook we will create a somewhat contrived example. We will have one legitimate tool (search) and then 99 fake tools which are just nonsense. We will then add a step in the prompt template that takes the user input and retrieves the tools relevant to the query." 
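Concretely, the retrieval step described above embeds each tool's description and selects tools by similarity to the user query. A minimal sketch with placeholder tools (the notebook builds its 99 fake tools the same way):

```python
from langchain.agents import Tool
from langchain.schema import Document
from langchain_community.vectorstores import FAISS
from langchain_openai import OpenAIEmbeddings

# Placeholder tools; the descriptions are what gets embedded and searched.
tools = [
    Tool(name="Search", func=lambda q: "ok", description="useful for questions about current events"),
    Tool(name="Music Search", func=lambda q: "ok", description="useful for finding songs"),
]

# Index each tool's description so tools can be selected by query similarity.
docs = [
    Document(page_content=t.description, metadata={"index": i})
    for i, t in enumerate(tools)
]
vector_store = FAISS.from_documents(docs, OpenAIEmbeddings())
retriever = vector_store.as_retriever()


def get_tools(query: str) -> list:
    """Return only the tools whose descriptions best match the query."""
    relevant = retriever.get_relevant_documents(query)
    return [tools[d.metadata["index"]] for d in relevant]
```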
@@ -41,10 +39,10 @@ " Tool,\n", ")\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", - "from langchain.utilities import SerpAPIWrapper" + "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { @@ -105,9 +103,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.schema import Document\n", - "from langchain.vectorstores import FAISS" + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -489,7 +487,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.3" + "version": "3.10.1" }, "vscode": { "interpreter": { diff --git a/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb b/cookbook/custom_multi_action_agent.ipynb similarity index 98% rename from docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb rename to cookbook/custom_multi_action_agent.ipynb index bfe084a280eab..271c4c0d81610 100644 --- a/docs/docs/modules/agents/how_to/custom_multi_action_agent.ipynb +++ b/cookbook/custom_multi_action_agent.ipynb @@ -26,7 +26,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentExecutor, BaseMultiActionAgent, Tool\n", - "from langchain.utilities import SerpAPIWrapper" + "from langchain_community.utilities import SerpAPIWrapper" ] }, { diff --git a/cookbook/databricks_sql_db.ipynb b/cookbook/databricks_sql_db.ipynb index c37794143b887..08faf009653d4 100644 --- a/cookbook/databricks_sql_db.ipynb +++ b/cookbook/databricks_sql_db.ipynb @@ -80,7 +80,7 @@ "outputs": [], "source": [ "# Connecting to Databricks with SQLDatabase wrapper\n", - "from langchain.utilities import SQLDatabase\n", + "from langchain_community.utilities import SQLDatabase\n", "\n", "db = SQLDatabase.from_databricks(catalog=\"samples\", schema=\"nyctaxi\")" ] @@ -93,7 +93,7 @@ "outputs": [], "source": [ "# Creating a OpenAI Chat LLM wrapper\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(temperature=0, model_name=\"gpt-4\")" ] @@ -115,7 +115,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabaseChain\n", + "from langchain_community.utilities import SQLDatabaseChain\n", "\n", "db_chain = SQLDatabaseChain.from_llm(llm, db, verbose=True)" ] @@ -177,7 +177,7 @@ "outputs": [], "source": [ "from langchain.agents import create_sql_agent\n", - "from langchain.agents.agent_toolkits import SQLDatabaseToolkit\n", + "from langchain_community.agent_toolkits import SQLDatabaseToolkit\n", "\n", "toolkit = SQLDatabaseToolkit(db=db, llm=llm)\n", "agent = create_sql_agent(llm=llm, toolkit=toolkit, verbose=True)" diff --git a/cookbook/deeplake_semantic_search_over_chat.ipynb b/cookbook/deeplake_semantic_search_over_chat.ipynb index aa6eb8c7cc26b..3dd2c920049a7 100644 --- a/cookbook/deeplake_semantic_search_over_chat.ipynb +++ b/cookbook/deeplake_semantic_search_over_chat.ipynb @@ -52,13 +52,12 @@ "import os\n", "\n", "from langchain.chains import RetrievalQA\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import (\n", " CharacterTextSplitter,\n", " RecursiveCharacterTextSplitter,\n", ")\n", - "from langchain.vectorstores import 
DeepLake\n", + "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", diff --git a/cookbook/docugami_xml_kg_rag.ipynb b/cookbook/docugami_xml_kg_rag.ipynb index 9e3170179c0ea..a9c8607935e64 100644 --- a/cookbook/docugami_xml_kg_rag.ipynb +++ b/cookbook/docugami_xml_kg_rag.ipynb @@ -470,13 +470,13 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", " SystemMessagePromptTemplate,\n", ")\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -545,11 +545,11 @@ "source": [ "import uuid\n", "\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers.multi_vector import MultiVectorRetriever\n", "from langchain.storage import InMemoryStore\n", - "from langchain.vectorstores.chroma import Chroma\n", + "from langchain_community.vectorstores.chroma import Chroma\n", "from langchain_core.documents import Document\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "\n", "def build_retriever(text_elements, tables, table_summaries):\n", diff --git a/cookbook/elasticsearch_db_qa.ipynb b/cookbook/elasticsearch_db_qa.ipynb index 02a8faa77a450..3a38446a30d75 100644 --- a/cookbook/elasticsearch_db_qa.ipynb +++ b/cookbook/elasticsearch_db_qa.ipynb @@ -39,7 +39,7 @@ "source": [ "from elasticsearch import Elasticsearch\n", "from langchain.chains.elasticsearch_database import ElasticsearchDatabaseChain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/extraction_openai_tools.ipynb b/cookbook/extraction_openai_tools.ipynb index 23a8c1f2d7b66..dae98315f7fbf 100644 --- a/cookbook/extraction_openai_tools.ipynb +++ b/cookbook/extraction_openai_tools.ipynb @@ -22,8 +22,8 @@ "from typing import List, Optional\n", "\n", "from langchain.chains.openai_tools import create_extraction_chain_pydantic\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain_core.pydantic_v1 import BaseModel" + "from langchain_core.pydantic_v1 import BaseModel\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -153,7 +153,7 @@ "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", "from langchain_core.runnables import Runnable\n", "from langchain_core.pydantic_v1 import BaseModel\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.messages import SystemMessage\n", "from langchain_core.language_models import BaseLanguageModel\n", "\n", diff --git a/cookbook/fake_llm.ipynb b/cookbook/fake_llm.ipynb index 016f3e9fcced7..7d6fb84bb1338 100644 --- a/cookbook/fake_llm.ipynb +++ b/cookbook/fake_llm.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM" + "from langchain_community.llms.fake import FakeListLLM" ] }, { diff --git a/cookbook/forward_looking_retrieval_augmented_generation.ipynb b/cookbook/forward_looking_retrieval_augmented_generation.ipynb index ff17aac14a8b0..0abfe0bfeff60 100644 --- a/cookbook/forward_looking_retrieval_augmented_generation.ipynb +++ 
b/cookbook/forward_looking_retrieval_augmented_generation.ipynb @@ -73,10 +73,9 @@ " AsyncCallbackManagerForRetrieverRun,\n", " CallbackManagerForRetrieverRun,\n", ")\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.llms import OpenAI\n", "from langchain.schema import BaseRetriever, Document\n", - "from langchain.utilities import GoogleSerperAPIWrapper" + "from langchain_community.utilities import GoogleSerperAPIWrapper\n", + "from langchain_openai import ChatOpenAI, OpenAI" ] }, { diff --git a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb index 8313966cb41a9..e2e6694405844 100644 --- a/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb +++ b/cookbook/generative_agents_interactive_simulacra_of_human_behavior.ipynb @@ -47,11 +47,10 @@ "from datetime import datetime, timedelta\n", "from typing import List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.docstore import InMemoryDocstore\n", - "from langchain.embeddings import OpenAIEmbeddings\n", "from langchain.retrievers import TimeWeightedVectorStoreRetriever\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "from termcolor import colored" ] }, diff --git a/cookbook/hugginggpt.ipynb b/cookbook/hugginggpt.ipynb index 41fe127f0bc52..751948e88d33a 100644 --- a/cookbook/hugginggpt.ipynb +++ b/cookbook/hugginggpt.ipynb @@ -75,8 +75,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain_experimental.autonomous_agents import HuggingGPT\n", + "from langchain_openai import OpenAI\n", "\n", "# %env OPENAI_API_BASE=http://localhost:8000/v1" ] diff --git a/docs/docs/modules/agents/tools/human_approval.ipynb b/cookbook/human_approval.ipynb similarity index 99% rename from docs/docs/modules/agents/tools/human_approval.ipynb rename to cookbook/human_approval.ipynb index a01b7269cefa8..59e46bbc4ef4a 100644 --- a/docs/docs/modules/agents/tools/human_approval.ipynb +++ b/cookbook/human_approval.ipynb @@ -159,7 +159,7 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", - "from langchain.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/human_input_chat_model.ipynb b/cookbook/human_input_chat_model.ipynb index 35a2f5969d0a2..e2ecbfc951f14 100644 --- a/cookbook/human_input_chat_model.ipynb +++ b/cookbook/human_input_chat_model.ipynb @@ -20,7 +20,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models.human import HumanInputChatModel" + "from langchain_community.chat_models.human import HumanInputChatModel" ] }, { diff --git a/cookbook/human_input_llm.ipynb b/cookbook/human_input_llm.ipynb index c06da208d38e4..fa8a877408289 100644 --- a/cookbook/human_input_llm.ipynb +++ b/cookbook/human_input_llm.ipynb @@ -19,7 +19,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms.human import HumanInputLLM" + "from langchain_community.llms.human import HumanInputLLM" ] }, { diff --git a/cookbook/hypothetical_document_embeddings.ipynb b/cookbook/hypothetical_document_embeddings.ipynb index d815aa9c443b9..58cde25fe9cba 100644 --- a/cookbook/hypothetical_document_embeddings.ipynb +++ b/cookbook/hypothetical_document_embeddings.ipynb @@ -21,9 +21,8 @@ "outputs": [], "source": [ "from langchain.chains 
import HypotheticalDocumentEmbedder, LLMChain\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings" ] }, { @@ -172,7 +171,7 @@ "outputs": [], "source": [ "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", "\n", "with open(\"../../state_of_the_union.txt\") as f:\n", " state_of_the_union = f.read()\n", diff --git a/cookbook/learned_prompt_optimization.ipynb b/cookbook/learned_prompt_optimization.ipynb index eded9d6804d8a..b7894d4482caa 100644 --- a/cookbook/learned_prompt_optimization.ipynb +++ b/cookbook/learned_prompt_optimization.ipynb @@ -49,7 +49,7 @@ "source": [ "# pick and configure the LLM of your choice\n", "\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/llm_bash.ipynb b/cookbook/llm_bash.ipynb index c247908acf5e9..61a56f17836f6 100644 --- a/cookbook/llm_bash.ipynb +++ b/cookbook/llm_bash.ipynb @@ -43,8 +43,8 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", "from langchain_experimental.llm_bash.base import LLMBashChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "\n", diff --git a/cookbook/llm_checker.ipynb b/cookbook/llm_checker.ipynb index eea872bf719dc..4c128fdc2afab 100644 --- a/cookbook/llm_checker.ipynb +++ b/cookbook/llm_checker.ipynb @@ -42,7 +42,7 @@ ], "source": [ "from langchain.chains import LLMCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0.7)\n", "\n", diff --git a/cookbook/llm_math.ipynb b/cookbook/llm_math.ipynb index 0e2079b95589a..6260be2f0351b 100644 --- a/cookbook/llm_math.ipynb +++ b/cookbook/llm_math.ipynb @@ -46,7 +46,7 @@ ], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_math = LLMMathChain.from_llm(llm, verbose=True)\n", diff --git a/cookbook/llm_summarization_checker.ipynb b/cookbook/llm_summarization_checker.ipynb index f4679f2463d5e..ed3f1087164a8 100644 --- a/cookbook/llm_summarization_checker.ipynb +++ b/cookbook/llm_summarization_checker.ipynb @@ -331,7 +331,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=2)\n", @@ -822,7 +822,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, verbose=True, max_checks=3)\n", @@ -1096,7 +1096,7 @@ ], "source": [ "from langchain.chains import LLMSummarizationCheckerChain\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "checker_chain = LLMSummarizationCheckerChain.from_llm(llm, max_checks=3, verbose=True)\n", diff --git a/cookbook/llm_symbolic_math.ipynb 
b/cookbook/llm_symbolic_math.ipynb index bcd500b76c85c..69ccbaf072acf 100644 --- a/cookbook/llm_symbolic_math.ipynb +++ b/cookbook/llm_symbolic_math.ipynb @@ -14,8 +14,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", "from langchain_experimental.llm_symbolic_math.base import LLMSymbolicMathChain\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "llm_symbolic_math = LLMSymbolicMathChain.from_llm(llm)" diff --git a/cookbook/meta_prompt.ipynb b/cookbook/meta_prompt.ipynb index 2339907a269f4..746d3a42032c0 100644 --- a/cookbook/meta_prompt.ipynb +++ b/cookbook/meta_prompt.ipynb @@ -57,9 +57,9 @@ "outputs": [], "source": [ "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferWindowMemory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/multi_modal_QA.ipynb b/cookbook/multi_modal_QA.ipynb index 2c52034e2c264..160b721116efc 100644 --- a/cookbook/multi_modal_QA.ipynb +++ b/cookbook/multi_modal_QA.ipynb @@ -91,8 +91,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain_core.messages import HumanMessage, SystemMessage" + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multi_modal_RAG_chroma.ipynb b/cookbook/multi_modal_RAG_chroma.ipynb index 7df53b8ff9e81..0af89590bf673 100644 --- a/cookbook/multi_modal_RAG_chroma.ipynb +++ b/cookbook/multi_modal_RAG_chroma.ipynb @@ -187,7 +187,7 @@ "\n", "import chromadb\n", "import numpy as np\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.vectorstores import Chroma\n", "from langchain_experimental.open_clip import OpenCLIPEmbeddings\n", "from PIL import Image as _PILImage\n", "\n", @@ -315,10 +315,10 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain_core.messages import HumanMessage, SystemMessage\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def prompt_func(data_dict):\n", diff --git a/cookbook/multi_modal_output_agent.ipynb b/cookbook/multi_modal_output_agent.ipynb index 71f39e31a74e3..e5929ead11c0c 100644 --- a/cookbook/multi_modal_output_agent.ipynb +++ b/cookbook/multi_modal_output_agent.ipynb @@ -43,8 +43,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.llms import OpenAI\n", - "from langchain.tools import SteamshipImageGenerationTool" + "from langchain.tools import SteamshipImageGenerationTool\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/multi_player_dnd.ipynb b/cookbook/multi_player_dnd.ipynb index 3a9c3e4b01221..05c4d45914678 100644 --- a/cookbook/multi_player_dnd.ipynb +++ b/cookbook/multi_player_dnd.ipynb @@ -28,11 +28,11 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_authoritarian.ipynb b/cookbook/multiagent_authoritarian.ipynb 
index 18b2fcb7815e3..893b35f7c7868 100644 --- a/cookbook/multiagent_authoritarian.ipynb +++ b/cookbook/multiagent_authoritarian.ipynb @@ -33,7 +33,6 @@ "from typing import Callable, List\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.prompts import (\n", " PromptTemplate,\n", @@ -41,7 +40,8 @@ "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/multiagent_bidding.ipynb b/cookbook/multiagent_bidding.ipynb index 7ee0d7321ee90..fbb9f03f53d1f 100644 --- a/cookbook/multiagent_bidding.ipynb +++ b/cookbook/multiagent_bidding.ipynb @@ -27,13 +27,13 @@ "from typing import Callable, List\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.prompts import PromptTemplate\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/myscale_vector_sql.ipynb b/cookbook/myscale_vector_sql.ipynb index af50a5a154aed..d26ac19d7350c 100644 --- a/cookbook/myscale_vector_sql.ipynb +++ b/cookbook/myscale_vector_sql.ipynb @@ -31,10 +31,10 @@ "from os import environ\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.utilities import SQLDatabase\n", + "from langchain_community.utilities import SQLDatabase\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "from sqlalchemy import MetaData, create_engine\n", "\n", "MYSCALE_HOST = \"msc-4a9e710a.us-east-1.aws.staging.myscale.cloud\"\n", @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceInstructEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceInstructEmbeddings\n", "from langchain_experimental.sql.vector_sql import VectorSQLOutputParser\n", "\n", "output_parser = VectorSQLOutputParser.from_embeddings(\n", @@ -75,10 +75,10 @@ "outputs": [], "source": [ "from langchain.callbacks import StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities.sql_database import SQLDatabase\n", + "from langchain_community.utilities.sql_database import SQLDatabase\n", "from langchain_experimental.sql.prompt import MYSCALE_PROMPT\n", "from langchain_experimental.sql.vector_sql import VectorSQLDatabaseChain\n", + "from langchain_openai import OpenAI\n", "\n", "chain = VectorSQLDatabaseChain(\n", " llm_chain=LLMChain(\n", @@ -117,7 +117,6 @@ "outputs": [], "source": [ "from langchain.chains.qa_with_sources.retrieval import RetrievalQAWithSourcesChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain_experimental.retrievers.vector_sql_database import (\n", " VectorSQLDatabaseChainRetriever,\n", ")\n", @@ -126,6 +125,7 @@ " VectorSQLDatabaseChain,\n", " VectorSQLRetrieveAllOutputParser,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "output_parser_retrieve_all = VectorSQLRetrieveAllOutputParser.from_embeddings(\n", " output_parser.model\n", diff --git a/cookbook/openai_functions_retrieval_qa.ipynb b/cookbook/openai_functions_retrieval_qa.ipynb index f5bce419330ab..648b28b5e2c17 100644 --- a/cookbook/openai_functions_retrieval_qa.ipynb +++ 
b/cookbook/openai_functions_retrieval_qa.ipynb @@ -20,10 +20,10 @@ "outputs": [], "source": [ "from langchain.chains import RetrievalQA\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma" + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAIEmbeddings" ] }, { @@ -52,8 +52,8 @@ "source": [ "from langchain.chains import create_qa_with_sources_chain\n", "from langchain.chains.combine_documents.stuff import StuffDocumentsChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/openai_v1_cookbook.ipynb b/cookbook/openai_v1_cookbook.ipynb index a8bcc2cf541fa..298c6c8aa3650 100644 --- a/cookbook/openai_v1_cookbook.ipynb +++ b/cookbook/openai_v1_cookbook.ipynb @@ -28,8 +28,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain_core.messages import HumanMessage, SystemMessage" + "from langchain_core.messages import HumanMessage, SystemMessage\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -414,7 +414,7 @@ "BREAKING CHANGES:\n", "- To use Azure embeddings with OpenAI V1, you'll need to use the new `AzureOpenAIEmbeddings` instead of the existing `OpenAIEmbeddings`. `OpenAIEmbeddings` continue to work when using Azure with `openai<1`.\n", "```python\n", - "from langchain.embeddings import AzureOpenAIEmbeddings\n", + "from langchain_openai import AzureOpenAIEmbeddings\n", "```\n", "\n", "\n", @@ -456,8 +456,8 @@ "from typing import Literal\n", "\n", "from langchain.output_parsers.openai_tools import PydanticToolsParser\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.utils.openai_functions import convert_pydantic_to_openai_tool\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.pydantic_v1 import BaseModel, Field\n", "\n", "\n", diff --git a/cookbook/petting_zoo.ipynb b/cookbook/petting_zoo.ipynb index bfb8c1a6e90e5..c0db7653b0919 100644 --- a/cookbook/petting_zoo.ipynb +++ b/cookbook/petting_zoo.ipynb @@ -47,12 +47,12 @@ "import inspect\n", "\n", "import tenacity\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.output_parsers import RegexParser\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/plan_and_execute_agent.ipynb b/cookbook/plan_and_execute_agent.ipynb index e37ee550be1ed..d710514658c21 100644 --- a/cookbook/plan_and_execute_agent.ipynb +++ b/cookbook/plan_and_execute_agent.ipynb @@ -30,15 +30,14 @@ "outputs": [], "source": [ "from langchain.chains import LLMMathChain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.llms import OpenAI\n", - "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", + "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.tools import Tool\n", "from langchain_experimental.plan_and_execute import (\n", " PlanAndExecute,\n", " load_agent_executor,\n", " load_chat_planner,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI, OpenAI" ] }, { diff 
--git a/cookbook/press_releases.ipynb b/cookbook/press_releases.ipynb index beb89f10c4a4a..30aba0a68db0d 100644 --- a/cookbook/press_releases.ipynb +++ b/cookbook/press_releases.ipynb @@ -81,8 +81,8 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.retrievers import KayAiRetriever\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo\")\n", "retriever = KayAiRetriever.create(\n", diff --git a/cookbook/program_aided_language_model.ipynb b/cookbook/program_aided_language_model.ipynb index dba6c5eef592f..17320ab8c0583 100644 --- a/cookbook/program_aided_language_model.ipynb +++ b/cookbook/program_aided_language_model.ipynb @@ -17,8 +17,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import OpenAI\n", - "from langchain_experimental.pal_chain import PALChain" + "from langchain_experimental.pal_chain import PALChain\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/qa_citations.ipynb b/cookbook/qa_citations.ipynb index 06754692ddd8f..a8dbd1c61330a 100644 --- a/cookbook/qa_citations.ipynb +++ b/cookbook/qa_citations.ipynb @@ -27,7 +27,7 @@ ], "source": [ "from langchain.chains import create_citation_fuzzy_match_chain\n", - "from langchain.chat_models import ChatOpenAI" + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb index f0d1822b1e741..082c12eacfb21 100644 --- a/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb +++ b/cookbook/qianfan_baidu_elasticesearch_RAG.ipynb @@ -59,11 +59,13 @@ "from baidubce.auth.bce_credentials import BceCredentials\n", "from baidubce.bce_client_configuration import BceClientConfiguration\n", "from langchain.chains.retrieval_qa import RetrievalQA\n", - "from langchain.document_loaders.baiducloud_bos_directory import BaiduBOSDirectoryLoader\n", - "from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n", - "from langchain.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import BESVectorStore" + "from langchain_community.document_loaders.baiducloud_bos_directory import (\n", + " BaiduBOSDirectoryLoader,\n", + ")\n", + "from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings\n", + "from langchain_community.llms.baidu_qianfan_endpoint import QianfanLLMEndpoint\n", + "from langchain_community.vectorstores import BESVectorStore" ] }, { diff --git a/cookbook/rag_fusion.ipynb b/cookbook/rag_fusion.ipynb index f7823fb18cb5d..976e8cfab41cb 100644 --- a/cookbook/rag_fusion.ipynb +++ b/cookbook/rag_fusion.ipynb @@ -30,8 +30,8 @@ "outputs": [], "source": [ "import pinecone\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import Pinecone\n", + "from langchain_community.vectorstores import Pinecone\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "pinecone.init(api_key=\"...\", environment=\"...\")" ] @@ -86,8 +86,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/retrieval_in_sql.ipynb b/cookbook/retrieval_in_sql.ipynb index 
1a4c27a689d92..998e9aa8dd686 100644 --- a/cookbook/retrieval_in_sql.ipynb +++ b/cookbook/retrieval_in_sql.ipynb @@ -42,8 +42,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.sql_database import SQLDatabase\n", + "from langchain_openai import ChatOpenAI\n", "\n", "CONNECTION_STRING = \"postgresql+psycopg2://postgres:test@localhost:5432/vectordb\" # Replace with your own\n", "db = SQLDatabase.from_uri(CONNECTION_STRING)" @@ -88,7 +88,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings_model = OpenAIEmbeddings()" ] @@ -219,7 +219,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"You are a Postgres expert. Given an input question, first create a syntactically correct Postgres query to run, then look at the results of the query and return the answer to the input question.\n", "Unless the user specifies in the question a specific number of examples to obtain, query for at most 5 results using the LIMIT clause as per Postgres. You can order the results to return the most informative data in the database.\n", @@ -267,9 +267,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "db = SQLDatabase.from_uri(\n", " CONNECTION_STRING\n", diff --git a/cookbook/rewrite.ipynb b/cookbook/rewrite.ipynb index 4faf2babcc54d..270d7d964edd5 100644 --- a/cookbook/rewrite.ipynb +++ b/cookbook/rewrite.ipynb @@ -31,11 +31,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", + "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/sales_agent_with_context.ipynb b/cookbook/sales_agent_with_context.ipynb index f402ecb0b92fb..158329a5f09e6 100644 --- a/cookbook/sales_agent_with_context.ipynb +++ b/cookbook/sales_agent_with_context.ipynb @@ -49,14 +49,13 @@ "from langchain.agents.conversational.prompt import FORMAT_INSTRUCTIONS\n", "from langchain.chains import LLMChain, RetrievalQA\n", "from langchain.chains.base import Chain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import BaseLLM, OpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.prompts.base import StringPromptTemplate\n", "from langchain.schema import AgentAction, AgentFinish\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.llms import BaseLLM\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import ChatOpenAI, OpenAI, OpenAIEmbeddings\n", "from pydantic import 
BaseModel, Field" ] }, diff --git a/cookbook/selecting_llms_based_on_context_length.ipynb b/cookbook/selecting_llms_based_on_context_length.ipynb index 58976419b5304..d4e22100a9306 100644 --- a/cookbook/selecting_llms_based_on_context_length.ipynb +++ b/cookbook/selecting_llms_based_on_context_length.ipynb @@ -17,10 +17,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.prompt_values import PromptValue" + "from langchain_core.prompt_values import PromptValue\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/self_query_hotel_search.ipynb b/cookbook/self_query_hotel_search.ipynb index 5a84a02f999fe..d38192c5a2cb3 100644 --- a/cookbook/self_query_hotel_search.ipynb +++ b/cookbook/self_query_hotel_search.ipynb @@ -255,7 +255,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model=\"gpt-4\")\n", "res = model.predict(\n", @@ -1083,8 +1083,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import ElasticsearchStore\n", + "from langchain_community.vectorstores import ElasticsearchStore\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "embeddings = OpenAIEmbeddings()" ] diff --git a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb b/cookbook/sharedmemory_for_tools.ipynb similarity index 99% rename from docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb rename to cookbook/sharedmemory_for_tools.ipynb index 7c1e5df5d5553..3b8efc7359085 100644 --- a/docs/docs/modules/agents/how_to/sharedmemory_for_tools.ipynb +++ b/cookbook/sharedmemory_for_tools.ipynb @@ -24,10 +24,10 @@ "source": [ "from langchain.agents import AgentExecutor, Tool, ZeroShotAgent\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n", "from langchain.prompts import PromptTemplate\n", - "from langchain.utilities import GoogleSearchAPIWrapper" + "from langchain_community.utilities import GoogleSearchAPIWrapper\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/cookbook/smart_llm.ipynb b/cookbook/smart_llm.ipynb index b7146daaae526..0e617617e35c3 100644 --- a/cookbook/smart_llm.ipynb +++ b/cookbook/smart_llm.ipynb @@ -51,9 +51,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from langchain_experimental.smart_llm import SmartLLMChain" + "from langchain_experimental.smart_llm import SmartLLMChain\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/sql_db_qa.mdx b/cookbook/sql_db_qa.mdx index 39e0a8209bfa7..73cdd953f3efd 100644 --- a/cookbook/sql_db_qa.mdx +++ b/cookbook/sql_db_qa.mdx @@ -9,8 +9,8 @@ To set it up, follow the instructions on https://database.guide/2-sample-databas ```python -from langchain.llms import OpenAI -from langchain.utilities import SQLDatabase +from langchain_openai import OpenAI +from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` @@ -200,8 +200,8 @@ result["intermediate_steps"] How to add memory to a SQLDatabaseChain: ```python -from langchain.llms 
import OpenAI -from langchain.utilities import SQLDatabase +from langchain_openai import OpenAI +from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain ``` @@ -647,7 +647,7 @@ Sometimes you may not have the luxury of using OpenAI or other service-hosted la import logging import torch from transformers import AutoTokenizer, GPT2TokenizerFast, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM -from langchain.llms import HuggingFacePipeline +from langchain_community.llms import HuggingFacePipeline # Note: This model requires a large GPU, e.g. an 80GB A100. See documentation for other ways to run private non-OpenAI models. model_id = "google/flan-ul2" @@ -679,7 +679,7 @@ local_llm = HuggingFacePipeline(pipeline=pipe) ```python -from langchain.utilities import SQLDatabase +from langchain_community.utilities import SQLDatabase from langchain_experimental.sql import SQLDatabaseChain db = SQLDatabase.from_uri("sqlite:///../../../../notebooks/Chinook.db", include_tables=['Customer']) @@ -994,9 +994,9 @@ Now that you have some examples (with manually corrected output SQL), you can do ```python from langchain.prompts import FewShotPromptTemplate, PromptTemplate from langchain.chains.sql_database.prompt import _sqlite_prompt, PROMPT_SUFFIX -from langchain.embeddings.huggingface import HuggingFaceEmbeddings +from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings from langchain.prompts.example_selector.semantic_similarity import SemanticSimilarityExampleSelector -from langchain.vectorstores import Chroma +from langchain_community.vectorstores import Chroma example_prompt = PromptTemplate( input_variables=["table_info", "input", "sql_cmd", "sql_result", "answer"], diff --git a/cookbook/stepback-qa.ipynb b/cookbook/stepback-qa.ipynb index 920d0001a2b2b..6827b04da738f 100644 --- a/cookbook/stepback-qa.ipynb +++ b/cookbook/stepback-qa.ipynb @@ -23,10 +23,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda" + "from langchain_core.prompts import ChatPromptTemplate, FewShotChatMessagePromptTemplate\n", + "from langchain_core.runnables import RunnableLambda\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -129,7 +129,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import DuckDuckGoSearchAPIWrapper\n", + "from langchain_community.utilities import DuckDuckGoSearchAPIWrapper\n", "\n", "search = DuckDuckGoSearchAPIWrapper(max_results=4)\n", "\n", diff --git a/cookbook/tree_of_thought.ipynb b/cookbook/tree_of_thought.ipynb index 7ca32eff7c2ca..63ff323ec6077 100644 --- a/cookbook/tree_of_thought.ipynb +++ b/cookbook/tree_of_thought.ipynb @@ -24,7 +24,7 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=1, max_tokens=512, model=\"gpt-3.5-turbo-instruct\")" ] diff --git a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb index 0e1998a93cdef..4f540fa5abdf9 100644 --- a/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb +++ b/cookbook/twitter-the-algorithm-analysis-deeplake.ipynb @@ -37,8 +37,8 @@ "import getpass\n", "import os\n", "\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - 
"from langchain.vectorstores import DeepLake\n", + "from langchain_community.vectorstores import DeepLake\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "os.environ[\"OPENAI_API_KEY\"] = getpass.getpass(\"OpenAI API Key:\")\n", "activeloop_token = getpass.getpass(\"Activeloop Token:\")\n", @@ -110,7 +110,7 @@ "source": [ "import os\n", "\n", - "from langchain.document_loaders import TextLoader\n", + "from langchain_community.document_loaders import TextLoader\n", "\n", "root_dir = \"./the-algorithm\"\n", "docs = []\n", @@ -3809,7 +3809,7 @@ "outputs": [], "source": [ "from langchain.chains import ConversationalRetrievalChain\n", - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(model_name=\"gpt-3.5-turbo-0613\") # switch to 'gpt-4'\n", "qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)" diff --git a/cookbook/two_agent_debate_tools.ipynb b/cookbook/two_agent_debate_tools.ipynb index 808053733ede2..b31e769dee17f 100644 --- a/cookbook/two_agent_debate_tools.ipynb +++ b/cookbook/two_agent_debate_tools.ipynb @@ -24,13 +24,13 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", "from langchain.schema import (\n", " AIMessage,\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/two_player_dnd.ipynb b/cookbook/two_player_dnd.ipynb index 627a683e1f81d..d90e4f9365fe7 100644 --- a/cookbook/two_player_dnd.ipynb +++ b/cookbook/two_player_dnd.ipynb @@ -24,11 +24,11 @@ "source": [ "from typing import Callable, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", - ")" + ")\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/cookbook/wikibase_agent.ipynb b/cookbook/wikibase_agent.ipynb index b44df0c1bc0a3..692193b0229df 100644 --- a/cookbook/wikibase_agent.ipynb +++ b/cookbook/wikibase_agent.ipynb @@ -599,7 +599,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", + "from langchain_openai import ChatOpenAI\n", "\n", "llm = ChatOpenAI(model_name=\"gpt-4\", temperature=0)" ] diff --git a/docs/.local_build.sh b/docs/.local_build.sh index 77b604ededcb7..72e15dec90a4a 100755 --- a/docs/.local_build.sh +++ b/docs/.local_build.sh @@ -20,4 +20,4 @@ wget https://raw.githubusercontent.com/langchain-ai/langserve/main/README.md -O yarn -quarto preview docs +poetry run quarto preview docs diff --git a/docs/docs/_templates/integration.mdx b/docs/docs/_templates/integration.mdx index addb12042b992..234c8cc09eefa 100644 --- a/docs/docs/_templates/integration.mdx +++ b/docs/docs/_templates/integration.mdx @@ -32,7 +32,7 @@ There isn't any special setup for it. See a [usage example](/docs/integrations/llms/INCLUDE_REAL_NAME). 
```python -from langchain.llms import integration_class_REPLACE_ME +from langchain_community.llms import integration_class_REPLACE_ME ``` ## Text Embedding Models @@ -40,7 +40,7 @@ from langchain.llms import integration_class_REPLACE_ME See a [usage example](/docs/integrations/text_embedding/INCLUDE_REAL_NAME) ```python -from langchain.embeddings import integration_class_REPLACE_ME +from langchain_community.embeddings import integration_class_REPLACE_ME ``` ## Chat models @@ -48,7 +48,7 @@ from langchain.embeddings import integration_class_REPLACE_ME See a [usage example](/docs/integrations/chat/INCLUDE_REAL_NAME) ```python -from langchain.chat_models import integration_class_REPLACE_ME +from langchain_community.chat_models import integration_class_REPLACE_ME ``` ## Document Loader @@ -56,5 +56,5 @@ from langchain.chat_models import integration_class_REPLACE_ME See a [usage example](/docs/integrations/document_loaders/INCLUDE_REAL_NAME). ```python -from langchain.document_loaders import integration_class_REPLACE_ME +from langchain_community.document_loaders import integration_class_REPLACE_ME ``` diff --git a/docs/docs/changelog/core.mdx b/docs/docs/changelog/core.mdx new file mode 100644 index 0000000000000..9c43d501fcbaf --- /dev/null +++ b/docs/docs/changelog/core.mdx @@ -0,0 +1,27 @@ +# langchain-core + +## 0.1.7 (Jan 5, 2024) + +#### Deleted + +No deletions. + +#### Deprecated + +- `BaseChatModel` methods `__call__`, `call_as_llm`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.invoke` instead. +- `BaseChatModel` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseChatModel.ainvoke` instead. +- `BaseLLM` methods `__call__`, `predict`, `predict_messages`. Will be removed in 0.2.0. Use `BaseLLM.invoke` instead. +- `BaseLLM` methods `apredict`, `apredict_messages`. Will be removed in 0.2.0. Use `BaseLLM.ainvoke` instead. + +#### Fixed + +- Restrict recursive URL scraping: [#15559](https://github.com/langchain-ai/langchain/pull/15559) + +#### Added + +No additions. + +#### Beta + +- Marked `langchain_core.load.load` and `langchain_core.load.loads` as beta. +- Marked `langchain_core.beta.runnables.context.ContextGet` and `langchain_core.beta.runnables.context.ContextSet` as beta. diff --git a/docs/docs/changelog/langchain.mdx b/docs/docs/changelog/langchain.mdx new file mode 100644 index 0000000000000..bffcce729a953 --- /dev/null +++ b/docs/docs/changelog/langchain.mdx @@ -0,0 +1,35 @@ +# langchain + +## 0.1.0 (Jan 5, 2024) + +#### Deleted + +No deletions.
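The method deprecations in `langchain-core` above all route callers to the runnable interface. A minimal sketch of the `invoke` migration, assuming a chat model integration such as `langchain-openai` is installed:

```python
from langchain_openai import ChatOpenAI

model = ChatOpenAI()

# Deprecated in 0.1.x, slated for removal in 0.2.0:
# model.predict("Tell me a joke")
# model("Tell me a joke")

# Replacement: every runnable exposes invoke (and ainvoke for async).
message = model.invoke("Tell me a joke")
print(message.content)
```

The async variants follow the same pattern: `apredict` and `apredict_messages` become `await model.ainvoke(...)`.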
+ +#### Deprecated + +Deprecated classes and methods will be removed in 0.2.0 + +| Deprecated | Alternative | Reason | +|---------------------------------|-----------------------------------|------------------------------------------------| +| ChatVectorDBChain | ConversationalRetrievalChain | More general to all retrievers | +| create_ernie_fn_chain | create_ernie_fn_runnable | Use LCEL under the hood | +| create_structured_output_chain | create_structured_output_runnable | Use LCEL under the hood | +| NatBotChain | | Not used | +| create_openai_fn_chain | create_openai_fn_runnable | Use LCEL under the hood | +| load_query_constructor_chain | load_query_constructor_runnable | Use LCEL under the hood | +| VectorDBQA | RetrievalQA | More general to all retrievers | +| SequentialChain | LCEL | Obviated by LCEL | +| SimpleSequentialChain | LCEL | Obviated by LCEL | +| TransformChain | LCEL/RunnableLambda | Obviated by LCEL | +| create_tagging_chain | create_structured_output_runnable | Use LCEL under the hood | +| ChatAgent | create_react_agent | Use LCEL builder over a class | +| ConversationalAgent | create_react_agent | Use LCEL builder over a class | +| ConversationalChatAgent | create_json_chat_agent | Use LCEL builder over a class | +| initialize_agent | Individual create agent methods | Individual create agent methods are more clear | +| ZeroShotAgent | create_react_agent | Use LCEL builder over a class | +| OpenAIFunctionsAgent | create_openai_functions_agent | Use LCEL builder over a class | +| OpenAIMultiFunctionsAgent | create_openai_tools_agent | Use LCEL builder over a class | +| SelfAskWithSearchAgent | create_self_ask_with_search | Use LCEL builder over a class | +| StructuredChatAgent | create_structured_chat_agent | Use LCEL builder over a class | +| XMLAgent | create_xml_agent | Use LCEL builder over a class | \ No newline at end of file diff --git a/docs/docs/community.md b/docs/docs/community.md deleted file mode 100644 index 81749dffafde7..0000000000000 --- a/docs/docs/community.md +++ /dev/null @@ -1,53 +0,0 @@ -# Community navigator - -Hi! Thanks for being here. We’re lucky to have a community of so many passionate developers building with LangChain–we have so much to teach and learn from each other. Community members contribute code, host meetups, write blog posts, amplify each other’s work, become each other's customers and collaborators, and so much more. - -Whether you’re new to LangChain, looking to go deeper, or just want to get more exposure to the world of building with LLMs, this page can point you in the right direction. - -- **🦜 Contribute to LangChain** - -- **🌍 Meetups, Events, and Hackathons** - -- **📣 Help Us Amplify Your Work** - -- **💬 Stay in the loop** - - -# 🦜 Contribute to LangChain - -LangChain is the product of over 5,000+ contributions by 1,500+ contributors, and there is ******still****** so much to do together. Here are some ways to get involved: - -- **[Open a pull request](https://github.com/langchain-ai/langchain/issues):** We’d appreciate all forms of contributions–new features, infrastructure improvements, better documentation, bug fixes, etc. If you have an improvement or an idea, we’d love to work on it with you.
-- **[Read our contributor guidelines:](./contributing/)** We ask contributors to follow a ["fork and pull request"](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow, run a few local checks for formatting, linting, and testing before submitting, and follow certain documentation and testing conventions. - - **First time contributor?** [Try one of these PRs with the “good first issue” tag](https://github.com/langchain-ai/langchain/contribute). -- **Become an expert:** Our experts help the community by answering product questions in Discord. If that’s a role you’d like to play, we’d be so grateful! (And we have some special experts-only goodies/perks we can tell you more about). Send us an email to introduce yourself at hello@langchain.dev and we’ll take it from there! -- **Integrate with LangChain:** If your product integrates with LangChain–or aspires to–we want to help make sure the experience is as smooth as possible for you and end users. Send us an email at hello@langchain.dev and tell us what you’re working on. - - **Become an Integration Maintainer:** Partner with our team to ensure your integration stays up-to-date and talk directly with users (and answer their inquiries) in our Discord. Introduce yourself at hello@langchain.dev if you’d like to explore this role. - - -# 🌍 Meetups, Events, and Hackathons - -One of our favorite things about working in AI is how much enthusiasm there is for building together. We want to help make that as easy and impactful for you as possible! -- **Find a meetup, hackathon, or webinar:** You can find the one for you on our [global events calendar](https://mirror-feeling-d80.notion.site/0bc81da76a184297b86ca8fc782ee9a3?v=0d80342540df465396546976a50cfb3f). - - **Submit an event to our calendar:** Email us at events@langchain.dev with a link to your event page! We can also help you spread the word with our local communities. -- **Host a meetup:** If you want to bring a group of builders together, we want to help! We can publicize your event on our event calendar/Twitter, share it with our local communities in Discord, send swag, or potentially hook you up with a sponsor. Email us at events@langchain.dev to tell us about your event! -- **Become a meetup sponsor:** We often hear from groups of builders that want to get together, but are blocked or limited on some dimension (space to host, budget for snacks, prizes to distribute, etc.). If you’d like to help, send us an email to events@langchain.dev we can share more about how it works! -- **Speak at an event:** Meetup hosts are always looking for great speakers, presenters, and panelists. If you’d like to do that at an event, send us an email to hello@langchain.dev with more information about yourself, what you want to talk about, and what city you’re based in and we’ll try to match you with an upcoming event! -- **Tell us about your LLM community:** If you host or participate in a community that would welcome support from LangChain and/or our team, send us an email at hello@langchain.dev and let us know how we can help. - -# 📣 Help Us Amplify Your Work - -If you’re working on something you’re proud of, and think the LangChain community would benefit from knowing about it, we want to help you show it off. - -- **Post about your work and mention us:** We love hanging out on Twitter to see what people in the space are talking about and working on. If you tag [@langchainai](https://twitter.com/LangChainAI), we’ll almost certainly see it and can show you some love. 
-- **Publish something on our blog:** If you’re writing about your experience building with LangChain, we’d love to post (or crosspost) it on our blog! E-mail hello@langchain.dev with a draft of your post! Or even an idea for something you want to write about. -- **Get your product onto our [integrations hub](https://integrations.langchain.com/):** Many developers take advantage of our seamless integrations with other products, and come to our integrations hub to find out who those are. If you want to get your product up there, tell us about it (and how it works with LangChain) at hello@langchain.dev. - -# ☀️ Stay in the loop - -Here’s where our team hangs out, talks shop, spotlights cool work, and shares what we’re up to. We’d love to see you there too. - -- **[Twitter](https://twitter.com/LangChainAI):** We post about what we’re working on and what cool things we’re seeing in the space. If you tag @langchainai in your post, we’ll almost certainly see it, and can show you some love! -- **[Discord](https://discord.gg/6adMQxSpJS):** connect with over 30,000 developers who are building with LangChain. -- **[GitHub](https://github.com/langchain-ai/langchain):** Open pull requests, contribute to a discussion, and/or contribute -- **[Subscribe to our bi-weekly Release Notes](https://6w1pwbss0py.typeform.com/to/KjZB1auB):** a twice/month email roundup of the coolest things going on in our orbit diff --git a/docs/docs/contributing/faq.mdx b/docs/docs/contributing/faq.mdx new file mode 100644 index 0000000000000..e0e81564a4992 --- /dev/null +++ b/docs/docs/contributing/faq.mdx @@ -0,0 +1,26 @@ +--- +sidebar_position: 6 +sidebar_label: FAQ +--- +# Frequently Asked Questions + +## Pull Requests (PRs) + +### How do I allow maintainers to edit my PR? + +When you submit a pull request, there may be additional changes +necessary before merging it. Oftentimes, it is more efficient for the +maintainers to make these changes themselves before merging, rather than asking you +to do so in code review. + +By default, most pull requests will have a +`✅ Maintainers are allowed to edit this pull request.` +badge in the right-hand sidebar. + +If you do not see this badge, you may have this setting off for the fork you are +pull-requesting from. See [this GitHub docs page](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/allowing-changes-to-a-pull-request-branch-created-from-a-fork) +for more information. + +Notably, GitHub doesn't allow this setting to be enabled for forks in **organizations** ([issue](https://github.com/orgs/community/discussions/5634)). +If you are working in an organization, we recommend submitting your PR from a personal +fork in order to enable this setting. diff --git a/docs/docs/contributing/index.mdx b/docs/docs/contributing/index.mdx index 3b4f6609dd343..366664ed039f4 100644 --- a/docs/docs/contributing/index.mdx +++ b/docs/docs/contributing/index.mdx @@ -12,9 +12,9 @@ As an open-source project in a rapidly developing field, we are extremely open t There are many ways to contribute to LangChain. Here are some common ways people contribute: -- [**Documentation**](./documentation): Help improve our docs, including this one! -- [**Code**](./code): Help us write code, fix bugs, or improve our infrastructure. -- [**Integrations**](./integrations): Help us integrate with your favorite vendors and tools. +- [**Documentation**](./documentation.mdx): Help improve our docs, including this one!
+- [**Code**](./code.mdx): Help us write code, fix bugs, or improve our infrastructure. +- [**Integrations**](./integrations.mdx): Help us integrate with your favorite vendors and tools. ### 🚩GitHub Issues @@ -40,3 +40,8 @@ smooth for future contributors. In a similar vein, we do enforce certain linting, formatting, and documentation standards in the codebase. If you are finding these difficult (or even just annoying) to work with, feel free to contact a maintainer for help - we do not want these to get in the way of getting good code into the codebase. + +# 🌟 Recognition + +If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)! +If you have a Twitter account you would like us to mention, please let us know in the PR or through another means. \ No newline at end of file diff --git a/docs/docs/contributing/integrations.mdx b/docs/docs/contributing/integrations.mdx index dc9ab30fdf363..f2820cf7f3fec 100644 --- a/docs/docs/contributing/integrations.mdx +++ b/docs/docs/contributing/integrations.mdx @@ -53,9 +53,9 @@ And we would write tests in: - Integration tests: `libs/community/tests/integration_tests/chat_models/test_parrot_link.py` And add documentation to: + - `docs/docs/integrations/chat/parrot_link.ipynb` -- `docs/docs/ ## Partner Packages Partner packages are in `libs/partners/*` and are installed by users with `pip install langchain-{partner}`, and exported members can be imported with code like diff --git a/docs/docs/contributing/packages.mdx b/docs/docs/contributing/packages.mdx deleted file mode 100644 index 9613699e7146c..0000000000000 --- a/docs/docs/contributing/packages.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -sidebar_label: Package Versioning -sidebar_position: 4 ---- - -# 📕 Package Versioning - -As of now, LangChain has an ad hoc release process: releases are cut with high frequency by -a maintainer and published to [PyPI](https://pypi.org/). -The different packages are versioned slightly differently. - -## `langchain-core` - -`langchain-core` is currently on version `0.1.x`. - -As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so. - -Minor version increases will occur for: - -- Breaking changes for any public interfaces NOT in `langchain_core.beta` - -Patch version increases will occur for: - -- Bug fixes -- New features -- Any changes to private interfaces -- Any changes to `langchain_core.beta` - -## `langchain` - -`langchain` is currently on version `0.0.x` - -All changes will be accompanied by a patch version increase. Any changes to public interfaces are nearly always done in a backwards compatible way and will be communicated ahead of time when they are not backwards compatible. - -We are targeting January 2024 for a release of `langchain` v0.1, at which point `langchain` will adopt the same versioning policy as `langchain-core`. - -## `langchain-community` - -`langchain-community` is currently on version `0.0.x` - -All changes will be accompanied by a patch version increase. - -## `langchain-experimental` - -`langchain-experimental` is currently on version `0.0.x` - -All changes will be accompanied by a patch version increase.
- -## Partner Packages - -Partner packages are versioned independently. - -# 🌟 Recognition - -If your contribution has made its way into a release, we will want to give you credit on Twitter (only if you want though)! -If you have a Twitter account you would like us to mention, please let us know in the PR or through another means. diff --git a/docs/docs/expression_language/cookbook/agent.ipynb b/docs/docs/expression_language/cookbook/agent.ipynb index 452c4762f76ce..5459c6cc11f64 100644 --- a/docs/docs/expression_language/cookbook/agent.ipynb +++ b/docs/docs/expression_language/cookbook/agent.ipynb @@ -12,18 +12,20 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 8, "id": "af4381de", "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentExecutor, XMLAgent, tool\n", - "from langchain.chat_models import ChatAnthropic" + "from langchain import hub\n", + "from langchain.agents import AgentExecutor, tool\n", + "from langchain.agents.output_parsers import XMLAgentOutputParser\n", + "from langchain_community.chat_models import ChatAnthropic" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "24cc8134", "metadata": {}, "outputs": [], @@ -33,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "67c0b0e4", "metadata": {}, "outputs": [], @@ -46,7 +48,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "7203b101", "metadata": {}, "outputs": [], @@ -56,18 +58,18 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "b68e756d", "metadata": {}, "outputs": [], "source": [ - "# Get prompt to use\n", - "prompt = XMLAgent.get_default_prompt()" + "# Get the prompt to use - you can modify this!\n", + "prompt = hub.pull(\"hwchase17/xml-agent-convo\")" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 7, "id": "61ab3e9a", "metadata": {}, "outputs": [], @@ -107,27 +109,27 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 12, "id": "e92f1d6f", "metadata": {}, "outputs": [], "source": [ "agent = (\n", " {\n", - " \"question\": lambda x: x[\"question\"],\n", - " \"intermediate_steps\": lambda x: convert_intermediate_steps(\n", + " \"input\": lambda x: x[\"input\"],\n", + " \"agent_scratchpad\": lambda x: convert_intermediate_steps(\n", " x[\"intermediate_steps\"]\n", " ),\n", " }\n", " | prompt.partial(tools=convert_tools(tool_list))\n", " | model.bind(stop=[\"</tool_input>\", \"</final_answer>\"])\n", - " | XMLAgent.get_default_output_parser()\n", + " | XMLAgentOutputParser()\n", ")" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 13, "id": "6ce6ec7a", "metadata": {}, "outputs": [], @@ -137,7 +139,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 14, "id": "fb5cb2e3", "metadata": {}, "outputs": [ @@ -148,10 +150,8 @@ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3m search\n", - "weather in new york\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m\n", - "\n", - "The weather in New York is 32 degrees\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m searchweather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m search\n", + "weather in New York\u001b[0m\u001b[36;1m\u001b[1;3m32 degrees\u001b[0m\u001b[32;1m\u001b[1;3m The weather in New York is 32 degrees\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] @@ -159,17 +159,17 @@ { "data": { "text/plain": [ -
"{'question': 'whats the weather in New york?',\n", + "{'input': 'whats the weather in New york?',\n", " 'output': 'The weather in New York is 32 degrees'}" ] }, - "execution_count": 9, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "agent_executor.invoke({\"question\": \"whats the weather in New york?\"})" + "agent_executor.invoke({\"input\": \"whats the weather in New york?\"})" ] }, { diff --git a/docs/docs/expression_language/cookbook/code_writing.ipynb b/docs/docs/expression_language/cookbook/code_writing.ipynb index 5da7992c73525..f8892dc1757c9 100644 --- a/docs/docs/expression_language/cookbook/code_writing.ipynb +++ b/docs/docs/expression_language/cookbook/code_writing.ipynb @@ -10,6 +10,16 @@ "Example of how to use LCEL to write Python code." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "0653c7c7", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-core langchain-experimental langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -17,12 +27,12 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import (\n", + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import (\n", " ChatPromptTemplate,\n", ")\n", - "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_experimental.utilities import PythonREPL" + "from langchain_experimental.utilities import PythonREPL\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/embedding_router.ipynb b/docs/docs/expression_language/cookbook/embedding_router.ipynb index 2123963b71aea..17bb0e31119c5 100644 --- a/docs/docs/expression_language/cookbook/embedding_router.ipynb +++ b/docs/docs/expression_language/cookbook/embedding_router.ipynb @@ -12,6 +12,16 @@ "One especially useful technique is to use embeddings to route a query to the most relevant prompt. Here's a very simple example." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "b793a0aa", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain-core langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -19,12 +29,11 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import PromptTemplate\n", "from langchain.utils.math import cosine_similarity\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import PromptTemplate\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "physics_template = \"\"\"You are a very smart physics professor. \\\n", "You are great at answering questions about physics in a concise and easy to understand manner. \\\n", diff --git a/docs/docs/expression_language/cookbook/memory.ipynb b/docs/docs/expression_language/cookbook/memory.ipynb index a5fcbbb8c2e43..c128d498e3c87 100644 --- a/docs/docs/expression_language/cookbook/memory.ipynb +++ b/docs/docs/expression_language/cookbook/memory.ipynb @@ -10,6 +10,16 @@ "This shows how to add memory to an arbitrary chain. 
Right now, you can use the memory classes but need to hook them up manually" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "18753dee", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -19,10 +29,10 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.memory import ConversationBufferMemory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_messages(\n", diff --git a/docs/docs/expression_language/cookbook/moderation.ipynb b/docs/docs/expression_language/cookbook/moderation.ipynb index 1d091e3497d60..3377b9c167e13 100644 --- a/docs/docs/expression_language/cookbook/moderation.ipynb +++ b/docs/docs/expression_language/cookbook/moderation.ipynb @@ -10,6 +10,16 @@ "This shows how to add in moderation (or other safeguards) around your LLM application." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "6acf3505", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 20, @@ -18,8 +28,8 @@ "outputs": [], "source": [ "from langchain.chains import OpenAIModerationChain\n", - "from langchain.llms import OpenAI\n", - "from langchain.prompts import ChatPromptTemplate" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import OpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/multiple_chains.ipynb b/docs/docs/expression_language/cookbook/multiple_chains.ipynb index 01a98cea328c3..60f87c3764a20 100644 --- a/docs/docs/expression_language/cookbook/multiple_chains.ipynb +++ b/docs/docs/expression_language/cookbook/multiple_chains.ipynb @@ -19,6 +19,14 @@ "Runnables can easily be used to string together multiple Chains" ] }, + { + "cell_type": "raw", + "id": "0f316b5c", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 4, @@ -39,9 +47,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt1 = ChatPromptTemplate.from_template(\"what is the city {person} is from?\")\n", "prompt2 = ChatPromptTemplate.from_template(\n", diff --git a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb index 9f6bae10cfcf1..83de75f181823 100644 --- a/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_llm_parser.ipynb @@ -35,6 +35,14 @@ "Note, you can mix and match PromptTemplate/ChatPromptTemplates and LLMs/ChatModels as you like here."
] }, + { + "cell_type": "raw", + "id": "ef79a54b", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -42,8 +50,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {foo}\")\n", "model = ChatOpenAI()\n", diff --git a/docs/docs/expression_language/cookbook/prompt_size.ipynb b/docs/docs/expression_language/cookbook/prompt_size.ipynb index 2ee6945c88aa4..d4ad50e9d1624 100644 --- a/docs/docs/expression_language/cookbook/prompt_size.ipynb +++ b/docs/docs/expression_language/cookbook/prompt_size.ipynb @@ -12,6 +12,16 @@ "With LCEL, it's easy to add custom functionality for managing the size of prompts within your chain or agent. Let's look at a simple agent example that can search Wikipedia for information." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "1846587d", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai wikipedia" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -19,19 +29,17 @@ "metadata": {}, "outputs": [], "source": [ - "# !pip install langchain wikipedia\n", - "\n", "from operator import itemgetter\n", "\n", "from langchain.agents import AgentExecutor, load_tools\n", "from langchain.agents.format_scratchpad import format_to_openai_function_messages\n", "from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain.prompts.chat import ChatPromptValue\n", "from langchain.tools import WikipediaQueryRun\n", - "from langchain.tools.render import format_tool_to_openai_function\n", - "from langchain.utilities import WikipediaAPIWrapper" + "from langchain_community.tools.convert_to_openai import format_tool_to_openai_function\n", + "from langchain_community.utilities import WikipediaAPIWrapper\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/cookbook/retrieval.ipynb b/docs/docs/expression_language/cookbook/retrieval.ipynb index f914170aa8633..e7708ca675bfc 100644 --- a/docs/docs/expression_language/cookbook/retrieval.ipynb +++ b/docs/docs/expression_language/cookbook/retrieval.ipynb @@ -26,7 +26,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install langchain openai faiss-cpu tiktoken" + "%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken" ] }, { @@ -38,12 +38,11 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", - "from langchain_core.runnables import RunnableLambda, RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, 
OpenAIEmbeddings" ] }, { diff --git a/docs/docs/expression_language/cookbook/sql_db.ipynb b/docs/docs/expression_language/cookbook/sql_db.ipynb index 8e872655ecd54..d69d6f1180724 100644 --- a/docs/docs/expression_language/cookbook/sql_db.ipynb +++ b/docs/docs/expression_language/cookbook/sql_db.ipynb @@ -19,6 +19,14 @@ "We can replicate our SQLDatabaseChain with Runnables." ] }, + { + "cell_type": "raw", + "id": "b3121aa8", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -26,7 +34,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "template = \"\"\"Based on the table schema below, write a SQL query that would answer the user's question:\n", "{schema}\n", @@ -43,7 +51,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.utilities import SQLDatabase" + "from langchain_community.utilities import SQLDatabase" ] }, { @@ -93,9 +101,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "\n", @@ -152,8 +160,7 @@ "outputs": [], "source": [ "full_chain = (\n", - " RunnablePassthrough.assign(query=sql_response)\n", - " | RunnablePassthrough.assign(\n", + " RunnablePassthrough.assign(query=sql_response).assign(\n", " schema=get_schema,\n", " response=lambda x: db.run(x[\"query\"]),\n", " )\n", diff --git a/docs/docs/expression_language/cookbook/tools.ipynb b/docs/docs/expression_language/cookbook/tools.ipynb index c0f6a99794dee..d214e8791c818 100644 --- a/docs/docs/expression_language/cookbook/tools.ipynb +++ b/docs/docs/expression_language/cookbook/tools.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install duckduckgo-search" + "%pip install --upgrade --quiet langchain langchain-openai duckduckgo-search" ] }, { @@ -27,10 +27,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.tools import DuckDuckGoSearchRun\n", - "from langchain_core.output_parsers import StrOutputParser" + "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/get_started.ipynb b/docs/docs/expression_language/get_started.ipynb index d3533eacd3472..be3cbec701872 100644 --- a/docs/docs/expression_language/get_started.ipynb +++ b/docs/docs/expression_language/get_started.ipynb @@ -30,30 +30,38 @@ "The most basic and common use case is chaining a prompt template and a model together. 
To see how this works, let's create a chain that takes a topic and generates a joke:" ] }, + { + "cell_type": "raw", + "id": "278b0027", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain-core langchain-community langchain-openai" + ] + }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 1, "id": "466b65b3", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "\"Why did the ice cream go to therapy?\\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\"" + "\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always drip when things heat up!\"" ] }, - "execution_count": 7, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\"tell me a short joke about {topic}\")\n", - "model = ChatOpenAI()\n", + "model = ChatOpenAI(model=\"gpt-4\")\n", "output_parser = StrOutputParser()\n", "\n", "chain = prompt | model | output_parser\n", @@ -89,7 +97,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 2, "id": "b8656990", "metadata": {}, "outputs": [ @@ -99,7 +107,7 @@ "ChatPromptValue(messages=[HumanMessage(content='tell me a short joke about ice cream')])" ] }, - "execution_count": 8, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } @@ -111,7 +119,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 3, "id": "e6034488", "metadata": {}, "outputs": [ @@ -121,7 +129,7 @@ "[HumanMessage(content='tell me a short joke about ice cream')]" ] }, - "execution_count": 9, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -132,7 +140,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 4, "id": "60565463", "metadata": {}, "outputs": [ @@ -142,7 +150,7 @@ "'Human: tell me a short joke about ice cream'" ] }, - "execution_count": 10, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -163,17 +171,17 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 5, "id": "33cf5f72", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "AIMessage(content=\"Why did the ice cream go to therapy? \\n\\nBecause it had too many toppings and couldn't find its cone-fidence!\")" + "AIMessage(content=\"Why don't ice creams ever get invited to parties?\\n\\nBecause they always bring a melt down!\")" ] }, - "execution_count": 11, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } @@ -193,23 +201,23 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 6, "id": "8feb05da", "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "'\\n\\nRobot: Why did the ice cream go to therapy? Because it had a rocky road.'" + "'\\n\\nRobot: Why did the ice cream truck break down? 
Because it had a meltdown!'" ] }, - "execution_count": 12, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_openai.llms import OpenAI\n", "\n", "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n", "llm.invoke(prompt_value)" @@ -324,12 +332,12 @@ "# Requires:\n", "# pip install langchain docarray tiktoken\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.vectorstores import DocArrayInMemorySearch\n", + "from langchain_community.vectorstores import DocArrayInMemorySearch\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableParallel, RunnablePassthrough\n", + "from langchain_openai.chat_models import ChatOpenAI\n", + "from langchain_openai.embeddings import OpenAIEmbeddings\n", "\n", "vectorstore = DocArrayInMemorySearch.from_texts(\n", " [\"harrison worked at kensho\", \"bears like to eat honey\"],\n", @@ -486,7 +494,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/binding.ipynb b/docs/docs/expression_language/how_to/binding.ipynb index 6f9978bcc10ee..375b863585b0d 100644 --- a/docs/docs/expression_language/how_to/binding.ipynb +++ b/docs/docs/expression_language/how_to/binding.ipynb @@ -12,6 +12,16 @@ "Suppose we have a simple prompt + model sequence:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "c5dad8b5", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -19,10 +29,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain.schema import StrOutputParser\n", - "from langchain_core.runnables import RunnablePassthrough" + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI" ] }, { diff --git a/docs/docs/expression_language/how_to/configure.ipynb b/docs/docs/expression_language/how_to/configure.ipynb index 5b7804697665f..bb508b97ede3c 100644 --- a/docs/docs/expression_language/how_to/configure.ipynb +++ b/docs/docs/expression_language/how_to/configure.ipynb @@ -34,6 +34,16 @@ "With LLMs we can configure things like temperature" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "40ed76a2", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 35, @@ -41,9 +51,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain_core.runnables import ConfigurableField\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI(temperature=0).configurable_fields(\n", " temperature=ConfigurableField(\n", @@ -263,9 +273,10 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", - "from 
langchain_core.runnables import ConfigurableField"
+ "from langchain_community.chat_models import ChatAnthropic\n",
+ "from langchain_core.runnables import ConfigurableField\n",
+ "from langchain_openai import ChatOpenAI"
 ]
 },
 {
diff --git a/docs/docs/expression_language/how_to/decorator.ipynb b/docs/docs/expression_language/how_to/decorator.ipynb
new file mode 100644
index 0000000000000..e01acfafc4b1e
--- /dev/null
+++ b/docs/docs/expression_language/how_to/decorator.ipynb
@@ -0,0 +1,136 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "b45110ef",
+ "metadata": {},
+ "source": [
+ "# Create a runnable with the `@chain` decorator\n",
+ "\n",
+ "You can also turn an arbitrary function into a chain by adding a `@chain` decorator. This is functionally equivalent to wrapping in a [`RunnableLambda`](./functions).\n",
+ "\n",
+ "This will have the benefit of improved observability by tracing your chain correctly. Any calls to runnables inside this function will be traced as nested children.\n",
+ "\n",
+ "It will also allow you to use this like any other runnable, compose it into chains, etc.\n",
+ "\n",
+ "Let's take a look at this in action!"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23b2b564",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install --upgrade --quiet langchain langchain-openai"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 16,
+ "id": "d9370420",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
+ "from langchain_core.runnables import chain\n",
+ "from langchain_openai import ChatOpenAI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "b7f74f7e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "prompt1 = ChatPromptTemplate.from_template(\"Tell me a joke about {topic}\")\n",
+ "prompt2 = ChatPromptTemplate.from_template(\"What is the subject of this joke: {joke}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "2b0365c4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "@chain\n",
+ "def custom_chain(text):\n",
+ "    prompt_val1 = prompt1.invoke({\"topic\": text})\n",
+ "    output1 = ChatOpenAI().invoke(prompt_val1)\n",
+ "    parsed_output1 = StrOutputParser().invoke(output1)\n",
+ "    chain2 = prompt2 | ChatOpenAI() | StrOutputParser()\n",
+ "    return chain2.invoke({\"joke\": parsed_output1})"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "904d6872",
+ "metadata": {},
+ "source": [
+ "`custom_chain` is now a runnable, meaning you will need to use `invoke`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 21,
+ "id": "6448bdd3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'The subject of this joke is bears.'"
+ ]
+ },
+ "execution_count": 21,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "custom_chain.invoke(\"bears\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "aa767ea9",
+ "metadata": {},
+ "source": [
+ "If you check out your LangSmith traces, you should see a `custom_chain` trace in there, with the calls to OpenAI nested underneath"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f1245bdc",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ }, 
"file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.1" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/docs/expression_language/how_to/fallbacks.ipynb b/docs/docs/expression_language/how_to/fallbacks.ipynb index cc3578106a37d..23459f8be7376 100644 --- a/docs/docs/expression_language/how_to/fallbacks.ipynb +++ b/docs/docs/expression_language/how_to/fallbacks.ipynb @@ -24,6 +24,16 @@ "IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "ebb61b1f", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -31,7 +41,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -141,7 +152,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -240,8 +251,8 @@ "outputs": [], "source": [ "# Now lets create a chain with the normal OpenAI model\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", @@ -291,7 +302,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.11.4" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/functions.ipynb b/docs/docs/expression_language/how_to/functions.ipynb index ceeb46102bcaf..9c69d2deedf64 100644 --- a/docs/docs/expression_language/how_to/functions.ipynb +++ b/docs/docs/expression_language/how_to/functions.ipynb @@ -24,6 +24,14 @@ "Note that all inputs to these functions need to be a SINGLE argument. If you have a function that accepts multiple arguments, you should write a wrapper that accepts a single input and unpacks it into multiple argument." 
] }, + { + "cell_type": "raw", + "id": "9a5fe916", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -33,9 +41,9 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnableLambda\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "def length_function(text):\n", @@ -190,7 +198,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.5" + "version": "3.10.1" } }, "nbformat": 4, diff --git a/docs/docs/expression_language/how_to/generators.ipynb b/docs/docs/expression_language/how_to/generators.ipynb index c9635c8aacd86..e43f607444b55 100644 --- a/docs/docs/expression_language/how_to/generators.ipynb +++ b/docs/docs/expression_language/how_to/generators.ipynb @@ -24,6 +24,15 @@ "## Sync version" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -32,9 +41,9 @@ "source": [ "from typing import Iterator, List\n", "\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.chat import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_openai import ChatOpenAI\n", "\n", "prompt = ChatPromptTemplate.from_template(\n", " \"Write a comma-separated list of 5 animals similar to: {animal}\"\n", diff --git a/docs/docs/expression_language/how_to/inspect.ipynb b/docs/docs/expression_language/how_to/inspect.ipynb new file mode 100644 index 0000000000000..e50d976d5f214 --- /dev/null +++ b/docs/docs/expression_language/how_to/inspect.ipynb @@ -0,0 +1,234 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "8c5eb99a", + "metadata": {}, + "source": [ + "# Inspect your runnables\n", + "\n", + "Once you create a runnable with LCEL, you may often want to inspect it to get a better sense for what is going on. This notebook covers some methods for doing so.\n", + "\n", + "First, let's create an example LCEL. 
We will create one that does retrieval"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d816e954",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install --upgrade --quiet langchain langchain-openai faiss-cpu tiktoken"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "a88f4b24",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain.vectorstores import FAISS\n",
+ "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.runnables import RunnableLambda, RunnablePassthrough\n",
+ "from langchain_openai import ChatOpenAI, OpenAIEmbeddings"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "139228c2",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "vectorstore = FAISS.from_texts(\n",
+ "    [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
+ ")\n",
+ "retriever = vectorstore.as_retriever()\n",
+ "\n",
+ "template = \"\"\"Answer the question based only on the following context:\n",
+ "{context}\n",
+ "\n",
+ "Question: {question}\n",
+ "\"\"\"\n",
+ "prompt = ChatPromptTemplate.from_template(template)\n",
+ "\n",
+ "model = ChatOpenAI()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "70e3fe93",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chain = (\n",
+ "    {\"context\": retriever, \"question\": RunnablePassthrough()}\n",
+ "    | prompt\n",
+ "    | model\n",
+ "    | StrOutputParser()\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "849e3c42",
+ "metadata": {},
+ "source": [
+ "## Get a graph\n",
+ "\n",
+ "You can get a graph of the runnable"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "2448b6c2",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "Graph(nodes={'...': Node(data=Input), '...': Node(data=VectorStoreRetriever(tags=['FAISS', 'OpenAIEmbeddings'])), '...': Node(data=RunnablePassthrough()), '...': Node(data=ChatPromptTemplate(input_variables=['context', 'question'], ...)), '...': Node(data=ChatOpenAI(...)), '...': Node(data=StrOutputParser()), '...': Node(data=StrOutputParserOutput)}, edges=[...])"
+ ]
+ },
+ "execution_count": 4,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chain.get_graph()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "065b02fb",
+ "metadata": {},
+ "source": [
+ "## Print a graph\n",
+ "\n",
+ "While that is not super legible, you can print it to get a display that's easier to understand"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "d5ab1515",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "            +---------------------------------+          \n",
+ "            | ParallelInput                   |          \n",
+ "            +---------------------------------+          \n",
+ "                     **               **                 \n",
+ "                  ***                    ***             \n",
+ "                **                          **           \n",
+ "+----------------------+              +-------------+    \n",
+ "| VectorStoreRetriever |              | Passthrough |    \n",
+ "+----------------------+              +-------------+    \n",
+ "                     **               **                 \n",
+ "                       ***         ***                   \n",
+ "                          **     **                      \n",
+ "            +----------------------------------+         \n",
+ "            | ParallelOutput                   |         \n",
+ "            +----------------------------------+         \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                  +--------------------+                  \n",
+ "                  | ChatPromptTemplate |                  \n",
+ "                  +--------------------+                  \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                      +------------+                      \n",
+ "                      | ChatOpenAI |                      \n",
+ "                      +------------+                      \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                   +-----------------+                    \n",
+ "                   | StrOutputParser |                    \n",
+ "                   +-----------------+                    \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                             *                            \n",
+ "                +-----------------------+                 \n",
+ "                | StrOutputParserOutput |                 \n",
+ "                +-----------------------+                 \n"
+ ]
+ }
+ ],
+ "source": [
+ "chain.get_graph().print_ascii()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2babf851",
+ "metadata": {},
+ "source": [
+ "## Get the prompts\n",
+ "\n",
+ "An important part of every chain is the prompts that are used. 
You can get the prompts present in the chain:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "34b2118d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[ChatPromptTemplate(input_variables=['context', 'question'], messages=[HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['context', 'question'], template='Answer the question based only on the following context:\\n{context}\\n\\nQuestion: {question}\\n'))])]"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chain.get_prompts()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "ed965769",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/expression_language/how_to/map.ipynb b/docs/docs/expression_language/how_to/map.ipynb
index 02e431899e822..67eefe5897e5b 100644
--- a/docs/docs/expression_language/how_to/map.ipynb
+++ b/docs/docs/expression_language/how_to/map.ipynb
@@ -1,7 +1,7 @@
 {
 "cells": [
 {
- "cell_type": "markdown",
+ "cell_type": "raw",
 "id": "e2596041-9b76-4e74-836f-e6235086bbf0",
 "metadata": {},
 "source": [
@@ -26,6 +26,16 @@
 "\n"
 ]
 },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2627ffd7",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install --upgrade --quiet langchain langchain-openai"
+ ]
+ },
 {
 "cell_type": "code",
 "execution_count": 3,
@@ -44,12 +54,11 @@
 }
 ],
 "source": [
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
+ "from langchain_community.vectorstores import FAISS\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.runnables import RunnablePassthrough\n",
+ "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
 "\n",
 "vectorstore = FAISS.from_texts(\n",
 "    [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -128,12 +137,11 @@
 "source": [
 "from operator import itemgetter\n",
 "\n",
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.embeddings import OpenAIEmbeddings\n",
- "from langchain.prompts import ChatPromptTemplate\n",
- "from langchain.vectorstores import FAISS\n",
+ "from langchain_community.vectorstores import FAISS\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.runnables import RunnablePassthrough\n",
+ "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n",
 "\n",
 "vectorstore = FAISS.from_texts(\n",
 "    [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n",
@@ -192,9 +200,9 @@
 }
 ],
 "source": [
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.runnables import RunnableParallel\n",
+ "from langchain_openai import ChatOpenAI\n",
 "\n", 
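+ "# this ChatOpenAI instance reads OPENAI_API_KEY from the environment and is\n",
+ "# shared by the chains composed into the RunnableParallel below\n",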
"model = ChatOpenAI()\n", "joke_chain = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\") | model\n", diff --git a/docs/docs/expression_language/how_to/message_history.ipynb b/docs/docs/expression_language/how_to/message_history.ipynb index d16ead46776fb..774c2b92d94d0 100644 --- a/docs/docs/expression_language/how_to/message_history.ipynb +++ b/docs/docs/expression_language/how_to/message_history.ipynb @@ -41,7 +41,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install -U langchain redis anthropic" + "%pip install --upgrade --quiet langchain redis anthropic" ] }, { @@ -131,10 +131,10 @@ "source": [ "from typing import Optional\n", "\n", - "from langchain.chat_models import ChatAnthropic\n", - "from langchain.memory.chat_message_histories import RedisChatMessageHistory\n", - "from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder\n", + "from langchain_community.chat_message_histories import RedisChatMessageHistory\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.chat_history import BaseChatMessageHistory\n", + "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n", "from langchain_core.runnables.history import RunnableWithMessageHistory" ] }, diff --git a/docs/docs/expression_language/how_to/passthrough.ipynb b/docs/docs/expression_language/how_to/passthrough.ipynb index 7615b7a0a5b1e..d5dba8a2cb776 100644 --- a/docs/docs/expression_language/how_to/passthrough.ipynb +++ b/docs/docs/expression_language/how_to/passthrough.ipynb @@ -28,6 +28,16 @@ "See the example below:" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "e169b952", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 11, @@ -66,7 +76,7 @@ "\n", "In the second line, we used `RunnablePastshrough.assign` with a lambda that multiplies the numerical value by 3. In this cased, `extra` was set with `{'num': 1, 'mult': 3}` which is the original value with the `mult` key added. \n", "\n", - "Finally, we also set a third key in the map with `modified` which uses a labmda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`." + "Finally, we also set a third key in the map with `modified` which uses a lambda to set a single value adding 1 to the num, which resulted in `modified` key with the value of `2`." 
] }, { @@ -97,12 +107,11 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.prompts import ChatPromptTemplate\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import ChatOpenAI, OpenAIEmbeddings\n", "\n", "vectorstore = FAISS.from_texts(\n", " [\"harrison worked at kensho\"], embedding=OpenAIEmbeddings()\n", diff --git a/docs/docs/expression_language/how_to/routing.ipynb b/docs/docs/expression_language/how_to/routing.ipynb index b78cff2629ec1..0738d4889c93a 100644 --- a/docs/docs/expression_language/how_to/routing.ipynb +++ b/docs/docs/expression_language/how_to/routing.ipynb @@ -2,6 +2,7 @@ "cells": [ { "cell_type": "markdown", + "id": "9e45e81c-e16e-4c6c-b6a3-2362e5193827", "metadata": {}, "source": [ "---\n", @@ -51,8 +52,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.chat_models import ChatAnthropic\n", "from langchain_core.output_parsers import StrOutputParser" ] }, diff --git a/docs/docs/expression_language/interface.ipynb b/docs/docs/expression_language/interface.ipynb index 30432b671b9e2..ffc9225ac41b5 100644 --- a/docs/docs/expression_language/interface.ipynb +++ b/docs/docs/expression_language/interface.ipynb @@ -50,6 +50,14 @@ "Let's take a look at these methods. To do so, we'll create a super simple PromptTemplate + ChatModel chain." ] }, + { + "cell_type": "raw", + "id": "57768739", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain-core langchain-community langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -57,8 +65,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "model = ChatOpenAI()\n", "prompt = ChatPromptTemplate.from_template(\"tell me a joke about {topic}\")\n", @@ -659,10 +667,10 @@ } ], "source": [ - "from langchain.embeddings import OpenAIEmbeddings\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", "from langchain_core.output_parsers import StrOutputParser\n", "from langchain_core.runnables import RunnablePassthrough\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "template = \"\"\"Answer the question based only on the following context:\n", "{context}\n", diff --git a/docs/docs/expression_language/why.ipynb b/docs/docs/expression_language/why.ipynb index 2cdc5d63e0c1b..4d041d4b6a0ea 100644 --- a/docs/docs/expression_language/why.ipynb +++ b/docs/docs/expression_language/why.ipynb @@ -35,6 +35,14 @@ "To better understand the value of LCEL, it's helpful to see it in action and think about how we might recreate similar functionality without it. In this walkthrough we'll do just that with our [basic example](/docs/expression_language/get_started#basic_example) from the get started section. 
We'll take our simple prompt + model chain, which under the hood already defines a lot of functionality, and see what it would take to recreate all of it."
 ]
 },
+ {
+ "cell_type": "raw",
+ "id": "b99b47ec",
+ "metadata": {},
+ "source": [
+ "%pip install --upgrade --quiet langchain-core langchain-openai"
+ ]
+ },
 {
 "cell_type": "code",
 "execution_count": null,
@@ -42,8 +50,8 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.chat_models import ChatOpenAI\n",
- "from langchain.prompts import ChatPromptTemplate\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain_core.prompts import ChatPromptTemplate\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "\n",
 "\n",
@@ -389,7 +397,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.llms import OpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "\n",
 "llm = OpenAI(model=\"gpt-3.5-turbo-instruct\")\n",
 "llm_chain = (\n",
@@ -468,7 +476,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
- "from langchain.chat_models import ChatAnthropic\n",
+ "from langchain_community.chat_models import ChatAnthropic\n",
 "\n",
 "anthropic = ChatAnthropic(model=\"claude-2\")\n",
 "anthropic_chain = (\n",
@@ -1002,11 +1010,12 @@
 "source": [
 "import os\n",
 "\n",
- "from langchain.chat_models import ChatAnthropic, ChatOpenAI\n",
- "from langchain.llms import OpenAI\n",
+ "from langchain_community.chat_models import ChatAnthropic\n",
+ "from langchain_openai import ChatOpenAI\n",
+ "from langchain_openai import OpenAI\n",
 "from langchain_core.output_parsers import StrOutputParser\n",
 "from langchain_core.prompts import ChatPromptTemplate\n",
- "from langchain_core.runnables import RunnablePassthrough\n",
+ "from langchain_core.runnables import RunnablePassthrough, ConfigurableField\n",
 "\n",
 "os.environ[\"LANGCHAIN_API_KEY\"] = \"...\"\n",
 "os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
diff --git a/docs/docs/get_started/introduction.mdx b/docs/docs/get_started/introduction.mdx
index 2cef4cc5f65c0..7452c5e2482ea 100644
--- a/docs/docs/get_started/introduction.mdx
+++ b/docs/docs/get_started/introduction.mdx
@@ -84,7 +84,7 @@ Walkthroughs and techniques for common end-to-end use cases, like:
 ### [Integrations](/docs/integrations/providers/)
 LangChain is part of a rich ecosystem of tools that integrate with our framework and build on top of it. Check out our growing list of [integrations](/docs/integrations/providers/).
-### [Guides](/docs/guides/guides/debugging)
+### [Guides](../guides/debugging.md)
 Best practices for developing with LangChain.
 ### [API reference](https://api.python.langchain.com)
diff --git a/docs/docs/get_started/quickstart.mdx b/docs/docs/get_started/quickstart.mdx
index 1367a6cf6fff0..652ef97c7c94c 100644
--- a/docs/docs/get_started/quickstart.mdx
+++ b/docs/docs/get_started/quickstart.mdx
@@ -11,6 +11,13 @@ In this quickstart we'll show you how to:
 That's a fair amount to cover! Let's dive in.
 ## Setup
+
+### Jupyter Notebook
+
+This guide (and most of the other guides in the documentation) uses [Jupyter notebooks](https://jupyter.org/) and assumes the reader does as well. Jupyter notebooks are perfect for learning how to work with LLM systems because things can often go wrong (unexpected output, an API being down, etc.), and working through the guides in an interactive environment is a great way to understand them better.
+
+You do not NEED to go through the guide in a Jupyter Notebook, but it is recommended. See [here](https://jupyter.org/install) for instructions on how to install. 
+
### Installation

To install LangChain run:

@@ -31,281 +38,438 @@ import CodeBlock from "@theme/CodeBlock";

For more details, see our [Installation guide](/docs/get_started/installation).

-### Environment
+### LangSmith

-Using LangChain will usually require integrations with one or more model providers, data stores, APIs, etc. For this example, we'll use OpenAI's model APIs.
+Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.
+As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.
+The best way to do this is with [LangSmith](https://smith.langchain.com).

-First we'll need to install their Python package:
+Note that LangSmith is not needed, but it is helpful.
+If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:

-```bash
-pip install openai
+```shell
+export LANGCHAIN_TRACING_V2="true"
+export LANGCHAIN_API_KEY="..."
+```
+
+## Building with LangChain
+
+LangChain enables building applications that connect external sources of data and computation to LLMs.
+In this quickstart, we will walk through a few different ways of doing that.
+We will start with a simple LLM chain, which just relies on information in the prompt template to respond.
+Next, we will build a retrieval chain, which fetches data from a separate database and passes that into the prompt template.
+We will then add in chat history, to create a conversation retrieval chain. This allows you to interact in a chat manner with this LLM, so it remembers previous questions.
+Finally, we will build an agent - which utilizes an LLM to determine whether or not it needs to fetch data to answer questions.
+We will cover these at a high level, but there are a lot of details to all of these!
+We will link to relevant docs.
+
+## LLM Chain
+
+For this getting started guide, we will provide two options: using OpenAI (a popular model available via API) or using a local open source model.
+
+
+
+First we'll need to import the LangChain x OpenAI integration package.
+
+```shell
+pip install langchain-openai
```

Accessing the API requires an API key, which you can get by creating an account and heading [here](https://platform.openai.com/account/api-keys). Once we have a key we'll want to set it as an environment variable by running:

-```bash
+```shell
export OPENAI_API_KEY="..."
```

+We can then initialize the model:
+
+```python
+from langchain_openai import ChatOpenAI
+
+llm = ChatOpenAI()
+```
+
If you'd prefer not to set an environment variable you can pass the key in directly via the `openai_api_key` named parameter when initiating the OpenAI LLM class:

```python
-from langchain.chat_models import ChatOpenAI
+from langchain_openai import ChatOpenAI

llm = ChatOpenAI(openai_api_key="...")
```

-### LangSmith
+
+

-Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.
-As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.
-The best way to do this is with [LangSmith](https://smith.langchain.com).
+[Ollama](https://ollama.ai/) allows you to run open-source large language models, such as Llama 2, locally.

-Note that LangSmith is not needed, but it is helpful. 
-If you do want to use LangSmith, after you sign up at the link above, make sure to set your environment variables to start logging traces:
+First, follow [these instructions](https://github.com/jmorganca/ollama) to set up and run a local Ollama instance:

-```shell
-export LANGCHAIN_TRACING_V2="true"
-export LANGCHAIN_API_KEY="..."
+* [Download](https://ollama.ai/download)
+* Fetch a model via `ollama pull llama2`
+
+Then, make sure the Ollama server is running. After that, you can do:
+```python
+from langchain_community.llms import Ollama
+llm = Ollama(model="llama2")
```

-### LangServe
+
+

-LangServe helps developers deploy LangChain chains as a REST API. You do not need to use LangServe to use LangChain, but in this guide we'll show how you can deploy your app with LangServe.
+Once you've installed and initialized the LLM of your choice, we can try using it!
+Let's ask it what LangSmith is - this is something that wasn't present in the training data so it shouldn't have a very good response.

-Install with:
-```bash
-pip install "langserve[all]"
+```python
+llm.invoke("how can langsmith help with testing?")
```

-## Building with LangChain
+We can also guide its response with a prompt template.
+Prompt templates are used to convert raw user input to a better input to the LLM.
+
+```python
+from langchain_core.prompts import ChatPromptTemplate
+prompt = ChatPromptTemplate.from_messages([
+    ("system", "You are a world-class technical documentation writer."),
+    ("user", "{input}")
+])
+```
+
+We can now combine these into a simple LLM chain:
+
+```python
+chain = prompt | llm 
+```

-LangChain provides many modules that can be used to build language model applications.
-Modules can be used as standalones in simple applications and they can be composed for more complex use cases.
-Composition is powered by **LangChain Expression Language** (LCEL), which defines a unified `Runnable` interface that many modules implement, making it possible to seamlessly chain components.
+We can now invoke it and ask the same question. It still won't know the answer, but it should respond in a more proper tone for a technical writer!

-The simplest and most common chain contains three things:
-- LLM/Chat Model: The language model is the core reasoning engine here. In order to work with LangChain, you need to understand the different types of language models and how to work with them.
-- Prompt Template: This provides instructions to the language model. This controls what the language model outputs, so understanding how to construct prompts and different prompting strategies is crucial.
-- Output Parser: These translate the raw response from the language model to a more workable format, making it easy to use the output downstream.
+```python
+chain.invoke({"input": "how can langsmith help with testing?"})
+```

-In this guide we'll cover those three components individually, and then go over how to combine them.
-Understanding these concepts will set you up well for being able to use and customize LangChain applications.
-Most LangChain applications allow you to configure the model and/or the prompt, so knowing how to take advantage of this will be a big enabler.
+The output of a ChatModel (and therefore, of this chain) is a message. However, it's often much more convenient to work with strings. Let's add a simple output parser to convert the chat message to a string. 
-### LLM / Chat Model +```python +from langchain_core.output_parsers import StrOutputParser -There are two types of language models: +output_parser = StrOutputParser() +``` -- `LLM`: underlying model takes a string as input and returns a string -- `ChatModel`: underlying model takes a list of messages as input and returns a message +We can now add this to the previous chain: -Strings are simple, but what exactly are messages? The base message interface is defined by `BaseMessage`, which has two required attributes: +```python +chain = prompt | llm | output_parser +``` -- `content`: The content of the message. Usually a string. -- `role`: The entity from which the `BaseMessage` is coming. +We can now invoke it and ask the same question. The answer will now be a string (rather than a ChatMessage). -LangChain provides several objects to easily distinguish between different roles: +```python +chain.invoke({"input": "how can langsmith help with testing?"}) +``` -- `HumanMessage`: A `BaseMessage` coming from a human/user. -- `AIMessage`: A `BaseMessage` coming from an AI/assistant. -- `SystemMessage`: A `BaseMessage` coming from the system. -- `FunctionMessage` / `ToolMessage`: A `BaseMessage` containing the output of a function or tool call. +### Diving Deeper -If none of those roles sound right, there is also a `ChatMessage` class where you can specify the role manually. +We've now successfully set up a basic LLM chain. We only touched on the basics of prompts, models, and output parsers - for a deeper dive into everything mentioned here, see [this section of documentation](/docs/modules/model_io). -LangChain provides a common interface that's shared by both `LLM`s and `ChatModel`s. -However it's useful to understand the difference in order to most effectively construct prompts for a given language model. -The simplest way to call an `LLM` or `ChatModel` is using `.invoke()`, the universal synchronous call method for all LangChain Expression Language (LCEL) objects: -- `LLM.invoke`: Takes in a string, returns a string. -- `ChatModel.invoke`: Takes in a list of `BaseMessage`, returns a `BaseMessage`. +## Retrieval Chain + +In order to properly answer the original question ("how can langsmith help with testing?"), we need to provide additional context to the LLM. +We can do this via *retrieval*. +Retrieval is useful when you have **too much data** to pass to the LLM directly. +You can then use a retriever to fetch only the most relevant pieces and pass those in. + +In this process, we will look up relevant documents from a *Retriever* and then pass them into the prompt. +A Retriever can be backed by anything - a SQL table, the internet, etc - but in this instance we will populate a vector store and use that as a retriever. For more information on vectorstores, see [this documentation](/docs/modules/data_connection/vectorstores). + +First, we need to load the data that we want to index: + + +```python +from langchain_community.document_loaders import WebBaseLoader +loader = WebBaseLoader("https://docs.smith.langchain.com/overview") + +docs = loader.load() +``` + +Next, we need to index it into a vectorstore. This requires a few components, namely an [embedding model](/docs/modules/data_connection/text_embedding) and a [vectorstore](/docs/modules/data_connection/vectorstores). -The input types for these methods are actually more general than this, but for simplicity here we can assume LLMs only take strings and Chat models only takes lists of messages. 
-Check out the "Go deeper" section below to learn more about model invocation. +For embedding models, we once again provide examples for accessing via OpenAI or via local models. -Let's see how to work with these different types of models and these different types of inputs. -First, let's import an LLM and a ChatModel. + + + +Make sure you have the `langchain_openai` package installed an the appropriate environment variables set (these are the same as needed for the LLM). ```python -from langchain.llms import OpenAI -from langchain.chat_models import ChatOpenAI +from langchain_openai import OpenAIEmbeddings -llm = OpenAI() -chat_model = ChatOpenAI() +embeddings = OpenAIEmbeddings() ``` -`LLM` and `ChatModel` objects are effectively configuration objects. -You can initialize them with parameters like `temperature` and others, and pass them around. + + + +Make sure you have Ollama running (same set up as with the LLM). ```python -from langchain.schema import HumanMessage +from langchain_community.embeddings import OllamaEmbeddings + +embeddings = OllamaEmbeddings() +``` + + + +Now, we can use this embedding model to ingest documents into a vectorstore. +We will use a simple local vectorstore, [FAISS](/docs/integrations/vectorstores/faiss), for simplicity's sake. + +First we need to install the required packages for that: + +```shell +pip install faiss-cpu +``` + +Then we can build our index: -text = "What would be a good company name for a company that makes colorful socks?" -messages = [HumanMessage(content=text)] +```python +from langchain_community.vectorstores import FAISS +from langchain.text_splitter import RecursiveCharacterTextSplitter -llm.invoke(text) -# >> Feetful of Fun -chat_model.invoke(messages) -# >> AIMessage(content="Socks O'Color") +text_splitter = RecursiveCharacterTextSplitter() +documents = text_splitter.split_documents(docs) +vector = FAISS.from_documents(documents, embeddings) ``` -
Go deeper +Now that we have this data indexed in a vectorstore, we will create a retrieval chain. +This chain will take an incoming question, look up relevant documents, then pass those documents along with the original question into an LLM and ask it to answer the original question. -`LLM.invoke` and `ChatModel.invoke` actually both support as input any of `Union[str, List[BaseMessage], PromptValue]`. -`PromptValue` is an object that defines its own custom logic for returning its inputs either as a string or as messages. -`LLM`s have logic for coercing any of these into a string, and `ChatModel`s have logic for coercing any of these to messages. -The fact that `LLM` and `ChatModel` accept the same inputs means that you can directly swap them for one another in most chains without breaking anything, -though it's of course important to think about how inputs are being coerced and how that may affect model performance. -To dive deeper on models head to the [Language models](/docs/modules/model_io/models) section. +First, let's set up the chain that takes a question and the retrieved documents and generates an answer. + +```python +from langchain.chains.combine_documents import create_stuff_documents_chain -
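# create_stuff_documents_chain "stuffs" the retrieved documents into the
# prompt's {context} placeholder and passes the formatted prompt to the LLM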
+prompt = ChatPromptTemplate.from_template("""Answer the following question based only on the provided context: -### Prompt templates + +{context} + -Most LLM applications do not pass user input directly into an LLM. Usually they will add the user input to a larger piece of text, called a prompt template, that provides additional context on the specific task at hand. +Question: {input}""") -In the previous example, the text we passed to the model contained instructions to generate a company name. For our application, it would be great if the user only had to provide the description of a company/product without worrying about giving the model instructions. +document_chain = create_stuff_documents_chain(llm, prompt) +``` -PromptTemplates help with exactly this! -They bundle up all the logic for going from user input into a fully formatted prompt. -This can start off very simple - for example, a prompt to produce the above string would just be: +If we wanted to, we could run this ourselves by passing in documents directly: ```python -from langchain.prompts import PromptTemplate +from langchain_core.documents import Document -prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?") -prompt.format(product="colorful socks") +document_chain.invoke({ + "input": "how can langsmith help with testing?", + "context": [Document(page_content="langsmith can let you visualize test results")] +}) ``` +However, we want the documents to first come from the retriever we just set up. +That way, for a given question we can use the retriever to dynamically select the most relevant documents and pass those in. + ```python -What is a good name for a company that makes colorful socks? +from langchain.chains import create_retrieval_chain + +retriever = vector.as_retriever() +retrieval_chain = create_retrieval_chain(retriever, document_chain) ``` -However, the advantages of using these over raw string formatting are several. -You can "partial" out variables - e.g. you can format only some of the variables at a time. -You can compose them together, easily combining different templates into a single prompt. -For explanations of these functionalities, see the [section on prompts](/docs/modules/model_io/prompts) for more detail. +We can now invoke this chain. This returns a dictionary - the response from the LLM is in the `answer` key -`PromptTemplate`s can also be used to produce a list of messages. -In this case, the prompt not only contains information about the content, but also each message (its role, its position in the list, etc.). -Here, what happens most often is a `ChatPromptTemplate` is a list of `ChatMessageTemplates`. -Each `ChatMessageTemplate` contains instructions for how to format that `ChatMessage` - its role, and then also its content. -Let's take a look at this below: +```python +response = retrieval_chain.invoke({"input": "how can langsmith help with testing?"}) +print(response["answer"]) + +# LangSmith offers several features that can help with testing:... +``` + +This answer should be much more accurate! + +### Diving Deeper + +We've now successfully set up a basic retrieval chain. We only touched on the basics of retrieval - for a deeper dive into everything mentioned here, see [this section of documentation](/docs/modules/data_connection). + +## Conversation Retrieval Chain + +The chain we've created so far can only answer single questions. One of the main types of LLM applications that people are building are chat bots. 
So how do we turn this chain into one that can answer follow-up questions?
+
+We can still use the `create_retrieval_chain` function, but we need to change two things:
+
+1. The retrieval method should now not just work on the most recent input, but rather should take the whole history into account.
+2. The final LLM chain should likewise take the whole history into account
+
+**Updating Retrieval**
+
+In order to update retrieval, we will create a new chain. This chain will take in the most recent input (`input`) and the conversation history (`chat_history`) and use an LLM to generate a search query.
+
+```python
+from langchain.chains import create_history_aware_retriever
+from langchain_core.prompts import MessagesPlaceholder
+
+# First we need a prompt that we can pass into an LLM to generate this search query
+
+prompt = ChatPromptTemplate.from_messages([
+    MessagesPlaceholder(variable_name="chat_history"),
+    ("user", "{input}"),
+    ("user", "Given the above conversation, generate a search query to look up in order to get information relevant to the conversation")
+])
+retriever_chain = create_history_aware_retriever(llm, retriever, prompt)
+```
+
+We can test this out by passing in an instance where the user is asking a follow-up question.
+
+```python
+from langchain_core.messages import HumanMessage, AIMessage
+
+chat_history = [HumanMessage(content="Can LangSmith help test my LLM applications?"), AIMessage(content="Yes!")]
+retriever_chain.invoke({
+    "chat_history": chat_history,
+    "input": "Tell me how"
+})
+```
+
+You should see that this returns documents about testing in LangSmith. This is because the LLM generated a new query, combining the chat history with the follow-up question.
+
+Now that we have this new retriever, we can create a new chain to continue the conversation with these retrieved documents in mind.
+
+```python
+prompt = ChatPromptTemplate.from_messages([
+    ("system", "Answer the user's questions based on the below context:\n\n{context}"),
+    MessagesPlaceholder(variable_name="chat_history"),
+    ("user", "{input}"),
+])
+document_chain = create_stuff_documents_chain(llm, prompt)
+
+retrieval_chain = create_retrieval_chain(retriever_chain, document_chain)
+```
+
+We can now test this out end-to-end:
+
+```python
+chat_history = [HumanMessage(content="Can LangSmith help test my LLM applications?"), AIMessage(content="Yes!")]
+retrieval_chain.invoke({
+    "chat_history": chat_history,
+    "input": "Tell me how"
+})
+```
+We can see that this gives a coherent answer - we've successfully turned our retrieval chain into a chatbot!
+
+## Agent
+
+We've so far created examples of chains - where each step is known ahead of time.
+The final thing we will create is an agent - where the LLM decides what steps to take. 
-`OutputParser`s convert the raw output of a language model into a format that can be used downstream. -There are a few main types of `OutputParser`s, including: +**NOTE: for this example we will only show how to create an agent using OpenAI models, as local models are not reliable enough yet.** -- Convert text from `LLM` into structured information (e.g. JSON) -- Convert a `ChatMessage` into just a string -- Convert the extra information returned from a call besides the message (like OpenAI function invocation) into a string. +One of the first things to do when building an agent is to decide what tools it should have access to. +For this example, we will give the agent access two tools: -For full information on this, see the [section on output parsers](/docs/modules/model_io/output_parsers). +1. The retriever we just created. This will let it easily answer questions about LangSmith +2. A search tool. This will let it easily answer questions that require up to date information. -In this getting started guide, we will write our own output parser - one that converts a comma separated list into a list. +First, let's set up a tool for the retriever we just created: ```python -from langchain.schema import BaseOutputParser +from langchain.tools.retriever import create_retriever_tool -class CommaSeparatedListOutputParser(BaseOutputParser): - """Parse the output of an LLM call to a comma-separated list.""" +retriever_tool = create_retriever_tool( + retriever, + "langsmith_search", + "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!", +) +``` - def parse(self, text: str): - """Parse the output of an LLM call.""" - return text.strip().split(", ") +The search tool that we will use is [Tavily](/docs/integrations/retrievers/tavily). This will require an API key (they have generous free tier). After creating it on their platform, you need to set it as an environment variable: -CommaSeparatedListOutputParser().parse("hi, bye") -# >> ['hi', 'bye'] +```shell +export TAVILY_API_KEY=... ``` +If you do not want to set up an API key, you can skip creating this tool. -### Composing with LCEL +```python +from langchain_community.tools.tavily_search import TavilySearchResults + +search = TavilySearchResults() +``` -We can now combine all these into one chain. -This chain will take input variables, pass those to a prompt template to create a prompt, pass the prompt to a language model, and then pass the output through an (optional) output parser. -This is a convenient way to bundle up a modular piece of logic. -Let's see it in action! +We can now create a list of the tools we want to work with: ```python -from typing import List +tools = [retriever_tool, search] +``` -from langchain.chat_models import ChatOpenAI -from langchain.prompts import ChatPromptTemplate -from langchain.schema import BaseOutputParser +Now that we have the tools, we can create an agent to use them. 
We will go over this pretty quickly - for a deeper dive into what exactly is going on, check out the [Agent's Getting Started documentation](/docs/modules/agents) -class CommaSeparatedListOutputParser(BaseOutputParser[List[str]]): - """Parse the output of an LLM call to a comma-separated list.""" +Install langchain hub first +```bash +pip install langchainhub +``` +Now we can use it to get a predefined prompt - def parse(self, text: str) -> List[str]: - """Parse the output of an LLM call.""" - return text.strip().split(", ") +```python +from langchain_openai import ChatOpenAI +from langchain import hub +from langchain.agents import create_openai_functions_agent +from langchain.agents import AgentExecutor + +# Get the prompt to use - you can modify this! +prompt = hub.pull("hwchase17/openai-functions-agent") +llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0) +agent = create_openai_functions_agent(llm, tools, prompt) +agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True) +``` -template = """You are a helpful assistant who generates comma separated lists. -A user will pass in a category, and you should generate 5 objects in that category in a comma separated list. -ONLY return a comma separated list, and nothing more.""" -human_template = "{text}" +We can now invoke the agent and see how it responds! We can ask it questions about LangSmith: -chat_prompt = ChatPromptTemplate.from_messages([ - ("system", template), - ("human", human_template), -]) -chain = chat_prompt | ChatOpenAI() | CommaSeparatedListOutputParser() -chain.invoke({"text": "colors"}) -# >> ['red', 'blue', 'green', 'yellow', 'orange'] +```python +agent_executor.invoke({"input": "how can langsmith help with testing?"}) +``` + +We can ask it about the weather: + +```python +agent_executor.invoke({"input": "what is the weather in SF?"}) ``` -Note that we are using the `|` syntax to join these components together. -This `|` syntax is powered by the LangChain Expression Language (LCEL) and relies on the universal `Runnable` interface that all of these objects implement. -To learn more about LCEL, read the documentation [here](/docs/expression_language). +We can have conversations with it: -## Tracing with LangSmith +```python +chat_history = [HumanMessage(content="Can LangSmith help test my LLM applications?"), AIMessage(content="Yes!")] +agent_executor.invoke({ + "chat_history": chat_history, + "input": "Tell me how" +}) +``` -Assuming we've set our environment variables as shown in the beginning, all of the model and chain calls we've been making will have been automatically logged to LangSmith. -Once there, we can use LangSmith to debug and annotate our application traces, then turn them into datasets for evaluating future iterations of the application. +### Diving Deeper -Check out what the trace for the above chain would look like: -https://smith.langchain.com/public/09370280-4330-4eb4-a7e8-c91817f6aa13/r +We've now successfully set up a basic agent. We only touched on the basics of agents - for a deeper dive into everything mentioned here, see [this section of documentation](/docs/modules/agents). -For more on LangSmith [head here](/docs/langsmith/). ## Serving with LangServe Now that we've built an application, we need to serve it. That's where LangServe comes in. -LangServe helps developers deploy LCEL chains as a REST API. -The library is integrated with FastAPI and uses pydantic for data validation. +LangServe helps developers deploy LangChain chains as a REST API. 
You do not need to use LangServe to use LangChain, but in this guide we'll show how you can deploy your app with LangServe.
+
+While the first part of this guide was intended to be run in a Jupyter Notebook, we will now move out of that. We will be creating a Python file and then interacting with it from the command line.
+
+Install with:
+```bash
+pip install "langserve[all]"
+```

### Server

-To create a server for our application we'll make a `serve.py` file with three things:
-1. The definition of our chain (same as above)
+To create a server for our application we'll make a `serve.py` file. This will contain our logic for serving our application. It consists of three things:
+1. The definition of our chain that we just built above
2. Our FastAPI app
3. A definition of a route from which to serve the chain, which is done with `langserve.add_routes`

@@ -314,44 +478,75 @@ To create a server for our application we'll make a `serve.py` file with three t
from typing import List

from fastapi import FastAPI
-from langchain.prompts import ChatPromptTemplate
-from langchain.chat_models import ChatOpenAI
-from langchain.schema import BaseOutputParser
+from langchain_core.prompts import ChatPromptTemplate
+from langchain_openai import ChatOpenAI
+from langchain_community.document_loaders import WebBaseLoader
+from langchain_openai import OpenAIEmbeddings
+from langchain_community.vectorstores import FAISS
+from langchain.text_splitter import RecursiveCharacterTextSplitter
+from langchain.tools.retriever import create_retriever_tool
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain import hub
+from langchain.agents import create_openai_functions_agent
+from langchain.agents import AgentExecutor
+from langchain.pydantic_v1 import BaseModel, Field
+from langchain_core.messages import BaseMessage
from langserve import add_routes

-# 1. Chain definition
-
-class CommaSeparatedListOutputParser(BaseOutputParser[List[str]]):
-    """Parse the output of an LLM call to a comma-separated list."""
+# 1. Load Retriever
+loader = WebBaseLoader("https://docs.smith.langchain.com/overview")
+docs = loader.load()
+text_splitter = RecursiveCharacterTextSplitter()
+documents = text_splitter.split_documents(docs)
+embeddings = OpenAIEmbeddings()
+vector = FAISS.from_documents(documents, embeddings)
+retriever = vector.as_retriever()
+
+# 2. Create Tools
+retriever_tool = create_retriever_tool(
+    retriever,
+    "langsmith_search",
+    "Search for information about LangSmith. For any questions about LangSmith, you must use this tool!",
+)
+search = TavilySearchResults()
+tools = [retriever_tool, search]

-    def parse(self, text: str) -> List[str]:
-        """Parse the output of an LLM call."""
-        return text.strip().split(", ")
+# 3. Create Agent
+prompt = hub.pull("hwchase17/openai-functions-agent")
+llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
+agent = create_openai_functions_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

-template = """You are a helpful assistant who generates comma separated lists.
-A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.
-ONLY return a comma separated list, and nothing more."""
-human_template = "{text}"
-chat_prompt = ChatPromptTemplate.from_messages([
-    ("system", template),
-    ("human", human_template),
-])
-category_chain = chat_prompt | ChatOpenAI() | CommaSeparatedListOutputParser()
-
-# 2. 
+# 4. App definition
app = FastAPI(
    title="LangChain Server",
    version="1.0",
    description="A simple API server using LangChain's Runnable interfaces",
)
-# 3. Adding chain route
+# 5. Adding chain route
+
+# We need to add these input/output schemas because the current AgentExecutor
+# is lacking in schemas.
+
+class Input(BaseModel):
+    input: str
+    chat_history: List[BaseMessage] = Field(
+        ...,
+        extra={"widget": {"type": "chat", "input": "location"}},
+    )
+
+
+class Output(BaseModel):
+    output: str
+
add_routes(
    app,
-    category_chain,
-    path="/category_chain",
+    agent_executor.with_types(input_type=Input, output_type=Output),
+    path="/agent",
)
if __name__ == "__main__":
@@ -369,19 +564,18 @@ we should see our chain being served at localhost:8000.
### Playground
Every LangServe service comes with a simple built-in UI for configuring and invoking the application with streaming output and visibility into intermediate steps.
-Head to http://localhost:8000/category_chain/playground/ to try it out!
+Head to http://localhost:8000/agent/playground/ to try it out! Pass in the same question as before - "how can langsmith help with testing?" - and it should respond the same as before.
### Client
-Now let's set up a client for programmatically interacting with our service. We can easily do this with the `langserve.RemoteRunnable`.
+Now let's set up a client for programmatically interacting with our service. We can easily do this with [`langserve.RemoteRunnable`](/docs/langserve#client).
Using this, we can interact with the served chain as if it were running client-side.
```python
from langserve import RemoteRunnable
-remote_chain = RemoteRunnable("http://localhost:8000/category_chain/")
-remote_chain.invoke({"text": "colors"})
-# >> ['red', 'blue', 'green', 'yellow', 'orange']
+remote_chain = RemoteRunnable("http://localhost:8000/agent/")
+remote_chain.invoke({"input": "how can langsmith help with testing?"})
```
To learn more about the many other features of LangServe [head here](/docs/langserve).
@@ -390,10 +584,12 @@ To learn more about the many other features of LangServe [head here](/docs/langs
We've touched on how to build an application with LangChain, how to trace it with LangSmith, and how to serve it with LangServe.
There are a lot more features in all three of these than we can cover here.
-To continue on your journey:
+To continue on your journey, we recommend you read the following (in order):
-- Read up on [LangChain Expression Language (LCEL)](/docs/expression_language) to learn how to chain these components together
-- [Dive deeper](/docs/modules/model_io) into LLMs, prompts, and output parsers and learn the other [key components](/docs/modules)
+- All of these features are backed by [LangChain Expression Language (LCEL)](/docs/expression_language) - a way to chain these components together. Check out that documentation to better understand how to create custom chains.
+- [Model IO](/docs/modules/model_io) covers more details of prompts, LLMs, and output parsers.
+- [Retrieval](/docs/modules/data_connection) covers more details of everything related to retrieval +- [Agents](/docs/modules/agents) covers details of everything related to agents - Explore common [end-to-end use cases](/docs/use_cases/qa_structured/sql) and [template applications](/docs/templates) - [Read up on LangSmith](/docs/langsmith/), the platform for debugging, testing, monitoring and more - Learn more about serving your applications with [LangServe](/docs/langserve) diff --git a/docs/docs/guides/debugging.md b/docs/docs/guides/debugging.md index a0ac5a5e894bf..e2607ad847c93 100644 --- a/docs/docs/guides/debugging.md +++ b/docs/docs/guides/debugging.md @@ -25,7 +25,7 @@ Let's suppose we have a simple agent, and want to visualize the actions it takes ```python from langchain.agents import AgentType, initialize_agent, load_tools -from langchain.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI llm = ChatOpenAI(model_name="gpt-4", temperature=0) tools = load_tools(["ddg-search", "llm-math"], llm=llm) @@ -656,6 +656,6 @@ agent.run("Who directed the 2023 film Oppenheimer and what is their age? What is ## Other callbacks -`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/how_to/filecallbackhandler). You can also implement your own callbacks to execute custom functionality. +`Callbacks` are what we use to execute any functionality within a component outside the primary component logic. All of the above solutions use `Callbacks` under the hood to log intermediate steps of components. There are a number of `Callbacks` relevant for debugging that come with LangChain out of the box, like the [FileCallbackHandler](/docs/modules/callbacks/filecallbackhandler). You can also implement your own callbacks to execute custom functionality. See here for more info on [Callbacks](/docs/modules/callbacks/), how to use them, and customize them. diff --git a/docs/docs/guides/deployments/index.mdx b/docs/docs/guides/deployments/index.mdx index 92bf63641408e..c075c3b92ee92 100644 --- a/docs/docs/guides/deployments/index.mdx +++ b/docs/docs/guides/deployments/index.mdx @@ -20,11 +20,11 @@ This guide aims to provide a comprehensive overview of the requirements for depl Understanding these components is crucial when assessing serving systems. LangChain integrates with several open-source projects designed to tackle these issues, providing a robust framework for productionizing your LLM applications. Some notable frameworks include: -- [Ray Serve](/docs/ecosystem/integrations/ray_serve) +- [Ray Serve](/docs/integrations/providers/ray_serve) - [BentoML](https://github.com/bentoml/BentoML) -- [OpenLLM](/docs/ecosystem/integrations/openllm) -- [Modal](/docs/ecosystem/integrations/modal) -- [Jina](/docs/ecosystem/integrations/jina#deployment) +- [OpenLLM](/docs/integrations/providers/openllm) +- [Modal](/docs/integrations/providers/modal) +- [Jina](/docs/integrations/providers/jina) These links will provide further information on each ecosystem, assisting you in finding the best fit for your LLM deployment needs. 
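The debugging guide above closes by noting that you can also implement your own callbacks to execute custom functionality. As a minimal sketch of what such a handler can look like - the class name and timing logic here are illustrative; only the `BaseCallbackHandler` hooks are LangChain API:

```python
import time
from typing import Any, Dict, List
from uuid import UUID

from langchain.callbacks.base import BaseCallbackHandler


class TimingCallbackHandler(BaseCallbackHandler):
    """Print how long each LLM call takes."""

    def __init__(self) -> None:
        # Map each run's id to the time its LLM call started.
        self._starts: Dict[UUID, float] = {}

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], *, run_id: UUID, **kwargs: Any
    ) -> None:
        self._starts[run_id] = time.monotonic()

    def on_llm_end(self, response: Any, *, run_id: UUID, **kwargs: Any) -> None:
        started = self._starts.pop(run_id, None)
        if started is not None:
            print(f"LLM call finished in {time.monotonic() - started:.2f}s")
```

A handler like this is passed the same way as the built-in ones, e.g. `ChatOpenAI(callbacks=[TimingCallbackHandler()])`, or via `callbacks=` on a chain or agent run.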
diff --git a/docs/docs/guides/evaluation/comparison/custom.ipynb b/docs/docs/guides/evaluation/comparison/custom.ipynb index c4ed70bcfb51a..3b10f833e86b5 100644 --- a/docs/docs/guides/evaluation/comparison/custom.ipynb +++ b/docs/docs/guides/evaluation/comparison/custom.ipynb @@ -104,7 +104,7 @@ }, "outputs": [], "source": [ - "# %pip install anthropic\n", + "%pip install --upgrade --quiet anthropic\n", "# %env ANTHROPIC_API_KEY=YOUR_API_KEY" ] }, @@ -120,8 +120,8 @@ "from typing import Any, Optional\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatAnthropic\n", "from langchain.evaluation import PairwiseStringEvaluator\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "\n", "class CustomPreferenceEvaluator(PairwiseStringEvaluator):\n", diff --git a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb index e0d5b75febffb..7a913ba1be281 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_embedding_distance.ipynb @@ -156,7 +156,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "embedding_model = HuggingFaceEmbeddings()\n", "hf_evaluator = load_evaluator(\"pairwise_embedding_distance\", embeddings=embedding_model)" diff --git a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb index cc197bf5e613b..259affbd2b2de 100644 --- a/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb +++ b/docs/docs/guides/evaluation/comparison/pairwise_string.ipynb @@ -236,7 +236,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "llm = ChatAnthropic(temperature=0)\n", "\n", diff --git a/docs/docs/guides/evaluation/examples/comparisons.ipynb b/docs/docs/guides/evaluation/examples/comparisons.ipynb index 09d44b828c120..150b8f7f29e27 100644 --- a/docs/docs/guides/evaluation/examples/comparisons.ipynb +++ b/docs/docs/guides/evaluation/examples/comparisons.ipynb @@ -23,6 +23,15 @@ "In this example, you will use gpt-4 to select which output is preferred." 
] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -99,8 +108,8 @@ "outputs": [], "source": [ "from langchain.agents import AgentType, Tool, initialize_agent\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.utilities import SerpAPIWrapper\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Initialize the language model\n", "# You can add your own OpenAI API key by adding openai_api_key=\"\"\n", diff --git a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb index f09e7c3941ce5..9754094d4ff9e 100644 --- a/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/criteria_eval_chain.ipynb @@ -318,7 +318,7 @@ }, "outputs": [], "source": [ - "# %pip install ChatAnthropic\n", + "%pip install --upgrade --quiet anthropic\n", "# %env ANTHROPIC_API_KEY=" ] }, @@ -331,7 +331,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "llm = ChatAnthropic(temperature=0)\n", "evaluator = load_evaluator(\"criteria\", llm=llm, criteria=\"conciseness\")" @@ -464,4 +464,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/string/custom.ipynb b/docs/docs/guides/evaluation/string/custom.ipynb index 544ff98df06d4..0852f7b096d41 100644 --- a/docs/docs/guides/evaluation/string/custom.ipynb +++ b/docs/docs/guides/evaluation/string/custom.ipynb @@ -23,7 +23,7 @@ }, "outputs": [], "source": [ - "# %pip install evaluate > /dev/null" + "%pip install --upgrade --quiet evaluate > /dev/null" ] }, { diff --git a/docs/docs/guides/evaluation/string/embedding_distance.ipynb b/docs/docs/guides/evaluation/string/embedding_distance.ipynb index 3d9030ddd3280..5164469a44cae 100644 --- a/docs/docs/guides/evaluation/string/embedding_distance.ipynb +++ b/docs/docs/guides/evaluation/string/embedding_distance.ipynb @@ -142,7 +142,7 @@ }, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceEmbeddings\n", "\n", "embedding_model = HuggingFaceEmbeddings()\n", "hf_evaluator = load_evaluator(\"embedding_distance\", embeddings=embedding_model)" diff --git a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb index bf035262d79f8..7072bdd6e68e7 100644 --- a/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb +++ b/docs/docs/guides/evaluation/string/scoring_eval_chain.ipynb @@ -18,14 +18,23 @@ "Below is an example demonstrating the usage of `LabeledScoreStringEvalChain` using the default prompt:\n" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.evaluation import load_evaluator\n", + "from langchain_openai import ChatOpenAI\n", "\n", "evaluator = load_evaluator(\"labeled_score_string\", llm=ChatOpenAI(model=\"gpt-4\"))" ] diff --git 
a/docs/docs/guides/evaluation/string/string_distance.ipynb b/docs/docs/guides/evaluation/string/string_distance.ipynb index 4d762d2172ce4..fbe1062951fcc 100644 --- a/docs/docs/guides/evaluation/string/string_distance.ipynb +++ b/docs/docs/guides/evaluation/string/string_distance.ipynb @@ -29,7 +29,7 @@ }, "outputs": [], "source": [ - "# %pip install rapidfuzz" + "%pip install --upgrade --quiet rapidfuzz" ] }, { diff --git a/docs/docs/guides/evaluation/trajectory/custom.ipynb b/docs/docs/guides/evaluation/trajectory/custom.ipynb index 4594f044c1681..c6be21a279366 100644 --- a/docs/docs/guides/evaluation/trajectory/custom.ipynb +++ b/docs/docs/guides/evaluation/trajectory/custom.ipynb @@ -14,6 +14,16 @@ "In this example, you will make a simple trajectory evaluator that uses an LLM to determine if any actions were unnecessary." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c96b340", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -24,9 +34,9 @@ "from typing import Any, Optional, Sequence, Tuple\n", "\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.evaluation import AgentTrajectoryEvaluator\n", "from langchain.schema import AgentAction\n", + "from langchain_openai import ChatOpenAI\n", "\n", "\n", "class StepNecessityEvaluator(AgentTrajectoryEvaluator):\n", @@ -140,4 +150,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb index c40bf8dd87286..18e7630a5d404 100644 --- a/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb +++ b/docs/docs/guides/evaluation/trajectory/trajectory_eval.ipynb @@ -17,6 +17,16 @@ "For more information, check out the reference docs for the [TrajectoryEvalChain](https://api.python.langchain.com/en/latest/evaluation/langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain.html#langchain.evaluation.agents.trajectory_eval_chain.TrajectoryEvalChain) for more info." 
] }, + { + "cell_type": "code", + "execution_count": null, + "id": "f4d22262", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -75,8 +85,8 @@ "from urllib.parse import urlparse\n", "\n", "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.tools import tool\n", + "from langchain_openai import ChatOpenAI\n", "from pydantic import HttpUrl\n", "\n", "\n", @@ -177,7 +187,7 @@ }, "outputs": [], "source": [ - "# %pip install anthropic\n", + "%pip install --upgrade --quiet anthropic\n", "# ANTHROPIC_API_KEY=" ] }, @@ -190,7 +200,7 @@ }, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic\n", + "from langchain_community.chat_models import ChatAnthropic\n", "\n", "eval_llm = ChatAnthropic(temperature=0)\n", "evaluator = load_evaluator(\"trajectory\", llm=eval_llm)" @@ -300,4 +310,4 @@ }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/docs/docs/guides/fallbacks.ipynb b/docs/docs/guides/fallbacks.ipynb index cde2b709271c4..ff99231673611 100644 --- a/docs/docs/guides/fallbacks.ipynb +++ b/docs/docs/guides/fallbacks.ipynb @@ -26,6 +26,16 @@ "IMPORTANT: By default, a lot of the LLM wrappers catch errors and retry. You will most likely want to turn those off when working with fallbacks. Otherwise the first wrapper will keep on retrying and not failing." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "3a449a2e", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, @@ -33,7 +43,8 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.chat_models import ChatAnthropic, ChatOpenAI" + "from langchain_community.chat_models import ChatAnthropic\n", + "from langchain_openai import ChatOpenAI" ] }, { @@ -143,7 +154,7 @@ } ], "source": [ - "from langchain.prompts import ChatPromptTemplate\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "\n", "prompt = ChatPromptTemplate.from_messages(\n", " [\n", @@ -205,8 +216,8 @@ "outputs": [], "source": [ "# Now lets create a chain with the normal OpenAI model\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI\n", "\n", "prompt_template = \"\"\"Instructions: You should always include a compliment in your response.\n", "\n", diff --git a/docs/docs/guides/local_llms.ipynb b/docs/docs/guides/local_llms.ipynb index d47ca549eb0e4..0298e4e6c3d76 100644 --- a/docs/docs/guides/local_llms.ipynb +++ b/docs/docs/guides/local_llms.ipynb @@ -94,7 +94,7 @@ } ], "source": [ - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "llm = Ollama(model=\"llama2\")\n", "llm(\"The first man on the moon was ...\")" @@ -222,7 +222,7 @@ } ], "source": [ - "from langchain.llms import Ollama\n", + "from langchain_community.llms import Ollama\n", "\n", "llm = Ollama(model=\"llama2:13b\")\n", "llm(\"The first man on the moon was ... 
think step by step\")" @@ -277,7 +277,7 @@ "source": [ "%env CMAKE_ARGS=\"-DLLAMA_METAL=on\"\n", "%env FORCE_CMAKE=1\n", - "%pip install -U llama-cpp-python --no-cache-dirclear" + "%pip install --upgrade --quiet llama-cpp-python --no-cache-dir" ] }, { @@ -289,7 +289,7 @@ "source": [ "from langchain.callbacks.manager import CallbackManager\n", "from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n", - "from langchain.llms import LlamaCpp\n", + "from langchain_community.llms import LlamaCpp\n", "\n", "llm = LlamaCpp(\n", " model_path=\"/Users/rlm/Desktop/Code/llama.cpp/models/openorca-platypus2-13b.gguf.q4_0.bin\",\n", @@ -400,7 +400,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import GPT4All\n", + "from langchain_community.llms import GPT4All\n", "\n", "llm = GPT4All(\n", " model=\"/Users/rlm/Desktop/Code/gpt4all/models/nous-hermes-13b.ggmlv3.q4_0.bin\"\n", diff --git a/docs/docs/guides/model_laboratory.ipynb b/docs/docs/guides/model_laboratory.ipynb index c3c650feaa5a6..b6533de4c0d07 100644 --- a/docs/docs/guides/model_laboratory.ipynb +++ b/docs/docs/guides/model_laboratory.ipynb @@ -12,6 +12,16 @@ "LangChain provides the concept of a ModelLaboratory to test out and try different models." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "12ebae56", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "from langchain.llms import Cohere, HuggingFaceHub, OpenAI\n", "from langchain.model_laboratory import ModelLaboratory\n", - "from langchain.prompts import PromptTemplate" + "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import Cohere, HuggingFaceHub\n", + "from langchain_openai import OpenAI" ] }, { @@ -141,7 +152,7 @@ "outputs": [], "source": [ "from langchain.chains import SelfAskWithSearchChain\n", - "from langchain.utilities import SerpAPIWrapper\n", + "from langchain_community.utilities import SerpAPIWrapper\n", "\n", "open_ai_llm = OpenAI(temperature=0)\n", "search = SerpAPIWrapper()\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb index 1d64d64ec55e0..1ec5b2a3ae6c6 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/index.ipynb @@ -28,15 +28,23 @@ "Below you will find the use case on how to leverage anonymization in LangChain." ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai langchain-experimental presidio-analyzer presidio-anonymizer spacy Faker" ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "# Install necessary packages\n", - "# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n", - "# ! 
python -m spacy download en_core_web_lg" + "# Download model\n", + "!python -m spacy download en_core_web_lg" ] }, { @@ -129,8 +137,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "anonymizer = PresidioAnonymizer()\n", "\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb index 240005c9d3c0b..868d11ef80863 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/multi_language.ipynb @@ -41,15 +41,21 @@ "\n" ] }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-openai langchain-experimental presidio-analyzer presidio-anonymizer spacy Faker" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "# Install necessary packages\n", - "# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n", - "# ! python -m spacy download en_core_web_lg" + "# Download model\n", + "!python -m spacy download en_core_web_lg" ] }, { @@ -239,7 +245,7 @@ "outputs": [], "source": [ "# Install necessary packages\n", - "# ! pip install fasttext langdetect" + "%pip install --upgrade --quiet fasttext langdetect" ] }, { diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb index fe68b361d79c0..8c7f4574ef3af 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/qa_privacy_protection.ipynb @@ -31,15 +31,21 @@ "### Iterative process of upgrading the anonymizer" ] }, + { + "cell_type": "raw", + "metadata": {}, + "source": [ + "%pip install --upgrade --quiet langchain langchain-experimental langchain-openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ - "# Install necessary packages\n", - "# !pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker faiss-cpu tiktoken\n", - "# ! python -m spacy download en_core_web_lg" + "# Download model\n", + "! python -m spacy download en_core_web_lg" ] }, { @@ -637,9 +643,9 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings.openai import OpenAIEmbeddings\n", "from langchain.text_splitter import RecursiveCharacterTextSplitter\n", - "from langchain.vectorstores import FAISS\n", + "from langchain_community.vectorstores import FAISS\n", + "from langchain_openai import OpenAIEmbeddings\n", "\n", "# 2. Load the data: In our case data's already loaded\n", "# 3. Anonymize the data before indexing\n", @@ -664,14 +670,14 @@ "source": [ "from operator import itemgetter\n", "\n", - "from langchain.chat_models.openai import ChatOpenAI\n", - "from langchain.prompts import ChatPromptTemplate\n", "from langchain_core.output_parsers import StrOutputParser\n", + "from langchain_core.prompts import ChatPromptTemplate\n", "from langchain_core.runnables import (\n", " RunnableLambda,\n", " RunnableParallel,\n", " RunnablePassthrough,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# 6. 
Create anonymizer chain\n", "template = \"\"\"Answer the question based only on the following context:\n", @@ -822,7 +828,7 @@ "metadata": {}, "outputs": [], "source": [ - "from langchain.embeddings import HuggingFaceBgeEmbeddings\n", + "from langchain_community.embeddings import HuggingFaceBgeEmbeddings\n", "\n", "model_name = \"BAAI/bge-base-en-v1.5\"\n", "# model_kwargs = {'device': 'cuda'}\n", diff --git a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb index b68e74e0f9e65..87c5a444e1bea 100644 --- a/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb +++ b/docs/docs/guides/privacy/presidio_data_anonymization/reversible.ipynb @@ -56,7 +56,7 @@ "outputs": [], "source": [ "# Install necessary packages\n", - "# ! pip install langchain langchain-experimental openai presidio-analyzer presidio-anonymizer spacy Faker\n", + "%pip install --upgrade --quiet langchain langchain-experimental langchain-openai presidio-analyzer presidio-anonymizer spacy Faker\n", "# ! python -m spacy download en_core_web_lg" ] }, @@ -207,8 +207,8 @@ } ], "source": [ - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts.prompt import PromptTemplate\n", + "from langchain_openai import ChatOpenAI\n", "\n", "anonymizer = PresidioReversibleAnonymizer()\n", "\n", diff --git a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb index b4bf7de26c483..0a509cf337d56 100644 --- a/docs/docs/guides/safety/amazon_comprehend_chain.ipynb +++ b/docs/docs/guides/safety/amazon_comprehend_chain.ipynb @@ -24,7 +24,7 @@ }, "outputs": [], "source": [ - "%pip install boto3 nltk" + "%pip install --upgrade --quiet boto3 nltk" ] }, { @@ -37,7 +37,7 @@ }, "outputs": [], "source": [ - "%pip install -U langchain_experimental" + "%pip install --upgrade --quiet langchain_experimental" ] }, { @@ -50,7 +50,7 @@ }, "outputs": [], "source": [ - "%pip install -U langchain pydantic" + "%pip install --upgrade --quiet langchain pydantic" ] }, { @@ -105,8 +105,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (\n", " ModerationPiiError,\n", ")\n", @@ -242,8 +242,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -405,8 +405,8 @@ }, "outputs": [], "source": [ - "from langchain.llms.fake import FakeListLLM\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms.fake import FakeListLLM\n", "\n", "template = \"\"\"Question: {question}\n", "\n", @@ -527,7 +527,7 @@ }, "outputs": [], "source": [ - "%pip install huggingface_hub" + "%pip install --upgrade --quiet huggingface_hub" ] }, { @@ -566,8 +566,8 @@ }, "outputs": [], "source": [ - "from langchain.llms import HuggingFaceHub\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import HuggingFaceHub\n", "\n", "template = \"\"\"{question}\"\"\"\n", "\n", @@ -696,9 +696,9 @@ "source": [ "import json\n", "\n", - "from langchain.llms import SagemakerEndpoint\n", - "from langchain.llms.sagemaker_endpoint import LLMContentHandler\n", 
"from langchain.prompts import PromptTemplate\n", + "from langchain_community.llms import SagemakerEndpoint\n", + "from langchain_community.llms.sagemaker_endpoint import LLMContentHandler\n", "\n", "\n", "class ContentHandler(LLMContentHandler):\n", diff --git a/docs/docs/guides/safety/constitutional_chain.mdx b/docs/docs/guides/safety/constitutional_chain.mdx index 3f96559e98116..4b982501315f4 100644 --- a/docs/docs/guides/safety/constitutional_chain.mdx +++ b/docs/docs/guides/safety/constitutional_chain.mdx @@ -12,7 +12,7 @@ content that may violate guidelines, be offensive, or deviate from the desired c ```python # Imports -from langchain.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain.chains.constitutional_ai.base import ConstitutionalChain diff --git a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb index be41c203346b2..c138f1a2d3284 100644 --- a/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb +++ b/docs/docs/guides/safety/hugging_face_prompt_injection.ipynb @@ -28,12 +28,10 @@ "cell_type": "code", "execution_count": null, "id": "9bdbfdc7c949a9c1", - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ - "!pip install \"optimum[onnxruntime]\"" + "%pip install --upgrade --quiet \"optimum[onnxruntime]\" langchain transformers langchain-experimental langchain-openai" ] }, { @@ -44,8 +42,7 @@ "ExecuteTime": { "end_time": "2023-12-18T11:41:24.738278Z", "start_time": "2023-12-18T11:41:20.842567Z" - }, - "collapsed": false + } }, "outputs": [], "source": [ @@ -80,7 +77,9 @@ "outputs": [ { "data": { - "text/plain": "'hugging_face_injection_identifier'" + "text/plain": [ + "'hugging_face_injection_identifier'" + ] }, "execution_count": 10, "metadata": {}, @@ -119,7 +118,9 @@ "outputs": [ { "data": { - "text/plain": "'Name 5 cities with the biggest number of inhabitants'" + "text/plain": [ + "'Name 5 cities with the biggest number of inhabitants'" + ] }, "execution_count": 11, "metadata": {}, @@ -206,7 +207,7 @@ ], "source": [ "from langchain.agents import AgentType, initialize_agent\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(temperature=0)\n", "agent = initialize_agent(\n", @@ -374,7 +375,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.1" + "version": "3.10.1" } }, "nbformat": 4, diff --git a/docs/docs/guides/safety/index.mdx b/docs/docs/guides/safety/index.mdx index 8b97fdda7865b..b5d047d771ed4 100644 --- a/docs/docs/guides/safety/index.mdx +++ b/docs/docs/guides/safety/index.mdx @@ -4,6 +4,6 @@ One of the key concerns with using LLMs is that they may generate harmful or une - [Amazon Comprehend moderation chain](/docs/guides/safety/amazon_comprehend_chain): Use [Amazon Comprehend](https://aws.amazon.com/comprehend/) to detect and handle Personally Identifiable Information (PII) and toxicity. - [Constitutional chain](/docs/guides/safety/constitutional_chain): Prompt the model with a set of principles which should guide the model behavior. -- [Hugging Face prompt injection identification](/docs/guides/safety/huggingface_prompt_injection_identification): Detect and handle prompt injection attacks. +- [Hugging Face prompt injection identification](/docs/guides/safety/hugging_face_prompt_injection): Detect and handle prompt injection attacks. 
- [Logical Fallacy chain](/docs/guides/safety/logical_fallacy_chain): Checks the model output against logical fallacies to correct any deviation. - [Moderation chain](/docs/guides/safety/moderation): Check if any output text is harmful and flag it. diff --git a/docs/docs/guides/safety/logical_fallacy_chain.mdx b/docs/docs/guides/safety/logical_fallacy_chain.mdx index 1d785623d8bcd..d25dd37cd3a47 100644 --- a/docs/docs/guides/safety/logical_fallacy_chain.mdx +++ b/docs/docs/guides/safety/logical_fallacy_chain.mdx @@ -21,7 +21,7 @@ Therefore, it is crucial that model developers proactively address logical falla ```python # Imports -from langchain.llms import OpenAI +from langchain_openai import OpenAI from langchain.prompts import PromptTemplate from langchain.chains.llm import LLMChain from langchain_experimental.fallacy_removal.base import FallacyChain diff --git a/docs/docs/guides/safety/moderation.mdx b/docs/docs/guides/safety/moderation.mdx index 8b3701582774f..94b6a7dc642e1 100644 --- a/docs/docs/guides/safety/moderation.mdx +++ b/docs/docs/guides/safety/moderation.mdx @@ -22,7 +22,7 @@ We'll show: ```python -from langchain.llms import OpenAI +from langchain_openai import OpenAI from langchain.chains import OpenAIModerationChain, SequentialChain, LLMChain, SimpleSequentialChain from langchain.prompts import PromptTemplate ``` diff --git a/docs/docs/integrations/callbacks/argilla.ipynb b/docs/docs/integrations/callbacks/argilla.ipynb index 9e89cb5da9249..dab956786df2e 100644 --- a/docs/docs/integrations/callbacks/argilla.ipynb +++ b/docs/docs/integrations/callbacks/argilla.ipynb @@ -43,8 +43,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install argilla --upgrade\n", - "!pip install openai" + "%pip install --upgrade --quiet langchain langchain-openai argilla" ] }, { @@ -215,7 +214,7 @@ ], "source": [ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -280,8 +279,8 @@ "source": [ "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", @@ -363,7 +362,7 @@ "source": [ "from langchain.agents import AgentType, initialize_agent, load_tools\n", "from langchain.callbacks import ArgillaCallbackHandler, StdOutCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "argilla_callback = ArgillaCallbackHandler(\n", " dataset_name=\"langchain-dataset\",\n", diff --git a/docs/docs/integrations/callbacks/confident.ipynb b/docs/docs/integrations/callbacks/confident.ipynb index b0d567633913b..f00338256903b 100644 --- a/docs/docs/integrations/callbacks/confident.ipynb +++ b/docs/docs/integrations/callbacks/confident.ipynb @@ -42,7 +42,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install deepeval --upgrade" + "%pip install --upgrade --quiet langchain langchain-openai deepeval" ] }, { @@ -152,7 +152,7 @@ } ], "source": [ - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0,\n", @@ -215,11 +215,10 @@ "source": [ "import requests\n", "from langchain.chains 
import RetrievalQA\n", - "from langchain.document_loaders import TextLoader\n", - "from langchain.embeddings.openai import OpenAIEmbeddings\n", - "from langchain.llms import OpenAI\n", "from langchain.text_splitter import CharacterTextSplitter\n", - "from langchain.vectorstores import Chroma\n", + "from langchain_community.document_loaders import TextLoader\n", + "from langchain_community.vectorstores import Chroma\n", + "from langchain_openai import OpenAI, OpenAIEmbeddings\n", "\n", "text_file_url = \"https://raw.githubusercontent.com/hwchase17/chat-your-data/master/state_of_the_union.txt\"\n", "\n", diff --git a/docs/docs/integrations/callbacks/context.ipynb b/docs/docs/integrations/callbacks/context.ipynb index b250f439b6164..29913097eeb90 100644 --- a/docs/docs/integrations/callbacks/context.ipynb +++ b/docs/docs/integrations/callbacks/context.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install context-python --upgrade" + "%pip install --upgrade --quiet langchain langchain-openai context-python" ] }, { @@ -100,11 +100,11 @@ "import os\n", "\n", "from langchain.callbacks import ContextCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", " SystemMessage,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", @@ -157,12 +157,12 @@ "\n", "from langchain.callbacks import ContextCallbackHandler\n", "from langchain.chains import LLMChain\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.prompts import PromptTemplate\n", "from langchain.prompts.chat import (\n", " ChatPromptTemplate,\n", " HumanMessagePromptTemplate,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "token = os.environ[\"CONTEXT_API_TOKEN\"]\n", "\n", diff --git a/docs/docs/integrations/callbacks/infino.ipynb b/docs/docs/integrations/callbacks/infino.ipynb index 367f3a2f2d88a..f12fd1909c5b2 100644 --- a/docs/docs/integrations/callbacks/infino.ipynb +++ b/docs/docs/integrations/callbacks/infino.ipynb @@ -34,9 +34,9 @@ "outputs": [], "source": [ "# Install necessary dependencies.\n", - "!pip install -q infinopy\n", - "!pip install -q matplotlib\n", - "!pip install -q tiktoken" + "%pip install --upgrade --quiet infinopy\n", + "%pip install --upgrade --quiet matplotlib\n", + "%pip install --upgrade --quiet tiktoken" ] }, { @@ -54,7 +54,7 @@ "import matplotlib.pyplot as plt\n", "from infinopy import InfinoClient\n", "from langchain.callbacks import InfinoCallbackHandler\n", - "from langchain.llms import OpenAI" + "from langchain_openai import OpenAI" ] }, { @@ -316,8 +316,8 @@ "# os.environ[\"OPENAI_API_KEY\"] = \"YOUR_API_KEY\"\n", "\n", "from langchain.chains.summarize import load_summarize_chain\n", - "from langchain.chat_models import ChatOpenAI\n", - "from langchain.document_loaders import WebBaseLoader\n", + "from langchain_community.document_loaders import WebBaseLoader\n", + "from langchain_openai import ChatOpenAI\n", "\n", "# Create callback handler. 
This logs latency, errors, token usage, prompts, as well as prompt responses to Infino.\n", "handler = InfinoCallbackHandler(\n", diff --git a/docs/docs/integrations/callbacks/labelstudio.ipynb b/docs/docs/integrations/callbacks/labelstudio.ipynb index bb733f0dc15e2..91507b0b046ab 100644 --- a/docs/docs/integrations/callbacks/labelstudio.ipynb +++ b/docs/docs/integrations/callbacks/labelstudio.ipynb @@ -56,7 +56,7 @@ }, "outputs": [], "source": [ - "!pip install -U label-studio label-studio-sdk openai" + "%pip install --upgrade --quiet langchain label-studio label-studio-sdk langchain-openai" ] }, { @@ -171,7 +171,7 @@ "outputs": [], "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "llm = OpenAI(\n", " temperature=0, callbacks=[LabelStudioCallbackHandler(project_name=\"My Project\")]\n", @@ -242,8 +242,8 @@ "outputs": [], "source": [ "from langchain.callbacks import LabelStudioCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import HumanMessage, SystemMessage\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " callbacks=[\n", diff --git a/docs/docs/integrations/callbacks/llmonitor.md b/docs/docs/integrations/callbacks/llmonitor.md index 4ee85429f6213..266332a7e3438 100644 --- a/docs/docs/integrations/callbacks/llmonitor.md +++ b/docs/docs/integrations/callbacks/llmonitor.md @@ -27,8 +27,8 @@ handler = LLMonitorCallbackHandler(app_id="...") ## Usage with LLM/Chat models ```python -from langchain.llms import OpenAI -from langchain.chat_models import ChatOpenAI +from langchain_openai import OpenAI +from langchain_openai import ChatOpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() @@ -52,7 +52,7 @@ It is also recommended to pass `agent_name` in the metadata to be able to distin Example: ```python -from langchain.chat_models import ChatOpenAI +from langchain_openai import ChatOpenAI from langchain.schema import SystemMessage, HumanMessage from langchain.agents import OpenAIFunctionsAgent, AgentExecutor, tool from langchain.callbacks import LLMonitorCallbackHandler @@ -85,7 +85,7 @@ Another example: ```python from langchain.agents import load_tools, initialize_agent, AgentType -from langchain.llms import OpenAI +from langchain_openai import OpenAI from langchain.callbacks import LLMonitorCallbackHandler handler = LLMonitorCallbackHandler() diff --git a/docs/docs/integrations/callbacks/promptlayer.ipynb b/docs/docs/integrations/callbacks/promptlayer.ipynb index 28d5977b7485b..538d43eb86135 100644 --- a/docs/docs/integrations/callbacks/promptlayer.ipynb +++ b/docs/docs/integrations/callbacks/promptlayer.ipynb @@ -32,7 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "!pip install promptlayer --upgrade" + "%pip install --upgrade --quiet promptlayer" ] }, { @@ -76,10 +76,10 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.chat_models import ChatOpenAI\n", "from langchain.schema import (\n", " HumanMessage,\n", ")\n", + "from langchain_openai import ChatOpenAI\n", "\n", "chat_llm = ChatOpenAI(\n", " temperature=0,\n", @@ -110,7 +110,7 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.llms import GPT4All\n", + "from langchain_community.llms import GPT4All\n", 
"\n", "model = GPT4All(model=\"./models/gpt4all-model.bin\", n_ctx=512, n_threads=8)\n", "\n", @@ -142,7 +142,7 @@ "source": [ "import promptlayer # Don't forget this 🍰\n", "from langchain.callbacks import PromptLayerCallbackHandler\n", - "from langchain.llms import OpenAI\n", + "from langchain_openai import OpenAI\n", "\n", "\n", "def pl_id_callback(promptlayer_request_id):\n", diff --git a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb index 7b88a910b8119..63902f47e3204 100644 --- a/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb +++ b/docs/docs/integrations/callbacks/sagemaker_tracking.ipynb @@ -38,9 +38,9 @@ }, "outputs": [], "source": [ - "!pip install sagemaker\n", - "!pip install openai\n", - "!pip install google-search-results" + "%pip install --upgrade --quiet sagemaker\n", + "%pip install --upgrade --quiet langchain-openai\n", + "%pip install --upgrade --quiet google-search-results" ] }, { @@ -82,8 +82,8 @@ "from langchain.agents import initialize_agent, load_tools\n", "from langchain.callbacks import SageMakerCallbackHandler\n", "from langchain.chains import LLMChain, SimpleSequentialChain\n", - "from langchain.llms import OpenAI\n", "from langchain.prompts import PromptTemplate\n", + "from langchain_openai import OpenAI\n", "from sagemaker.analytics import ExperimentAnalytics\n", "from sagemaker.experiments.run import Run\n", "from sagemaker.session import Session" diff --git a/docs/docs/integrations/callbacks/streamlit.md b/docs/docs/integrations/callbacks/streamlit.md index 4704d9d5579ce..776f0f6d9c26a 100644 --- a/docs/docs/integrations/callbacks/streamlit.md +++ b/docs/docs/integrations/callbacks/streamlit.md @@ -7,7 +7,7 @@ [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/langchain-ai/streamlit-agent?quickstart=1) In this guide we will demonstrate how to use `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an -interactive Streamlit app. Try it out with the running app below using the [MRKL agent](/docs/modules/agents/how_to/mrkl/): +interactive Streamlit app. Try it out with the running app below using the MRKL agent: