From f2ffb8b4ed99d0a0065ea686ac14da12f6edaa4a Mon Sep 17 00:00:00 2001
From: Bagatur <22008038+baskaryan@users.noreply.github.com>
Date: Mon, 16 Oct 2023 11:34:31 -0700
Subject: [PATCH] Add LCEL to prompt doc (#11875)
---
.../prompts/prompt_templates/index.ipynb | 386 ++++++++++++++++++
.../prompts/prompt_templates/index.mdx | 133 ------
2 files changed, 386 insertions(+), 133 deletions(-)
create mode 100644 docs/docs/modules/model_io/prompts/prompt_templates/index.ipynb
delete mode 100644 docs/docs/modules/model_io/prompts/prompt_templates/index.mdx
diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/index.ipynb b/docs/docs/modules/model_io/prompts/prompt_templates/index.ipynb
new file mode 100644
index 0000000000000..a7c4d497e08f1
--- /dev/null
+++ b/docs/docs/modules/model_io/prompts/prompt_templates/index.ipynb
@@ -0,0 +1,386 @@
+{
+ "cells": [
+ {
+ "cell_type": "raw",
+ "id": "77dd0c90-94d7-4acd-a360-e977b39d0a8f",
+ "metadata": {},
+ "source": [
+ "---\n",
+ "sidebar_position: 0\n",
+ "title: Prompt templates\n",
+ "---"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2d98412d-fc53-42c1-aed8-f1f8eb9ada58",
+ "metadata": {},
+ "source": [
+ "Prompt templates are pre-defined recipes for generating prompts for language models.\n",
+ "\n",
+ "A template may include instructions, few-shot examples, and specific context and\n",
+ "questions appropriate for a given task.\n",
+ "\n",
+ "LangChain provides tooling to create and work with prompt templates.\n",
+ "\n",
+ "LangChain strives to create model agnostic templates to make it easy to reuse\n",
+ "existing templates across different language models.\n",
+ "\n",
+ "Typically, language models expect the prompt to either be a string or else a list of chat messages.\n",
+ "\n",
+ "## `PromptTemplate`\n",
+ "\n",
+ "Use `PromptTemplate` to create a template for a string prompt.\n",
+ "\n",
+ "By default, `PromptTemplate` uses [Python's str.format](https://docs.python.org/3/library/stdtypes.html#str.format)\n",
+ "syntax for templating."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 17,
+ "id": "a5bc258b-87d2-486b-9785-edf5b23fd179",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Tell me a funny joke about chickens.'"
+ ]
+ },
+ "execution_count": 17,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain.prompts import PromptTemplate\n",
+ "\n",
+ "prompt_template = PromptTemplate.from_template(\n",
+ " \"Tell me a {adjective} joke about {content}.\"\n",
+ ")\n",
+ "prompt_template.format(adjective=\"funny\", content=\"chickens\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d54c803c-0f80-412d-9156-b8390e0265c0",
+ "metadata": {},
+ "source": [
+ "The template supports any number of variables, including no variables:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "63bd7ac3-5cf6-4eb2-8205-d1a01029b56a",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Tell me a joke'"
+ ]
+ },
+ "execution_count": 18,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain.prompts import PromptTemplate\n",
+ "\n",
+ "prompt_template = PromptTemplate.from_template(\n",
+ "\"Tell me a joke\"\n",
+ ")\n",
+ "prompt_template.format()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "69f7c948-9f78-431a-a466-8038e6b6f856",
+ "metadata": {},
+ "source": [
+ "For additional validation, specify `input_variables` explicitly. These variables\n",
+ "will be compared against the variables present in the template string during instantiation, **raising an exception if\n",
+ "there is a mismatch**. For example:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 19,
+ "id": "617d7b2c-7308-4e74-9cc9-96ee0b7a13ac",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValidationError",
+ "evalue": "1 validation error for PromptTemplate\n__root__\n Invalid prompt schema; check for mismatched or missing input parameters. 'content' (type=value_error)",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[19], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mlangchain\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mprompts\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m PromptTemplate\n\u001b[0;32m----> 3\u001b[0m invalid_prompt \u001b[38;5;241m=\u001b[39m \u001b[43mPromptTemplate\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 4\u001b[0m \u001b[43m \u001b[49m\u001b[43minput_variables\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43madjective\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 5\u001b[0m \u001b[43m \u001b[49m\u001b[43mtemplate\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mTell me a \u001b[39;49m\u001b[38;5;132;43;01m{adjective}\u001b[39;49;00m\u001b[38;5;124;43m joke about \u001b[39;49m\u001b[38;5;132;43;01m{content}\u001b[39;49;00m\u001b[38;5;124;43m.\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\n\u001b[1;32m 6\u001b[0m \u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/langchain/libs/langchain/langchain/load/serializable.py:97\u001b[0m, in \u001b[0;36mSerializable.__init__\u001b[0;34m(self, **kwargs)\u001b[0m\n\u001b[1;32m 96\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[0;32m---> 97\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 98\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_lc_kwargs \u001b[38;5;241m=\u001b[39m kwargs\n",
+ "File \u001b[0;32m~/langchain/.venv/lib/python3.9/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
+ "\u001b[0;31mValidationError\u001b[0m: 1 validation error for PromptTemplate\n__root__\n Invalid prompt schema; check for mismatched or missing input parameters. 'content' (type=value_error)"
+ ]
+ }
+ ],
+ "source": [
+ "from langchain.prompts import PromptTemplate\n",
+ "\n",
+ "invalid_prompt = PromptTemplate(\n",
+ " input_variables=[\"adjective\"],\n",
+ " template=\"Tell me a {adjective} joke about {content}.\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "2715fd80-e294-49ca-9fc2-5a012949ed8a",
+ "metadata": {},
+ "source": [
+ "You can create custom prompt templates that format the prompt in any way you want.\n",
+ "For more information, see [Custom Prompt Templates](./custom_prompt_template.html).\n",
+ "\n",
+ "## `ChatPromptTemplate`\n",
+ "\n",
+ "The prompt to [chat models](../models/chat) is a list of chat messages.\n",
+ "\n",
+ "Each chat message is associated with content, and an additional parameter called `role`.\n",
+ "For example, in the OpenAI [Chat Completions API](https://platform.openai.com/docs/guides/chat/introduction), a chat message can be associated with an AI assistant, a human or a system role.\n",
+ "\n",
+ "Create a chat prompt template like this:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "id": "d088d53c-0e20-4fb9-9d54-b0e989b998b0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from langchain.prompts import ChatPromptTemplate\n",
+ "\n",
+ "chat_template = ChatPromptTemplate.from_messages([\n",
+ " (\"system\", \"You are a helpful AI bot. Your name is {name}.\"),\n",
+ " (\"human\", \"Hello, how are you doing?\"),\n",
+ " (\"ai\", \"I'm doing well, thanks!\"),\n",
+ " (\"human\", \"{user_input}\"),\n",
+ "])\n",
+ "\n",
+ "messages = chat_template.format_messages(\n",
+ " name=\"Bob\",\n",
+ " user_input=\"What is your name?\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d1e7e3ef-ba7d-4ca5-a95c-a0488c9679e5",
+ "metadata": {},
+ "source": [
+ "`ChatPromptTemplate.from_messages` accepts a variety of message representations.\n",
+ "\n",
+ "For example, in addition to using the 2-tuple representation of (type, content) used\n",
+ "above, you could pass in an instance of `MessagePromptTemplate` or `BaseMessage`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "id": "f6632eda-582f-4f29-882f-108587f0397c",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "AIMessage(content='I absolutely love indulging in delicious treats!')"
+ ]
+ },
+ "execution_count": 23,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from langchain.chat_models import ChatOpenAI\n",
+ "from langchain.prompts import HumanMessagePromptTemplate\n",
+ "from langchain.schema.messages import SystemMessage\n",
+ "\n",
+ "chat_template = ChatPromptTemplate.from_messages(\n",
+ " [\n",
+ " SystemMessage(\n",
+ " content=(\n",
+ " \"You are a helpful assistant that re-writes the user's text to \"\n",
+ " \"sound more upbeat.\"\n",
+ " )\n",
+ " ),\n",
+ " HumanMessagePromptTemplate.from_template(\"{text}\"),\n",
+ " ]\n",
+ ")\n",
+ "\n",
+ "llm = ChatOpenAI()\n",
+ "llm(chat_template.format_messages(text='i dont like eating tasty things.'))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "8c4b46da-d51b-4801-955f-ba4bf139162f",
+ "metadata": {},
+ "source": [
+ "This provides you with a lot of flexibility in how you construct your chat prompts."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3a5fe78c-572c-4e87-b02f-7d33126fb605",
+ "metadata": {},
+ "source": [
+ "## LCEL\n",
+ "\n",
+ "`PromptTemplate` and `ChatPromptTemplate` implement the [Runnable interface](/docs/expression_language/interface), the basic building block of the [LangChain Expression Language (LCEL)](/docs/expression_language/). This means they support `invoke`, `ainvoke`, `stream`, `astream`, `batch`, `abatch`, and `astream_log` calls.\n",
+ "\n",
+ "`PromptTemplate` accepts a dictionary (of the prompt variables) and returns a `StringPromptValue`. A `ChatPromptTemplate` accepts a dictionary and returns a `ChatPromptValue`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 24,
+ "id": "0f0e860b-95e0-4653-8bab-c5d58b0f7d67",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "StringPromptValue(text='Tell me a joke')"
+ ]
+ },
+ "execution_count": 24,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "prompt_val = prompt_template.invoke({\"adjective\": \"funny\", \"content\": \"chickens\"})\n",
+ "prompt_val"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 25,
+ "id": "c0dac782-5144-4489-8d77-eba47f1cd1c4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "'Tell me a joke'"
+ ]
+ },
+ "execution_count": 25,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "prompt_val.to_string()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 26,
+ "id": "a8e3ac32-f690-4d3d-bcb2-27b7931beab2",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[HumanMessage(content='Tell me a joke')]"
+ ]
+ },
+ "execution_count": 26,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "prompt_val.to_messages()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "4516257f-0c3b-4851-9e82-8c9e09111444",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "chat_val = chat_template.invoke({\"text\": 'i dont like eating tasty things.'})"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "id": "7adfe927-ba1d-425f-904c-0328e1a10c18",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "[SystemMessage(content=\"You are a helpful assistant that re-writes the user's text to sound more upbeat.\"),\n",
+ " HumanMessage(content='i dont like eating tasty things.')]"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chat_val.to_messages()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "id": "37c9e2e4-a2e8-48a9-a732-01c025a21362",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "\"System: You are a helpful assistant that re-writes the user's text to sound more upbeat.\\nHuman: i dont like eating tasty things.\""
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "chat_val.to_string()"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "poetry-venv",
+ "language": "python",
+ "name": "poetry-venv"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.1"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/docs/modules/model_io/prompts/prompt_templates/index.mdx b/docs/docs/modules/model_io/prompts/prompt_templates/index.mdx
deleted file mode 100644
index 6f6d36124c960..0000000000000
--- a/docs/docs/modules/model_io/prompts/prompt_templates/index.mdx
+++ /dev/null
@@ -1,133 +0,0 @@
----
-sidebar_position: 0
----
-
-# Prompt templates
-
-Prompt templates are pre-defined recipes for generating prompts for language models.
-
-A template may include instructions, few-shot examples, and specific context and
-questions appropriate for a given task.
-
-LangChain provides tooling to create and work with prompt templates.
-
-LangChain strives to create model agnostic templates to make it easy to reuse
-existing templates across different language models.
-
-Typically, language models expect the prompt to either be a string or else a list of chat messages.
-
-## Prompt template
-
-Use `PromptTemplate` to create a template for a string prompt.
-
-By default, `PromptTemplate` uses [Python's str.format](https://docs.python.org/3/library/stdtypes.html#str.format)
-syntax for templating; however other templating syntax is available (e.g., `jinja2`).
-
-```python
-from langchain.prompts import PromptTemplate
-
-prompt_template = PromptTemplate.from_template(
- "Tell me a {adjective} joke about {content}."
-)
-prompt_template.format(adjective="funny", content="chickens")
-```
-
-
-
-```
-"Tell me a funny joke about chickens."
-```
-
-
-
-The template supports any number of variables, including no variables:
-
-```python
-from langchain.prompts import PromptTemplate
-
-prompt_template = PromptTemplate.from_template(
-"Tell me a joke"
-)
-prompt_template.format()
-```
-
-For additional validation, specify `input_variables` explicitly. These variables
-will be compared against the variables present in the template string during instantiation, raising an exception if
-there is a mismatch; for example,
-
-```python
-from langchain.prompts import PromptTemplate
-
-invalid_prompt = PromptTemplate(
- input_variables=["adjective"],
- template="Tell me a {adjective} joke about {content}."
-)
-```
-
-You can create custom prompt templates that format the prompt in any way you want.
-For more information, see [Custom Prompt Templates](./custom_prompt_template.html).
-
-
-
-## Chat prompt template
-
-The prompt to [chat models](../models/chat) is a list of chat messages.
-
-Each chat message is associated with content, and an additional parameter called `role`.
-For example, in the OpenAI [Chat Completions API](https://platform.openai.com/docs/guides/chat/introduction), a chat message can be associated with an AI assistant, a human or a system role.
-
-Create a chat prompt template like this:
-
-```python
-from langchain.prompts import ChatPromptTemplate
-
-template = ChatPromptTemplate.from_messages([
- ("system", "You are a helpful AI bot. Your name is {name}."),
- ("human", "Hello, how are you doing?"),
- ("ai", "I'm doing well, thanks!"),
- ("human", "{user_input}"),
-])
-
-messages = template.format_messages(
- name="Bob",
- user_input="What is your name?"
-)
-```
-
-`ChatPromptTemplate.from_messages` accepts a variety of message representations.
-
-For example, in addition to using the 2-tuple representation of (type, content) used
-above, you could pass in an instance of `MessagePromptTemplate` or `BaseMessage`.
-
-```python
-from langchain.prompts import ChatPromptTemplate
-from langchain.prompts.chat import SystemMessage, HumanMessagePromptTemplate
-
-template = ChatPromptTemplate.from_messages(
- [
- SystemMessage(
- content=(
- "You are a helpful assistant that re-writes the user's text to "
- "sound more upbeat."
- )
- ),
- HumanMessagePromptTemplate.from_template("{text}"),
- ]
-)
-
-from langchain.chat_models import ChatOpenAI
-
-llm = ChatOpenAI()
-llm(template.format_messages(text='i dont like eating tasty things.'))
-```
-
-
-
-```
-AIMessage(content='I absolutely adore indulging in delicious treats!', additional_kwargs={}, example=False)
-```
-
-
-
-This provides you with a lot of flexibility in how you construct your chat prompts.
-