Initialize GuardrailsOutputParser documentation

irgolic committed Dec 13, 2023
1 parent 1989142 commit b34088c

Showing 1 changed file with 226 additions and 0 deletions.

226 changes: 226 additions & 0 deletions docs/docs/modules/model_io/output_parsers/guardrails.ipynb
@@ -0,0 +1,226 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "a1ae632a",
"metadata": {},
"source": [
"# Guardrails parser\n",
"\n",
"This output parser invokes [Guardrails](https://docs.guardrailsai.com/) to parse the output of a language model, given a schema defined either in XML or as a pydantic model. Guardrails validates strings, or fields in a JSON, using [its predefined validators](https://docs.guardrailsai.com/api_reference/validators/), or a custom validator you define."
]
},
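{
"cell_type": "markdown",
"source": [
"The examples below assume the `guardrails-ai` package is installed alongside `langchain`, and that an OpenAI API key is available as the `OPENAI_API_KEY` environment variable for the `ChatOpenAI` calls."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"# Uncomment to install the Guardrails package used by this parser.\n",
"# %pip install guardrails-ai"
],
"metadata": {
"collapsed": false
}
},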
{
"cell_type": "code",
"execution_count": 10,
"outputs": [],
"source": [
"from guardrails import Guard\n",
"from guardrails.validators import LowerCase, TwoWords, UpperCase\n",
"from langchain.chat_models import ChatOpenAI\n",
"from langchain.output_parsers import GuardrailsOutputParser\n",
"from langchain.prompts import PromptTemplate\n",
"from pydantic import BaseModel, Field"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-12-13T17:40:28.263352Z",
"start_time": "2023-12-13T17:40:28.258570Z"
}
}
},
{
"cell_type": "markdown",
"source": [
"## With strings"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 11,
"outputs": [
{
"data": {
"text/plain": "'colorful comforts'"
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"guard = Guard.from_string([LowerCase(on_fail=\"fix\")])\n",
"\n",
"guardrails_parser = GuardrailsOutputParser.from_guard(guard)\n",
"\n",
"prompt = PromptTemplate.from_template(\n",
" \"What is a good name for a company that makes {product}?\"\n",
")\n",
"\n",
"runnable = prompt | ChatOpenAI() | guardrails_parser\n",
"\n",
"runnable.invoke({\"product\": \"colorful socks\"})"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"end_time": "2023-12-13T17:40:29.345599Z",
"start_time": "2023-12-13T17:40:28.664661Z"
}
}
},
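{
"cell_type": "markdown",
"source": [
"## With a custom validator\n",
"\n",
"Besides the predefined validators, Guardrails lets you register your own. The cell below is a minimal sketch, assuming the `Validator`/`register_validator`/`PassResult`/`FailResult` API exposed by `guardrails.validators`; the validator name `starts-with-capital` and the `StartsWithCapital` class are made up for this illustration."
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": null,
"outputs": [],
"source": [
"from typing import Any, Dict\n",
"\n",
"from guardrails.validators import (\n",
"    FailResult,\n",
"    PassResult,\n",
"    ValidationResult,\n",
"    Validator,\n",
"    register_validator,\n",
")\n",
"\n",
"\n",
"# Hypothetical validator: passes strings that start with a capital letter\n",
"# and, when on_fail=\"fix\", capitalizes the value instead of erroring.\n",
"@register_validator(name=\"starts-with-capital\", data_type=\"string\")\n",
"class StartsWithCapital(Validator):\n",
"    def validate(self, value: Any, metadata: Dict) -> ValidationResult:\n",
"        if value and value[0].isupper():\n",
"            return PassResult()\n",
"        return FailResult(\n",
"            error_message=\"Value must start with a capital letter.\",\n",
"            fix_value=value.capitalize(),\n",
"        )\n",
"\n",
"\n",
"# A custom validator plugs into a Guard just like a built-in one:\n",
"guard = Guard.from_string([StartsWithCapital(on_fail=\"fix\")])\n",
"parser = GuardrailsOutputParser.from_guard(guard)"
],
"metadata": {
"collapsed": false
}
},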
{
"cell_type": "markdown",
"source": [
"## With pydantic"
],
"metadata": {
"collapsed": false
}
},
{
"cell_type": "code",
"execution_count": 20,
"id": "b3f16168",
"metadata": {
"ExecuteTime": {
"end_time": "2023-12-13T17:42:45.201398Z",
"start_time": "2023-12-13T17:42:44.058209Z"
}
},
"outputs": [
{
"data": {
"text/plain": "{'pet_type': 'DOG', 'name': 'Buddy'}"
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Here's an example of a Guard constructed from a pydantic model\n",
"\n",
"\n",
"class Pet(BaseModel):\n",
" pet_type: str = Field(validators=[UpperCase(on_fail=\"fix\")])\n",
" name: str = Field(validators=[TwoWords(on_fail=\"fix\")])\n",
"\n",
"\n",
"prompt = \"\"\"\n",
" What kind of pet should I get and what should I name it?\n",
"\n",
" ${gr.complete_json_suffix_v2}\n",
"\"\"\"\n",
"\n",
"guard = Guard.from_pydantic(Pet, prompt=prompt)\n",
"parser = GuardrailsOutputParser.from_guard(guard)\n",
"\n",
"prompt_template = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{prompt}\\n\",\n",
" input_variables=[\"prompt\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"runnable = prompt_template | ChatOpenAI() | parser\n",
"\n",
"runnable.invoke(\n",
" {\n",
" \"product\": \"colorful socks\",\n",
" \"prompt\": prompt,\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "fd8c3347-4d0b-459b-ab7b-cf5443297026",
"metadata": {
"collapsed": false
},
"source": [
"### With XML"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "03049f88",
"metadata": {
"ExecuteTime": {
"end_time": "2023-12-13T17:42:35.469586Z",
"start_time": "2023-12-13T17:42:34.578496Z"
}
},
"outputs": [
{
"data": {
"text/plain": "{'pet_type': 'DOG', 'name': 'Buddy'}"
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Here's another example, of a Guard constructed from an XML RAIL schema\n",
"\n",
"rail_spec = \"\"\"\n",
"<rail version=\"0.1\">\n",
"<output>\n",
" <string name=\"pet_type\" validators=\"uppercase\" />\n",
" <string name=\"name\" validators=\"two-words\" />\n",
"</output>\n",
"<prompt>\n",
" What kind of pet should I get and what should I name it?\n",
" ${gr.complete_json_suffix_v2}\n",
"</prompt>\n",
"</rail>\n",
"\"\"\"\n",
"\n",
"guard = Guard.from_rail_string(rail_spec)\n",
"parser = GuardrailsOutputParser.from_guard(guard)\n",
"\n",
"prompt_template = PromptTemplate(\n",
" template=\"Answer the user query.\\n{format_instructions}\\n{prompt}\\n\",\n",
" input_variables=[\"prompt\"],\n",
" partial_variables={\"format_instructions\": parser.get_format_instructions()},\n",
")\n",
"\n",
"runnable = prompt_template | ChatOpenAI() | parser\n",
"\n",
"runnable.invoke(\n",
" {\n",
" \"product\": \"colorful socks\",\n",
" \"prompt\": prompt,\n",
" }\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
