From b1809ea7120f6444290576814758a24a88c17ed3 Mon Sep 17 00:00:00 2001
From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com>
Date: Tue, 8 Oct 2024 15:49:54 +1300
Subject: [PATCH] Add docs and lint files

---
 .../tools/azure_content_safety.ipynb          | 168 +++++++++++++++++++
 .../azure_ai_services/test_content_safety.py  |   2 +-
 2 files changed, 169 insertions(+), 1 deletion(-)
 create mode 100644 docs/docs/integrations/tools/azure_content_safety.ipynb

diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb
new file mode 100644
index 0000000000000..9dbbc4275735f
--- /dev/null
+++ b/docs/docs/integrations/tools/azure_content_safety.ipynb
@@ -0,0 +1,168 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# `AzureContentSafetyTextTool`"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    ">The `AzureContentSafetyTextTool` acts as a wrapper around the Azure AI Content Safety service.\n",
+    ">The tool detects harmful content in text according to Azure's content safety policies."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Example\n",
+    "\n",
+    "Import the required dependencies"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "from langchain import hub"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We will use a prompt to tell the model what to do. LangChain prompts can be configured manually; however, for the sake of simplicity, we will use a premade prompt from LangSmith. This requires an API key, which can be set up [here](https://www.langchain.com/langsmith) after registration."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "LANGSMITH_KEY = os.environ[\"LANGSMITH_KEY\"]\n",
+    "prompt = hub.pull(\"hwchase17/structured-chat-agent\", api_key=LANGSMITH_KEY)"
+   ]
+  },
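+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Before combining the tool with a model, it can be invoked on its own. The cell below is a minimal sketch: it assumes the `CONTENT_SAFETY_KEY` and `CONTENT_SAFETY_ENDPOINT` environment variables are set, and it calls the tool through the standard `invoke` interface that LangChain tools expose. For benign text, the tool reports zero severity per harm category (the unit test in this patch expects output like `Harm: 0`)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain_community.tools.azure_ai_services.content_safety import (\n",
+    "    AzureContentSafetyTextTool,\n",
+    ")\n",
+    "\n",
+    "# Standalone check; uses the same credentials as the agent example below.\n",
+    "content_safety = AzureContentSafetyTextTool(\n",
+    "    content_safety_key=os.environ[\"CONTENT_SAFETY_KEY\"],\n",
+    "    content_safety_endpoint=os.environ[\"CONTENT_SAFETY_ENDPOINT\"],\n",
+    ")\n",
+    "content_safety.invoke(\"I hate you\")"
+   ]
+  },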
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now we can combine the `AzureContentSafetyTextTool` with a model, using `create_structured_chat_agent`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from langchain.agents import AgentExecutor, create_structured_chat_agent\n",
+    "from langchain_community.tools.azure_ai_services.content_safety import (\n",
+    "    AzureContentSafetyTextTool,\n",
+    ")\n",
+    "from langchain_openai import AzureChatOpenAI"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "tools = [\n",
+    "    AzureContentSafetyTextTool(\n",
+    "        content_safety_key=os.environ[\"CONTENT_SAFETY_KEY\"],\n",
+    "        content_safety_endpoint=os.environ[\"CONTENT_SAFETY_ENDPOINT\"],\n",
+    "    )\n",
+    "]\n",
+    "\n",
+    "model = AzureChatOpenAI(\n",
+    "    openai_api_version=os.environ[\"OPENAI_API_VERSION\"],\n",
+    "    azure_deployment=os.environ[\"COMPLETIONS_MODEL\"],\n",
+    "    azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n",
+    "    api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n",
+    ")\n",
+    "\n",
+    "agent = create_structured_chat_agent(model, tools, prompt)\n",
+    "\n",
+    "agent_executor = AgentExecutor(\n",
+    "    agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Then, by using `.invoke`, the model can be told what to do and can assess whether using the tools it was given would assist in its response."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "text = \"I hate you\"\n",
+    "agent_executor.invoke(\n",
+    "    {\"input\": f\"Can you check the following text for harmful content: {text}\"}\n",
+    ")"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

diff --git a/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py
index 11e414fa8e420..d2971a8d464fd 100644
--- a/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py
+++ b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py
@@ -69,4 +69,4 @@ def test_no_harmful_content_detected(mocker: Any) -> None:
     output = "Harm: 0\n"
 
     result = tool._run(input)
-    assert result == output
\ No newline at end of file
+    assert result == output