From 9a7d45a03bdf437b2bc88315e04ad2723005faca Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 17:18:43 +1200 Subject: [PATCH 01/20] work on first version of content safety tool --- .../content_safety.py | 136 ++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py new file mode 100644 index 0000000000000..3e67a9b2387d3 --- /dev/null +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +import logging +from typing import Any, Dict, Optional + +from langchain_core.callbacks import CallbackManagerForToolRun +from langchain_core.pydantic_v1 import root_validator +from langchain_core.tools import BaseTool +from langchain_core.utils import get_from_dict_or_env + +logger = logging.getLogger(__name__) + + +class AzureContentSafetyTextTool(BaseTool): + """ + A tool that interacts with the Azure AI Content Safety API. + + This tool queries the Azure AI Content Safety API to analyze text for harmful + content and identify sentiment. It requires an API key and endpoint, + which can be set up as described in the following guide: + + https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python + + Attributes: + content_safety_key (str): + The API key used to authenticate requests with Azure Content Safety API. + content_safety_endpoint (str): + The endpoint URL for the Azure Content Safety API. + content_safety_client (Any): + An instance of the Azure Content Safety Client used for making API requests. + + Methods: + validate_environment(values: Dict) -> Dict: + Validates the presence of API key and endpoint in the environment + and initializes the Content Safety Client. + + _sentiment_analysis(text: str) -> Dict: + Analyzes the provided text to assess its sentiment and safety, + returning the analysis results. + + _run(query: str, + run_manager: Optional[CallbackManagerForToolRun] = None) -> str: + Uses the tool to analyze the given query and returns the result. + Raises a RuntimeError if an exception occurs. + """ + + content_safety_key: str = "" #: :meta private: + content_safety_endpoint: str = "" #: :meta private: + content_safety_client: Any #: :meta private: + + name: str = "azure_content_safety_tool" + description: str = ( + "A wrapper around Azure AI Content Safety. " + '''Useful for when you need to identify the sentiment of text + and whether or not a text is harmful. ''' + "Input should be text." + ) + + @root_validator(pre=True) + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and endpoint exists in environment.""" + content_safety_key = get_from_dict_or_env( + values, "content_safety_key", "CONTENT_SAFETY_API_KEY" + ) + + content_safety_endpoint = get_from_dict_or_env( + values, "content_safety_endpoint", "CONTENT_SAFETY_ENDPOINT" + ) + + try: + import azure.ai.contentsafety as sdk + from azure.core.credentials import AzureKeyCredential + + values["content_safety_client"] = sdk.ContentSafetyClient( + endpoint=content_safety_endpoint, + credential=AzureKeyCredential(content_safety_key), + ) + + except ImportError: + raise ImportError( + "azure-ai-contentsafety is not installed. 
" + "Run `pip install azure-ai-contentsafety` to install." + ) + + return values + + def _sentiment_analysis(self, text: str) -> Dict: + """ + Perform sentiment analysis on the provided text. + + This method uses the Azure Content Safety Client to analyze the text and determine its sentiment + and safety categories. + + Args: + text (str): The text to be analyzed. + + Returns: + Dict: The analysis results containing sentiment and safety categories. + """ + try: + from azure.ai.contentsafety.models import AnalyzeTextOptions + except: + pass + + request = AnalyzeTextOptions(text=text) + response = self.content_safety_client.analyze_text(request) + result = response.categories_analysis + return result + + def _run( + self, + query: str, + run_manager: Optional[CallbackManagerForToolRun] = None, + ) -> str: + """ + Analyze the given query using the tool. + + This method calls `_sentiment_analysis` to process the query and returns the result. It raises + a RuntimeError if an exception occurs during analysis. + + Args: + query (str): The query text to be analyzed. + run_manager (Optional[CallbackManagerForToolRun], optional): A callback manager for tracking the tool run. Defaults to None. + + Returns: + str: The result of the sentiment analysis. + + Raises: + RuntimeError: If an error occurs while running the tool. + """ + try: + return self._sentiment_analysis(query) + except Exception as e: + raise RuntimeError( + f"Error while running AzureContentSafetyTextTool: {e}" + ) \ No newline at end of file From e04b3c8bfa36be515f47fbac20b6088754b30620 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 17:23:09 +1200 Subject: [PATCH 02/20] lint file --- .../content_safety.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index 3e67a9b2387d3..afc2690960ad1 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -88,8 +88,8 @@ def _sentiment_analysis(self, text: str) -> Dict: """ Perform sentiment analysis on the provided text. - This method uses the Azure Content Safety Client to analyze the text and determine its sentiment - and safety categories. + This method uses the Azure Content Safety Client to analyze + the text and determine its sentiment and safety categories. Args: text (str): The text to be analyzed. @@ -97,10 +97,8 @@ def _sentiment_analysis(self, text: str) -> Dict: Returns: Dict: The analysis results containing sentiment and safety categories. """ - try: - from azure.ai.contentsafety.models import AnalyzeTextOptions - except: - pass + + from azure.ai.contentsafety.models import AnalyzeTextOptions request = AnalyzeTextOptions(text=text) response = self.content_safety_client.analyze_text(request) @@ -115,12 +113,15 @@ def _run( """ Analyze the given query using the tool. - This method calls `_sentiment_analysis` to process the query and returns the result. It raises - a RuntimeError if an exception occurs during analysis. + This method calls `_sentiment_analysis` to process the + query and returns the result. It raises a RuntimeError if an + exception occurs during analysis. Args: - query (str): The query text to be analyzed. 
- run_manager (Optional[CallbackManagerForToolRun], optional): A callback manager for tracking the tool run. Defaults to None. + query (str): + The query text to be analyzed. + run_manager (Optional[CallbackManagerForToolRun], optional): + A callback manager for tracking the tool run. Defaults to None. Returns: str: The result of the sentiment analysis. From f3e6cfa2c923e8ab68ae3a0c15b9c4e8b1694bfe Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 17:31:24 +1200 Subject: [PATCH 03/20] update init --- .../tools/azure_cognitive_services/__init__.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py b/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py index 1121e4e89d1f4..b495cd3ecd96c 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py @@ -15,6 +15,9 @@ from langchain_community.tools.azure_cognitive_services.text_analytics_health import ( AzureCogsTextAnalyticsHealthTool, ) +from langchain_community.tools.azure_cognitive_services.content_safety import ( + AzureContentSafetyTextTool, +) __all__ = [ "AzureCogsImageAnalysisTool", @@ -22,4 +25,5 @@ "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", + "AzureContentSafetyTextTool", ] From c18d000387885a97aa300c048875d188087dde67 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 18:01:17 +1200 Subject: [PATCH 04/20] update class to use `__init__` to validate environment instead of `root_validator` --- .../azure_cognitive_services/__init__.py | 4 -- .../content_safety.py | 54 ++++++++++++------- 2 files changed, 36 insertions(+), 22 deletions(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py b/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py index b495cd3ecd96c..1121e4e89d1f4 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/__init__.py @@ -15,9 +15,6 @@ from langchain_community.tools.azure_cognitive_services.text_analytics_health import ( AzureCogsTextAnalyticsHealthTool, ) -from langchain_community.tools.azure_cognitive_services.content_safety import ( - AzureContentSafetyTextTool, -) __all__ = [ "AzureCogsImageAnalysisTool", @@ -25,5 +22,4 @@ "AzureCogsSpeech2TextTool", "AzureCogsText2SpeechTool", "AzureCogsTextAnalyticsHealthTool", - "AzureContentSafetyTextTool", ] diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index afc2690960ad1..9be95c1a8acc0 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -1,12 +1,11 @@ from __future__ import annotations import logging +import os from typing import Any, Dict, Optional from langchain_core.callbacks import CallbackManagerForToolRun -from langchain_core.pydantic_v1 import root_validator from langchain_core.tools import BaseTool -from langchain_core.utils import get_from_dict_or_env logger = logging.getLogger(__name__) @@ -30,10 +29,6 @@ class 
AzureContentSafetyTextTool(BaseTool): An instance of the Azure Content Safety Client used for making API requests. Methods: - validate_environment(values: Dict) -> Dict: - Validates the presence of API key and endpoint in the environment - and initializes the Content Safety Client. - _sentiment_analysis(text: str) -> Dict: Analyzes the provided text to assess its sentiment and safety, returning the analysis results. @@ -56,22 +51,44 @@ class AzureContentSafetyTextTool(BaseTool): "Input should be text." ) - @root_validator(pre=True) - def validate_environment(cls, values: Dict) -> Dict: - """Validate that api key and endpoint exists in environment.""" - content_safety_key = get_from_dict_or_env( - values, "content_safety_key", "CONTENT_SAFETY_API_KEY" - ) + def __init__( + self, + *, + content_safety_key: Optional[str] = None, + content_safety_endpoint: Optional[str] = None, + ) -> None: + """ + Initialize the AzureContentSafetyTextTool with the given API key and endpoint. + + This constructor sets up the API key and endpoint, and initializes + the Azure Content Safety Client. If API key or endpoint is not provided, + they are fetched from environment variables. - content_safety_endpoint = get_from_dict_or_env( - values, "content_safety_endpoint", "CONTENT_SAFETY_ENDPOINT" - ) + Args: + content_safety_key (Optional[str]): + The API key for Azure Content Safety API. If not provided, + it will be fetched from the environment + variable 'CONTENT_SAFETY_API_KEY'. + content_safety_endpoint (Optional[str]): + The endpoint URL for Azure Content Safety API. If not provided, + it will be fetched from the environment + variable 'CONTENT_SAFETY_ENDPOINT'. + Raises: + ImportError: If the 'azure-ai-contentsafety' package is not installed. + ValueError: + If API key or endpoint is not provided + and environment variables are missing. + """ + content_safety_key = (content_safety_key or + os.environ['CONTENT_SAFETY_API_KEY']) + content_safety_endpoint = (content_safety_endpoint or + os.environ['CONTENT_SAFETY_ENDPOINT']) try: import azure.ai.contentsafety as sdk from azure.core.credentials import AzureKeyCredential - values["content_safety_client"] = sdk.ContentSafetyClient( + content_safety_client = sdk.ContentSafetyClient( endpoint=content_safety_endpoint, credential=AzureKeyCredential(content_safety_key), ) @@ -81,8 +98,9 @@ def validate_environment(cls, values: Dict) -> Dict: "azure-ai-contentsafety is not installed. " "Run `pip install azure-ai-contentsafety` to install." 
) - - return values + super().__init__(content_safety_key=content_safety_key, + content_safety_endpoint=content_safety_endpoint, + content_safety_client=content_safety_client) def _sentiment_analysis(self, text: str) -> Dict: """ From e0860549da6a4a9ba7b57cdf2ea626491618e3f6 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 18:08:50 +1200 Subject: [PATCH 05/20] adhere to linting recommendations --- .../content_safety.py | 97 +++++++++---------- 1 file changed, 47 insertions(+), 50 deletions(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index 9be95c1a8acc0..d6c7780858dbd 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -9,33 +9,32 @@ logger = logging.getLogger(__name__) - class AzureContentSafetyTextTool(BaseTool): """ A tool that interacts with the Azure AI Content Safety API. - This tool queries the Azure AI Content Safety API to analyze text for harmful + This tool queries the Azure AI Content Safety API to analyze text for harmful content and identify sentiment. It requires an API key and endpoint, which can be set up as described in the following guide: - + https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python Attributes: - content_safety_key (str): + content_safety_key (str): The API key used to authenticate requests with Azure Content Safety API. - content_safety_endpoint (str): + content_safety_endpoint (str): The endpoint URL for the Azure Content Safety API. - content_safety_client (Any): - An instance of the Azure Content Safety Client used for making API requests. + content_safety_client (Any): + An instance of the Azure Content Safety Client used for making API + requests. Methods: _sentiment_analysis(text: str) -> Dict: - Analyzes the provided text to assess its sentiment and safety, + Analyzes the provided text to assess its sentiment and safety, returning the analysis results. - _run(query: str, - run_manager: Optional[CallbackManagerForToolRun] = None) -> str: - Uses the tool to analyze the given query and returns the result. + _run(query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: + Uses the tool to analyze the given query and returns the result. Raises a RuntimeError if an exception occurs. """ @@ -46,44 +45,41 @@ class AzureContentSafetyTextTool(BaseTool): name: str = "azure_content_safety_tool" description: str = ( "A wrapper around Azure AI Content Safety. " - '''Useful for when you need to identify the sentiment of text - and whether or not a text is harmful. ''' - "Input should be text." + "Useful for when you need to identify the sentiment of text and whether" + " or not a text is harmful. Input should be text." ) def __init__( - self, - *, - content_safety_key: Optional[str] = None, - content_safety_endpoint: Optional[str] = None, - ) -> None: + self, + *, + content_safety_key: Optional[str] = None, + content_safety_endpoint: Optional[str] = None, + ) -> None: """ Initialize the AzureContentSafetyTextTool with the given API key and endpoint. - This constructor sets up the API key and endpoint, and initializes - the Azure Content Safety Client. If API key or endpoint is not provided, - they are fetched from environment variables. 
+ If not provided, the API key and endpoint are fetched from environment + variables. Args: - content_safety_key (Optional[str]): - The API key for Azure Content Safety API. If not provided, - it will be fetched from the environment - variable 'CONTENT_SAFETY_API_KEY'. - content_safety_endpoint (Optional[str]): - The endpoint URL for Azure Content Safety API. If not provided, - it will be fetched from the environment - variable 'CONTENT_SAFETY_ENDPOINT'. + content_safety_key (Optional[str]): + The API key for Azure Content Safety API. If not provided, it will + be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'. + content_safety_endpoint (Optional[str]): + The endpoint URL for Azure Content Safety API. If not provided, it + will be fetched from the environment variable 'CONTENT_SAFETY_ENDPOINT'. Raises: ImportError: If the 'azure-ai-contentsafety' package is not installed. - ValueError: - If API key or endpoint is not provided - and environment variables are missing. + ValueError: If API key or endpoint is not provided and environment + variables are missing. """ - content_safety_key = (content_safety_key or - os.environ['CONTENT_SAFETY_API_KEY']) - content_safety_endpoint = (content_safety_endpoint or - os.environ['CONTENT_SAFETY_ENDPOINT']) + content_safety_key = content_safety_key or os.environ[ + "CONTENT_SAFETY_API_KEY" + ] + content_safety_endpoint = content_safety_endpoint or os.environ[ + "CONTENT_SAFETY_ENDPOINT" + ] try: import azure.ai.contentsafety as sdk from azure.core.credentials import AzureKeyCredential @@ -98,16 +94,18 @@ def __init__( "azure-ai-contentsafety is not installed. " "Run `pip install azure-ai-contentsafety` to install." ) - super().__init__(content_safety_key=content_safety_key, - content_safety_endpoint=content_safety_endpoint, - content_safety_client=content_safety_client) + super().__init__( + content_safety_key=content_safety_key, + content_safety_endpoint=content_safety_endpoint, + content_safety_client=content_safety_client, + ) def _sentiment_analysis(self, text: str) -> Dict: """ Perform sentiment analysis on the provided text. - This method uses the Azure Content Safety Client to analyze - the text and determine its sentiment and safety categories. + This method uses the Azure Content Safety Client to analyze the text and + determine its sentiment and safety categories. Args: text (str): The text to be analyzed. @@ -115,7 +113,6 @@ def _sentiment_analysis(self, text: str) -> Dict: Returns: Dict: The analysis results containing sentiment and safety categories. """ - from azure.ai.contentsafety.models import AnalyzeTextOptions request = AnalyzeTextOptions(text=text) @@ -131,14 +128,13 @@ def _run( """ Analyze the given query using the tool. - This method calls `_sentiment_analysis` to process the - query and returns the result. It raises a RuntimeError if an - exception occurs during analysis. + This method calls `_sentiment_analysis` to process the query and returns + the result. It raises a RuntimeError if an exception occurs during + analysis. Args: - query (str): - The query text to be analyzed. - run_manager (Optional[CallbackManagerForToolRun], optional): + query (str): The query text to be analyzed. + run_manager (Optional[CallbackManagerForToolRun], optional): A callback manager for tracking the tool run. Defaults to None. 
Returns: @@ -152,4 +148,5 @@ def _run( except Exception as e: raise RuntimeError( f"Error while running AzureContentSafetyTextTool: {e}" - ) \ No newline at end of file + ) + From c1943e4ccc307aba0daa4dcdda20472cd06a3d26 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 18:30:54 +1200 Subject: [PATCH 06/20] change description to ensure model's give correct input --- .../tools/azure_cognitive_services/content_safety.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index d6c7780858dbd..b410ae71977f5 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -9,6 +9,7 @@ logger = logging.getLogger(__name__) + class AzureContentSafetyTextTool(BaseTool): """ A tool that interacts with the Azure AI Content Safety API. @@ -33,7 +34,8 @@ class AzureContentSafetyTextTool(BaseTool): Analyzes the provided text to assess its sentiment and safety, returning the analysis results. - _run(query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: + _run(query: str, + run_manager: Optional[CallbackManagerForToolRun] = None) -> str: Uses the tool to analyze the given query and returns the result. Raises a RuntimeError if an exception occurs. """ @@ -46,7 +48,8 @@ class AzureContentSafetyTextTool(BaseTool): description: str = ( "A wrapper around Azure AI Content Safety. " "Useful for when you need to identify the sentiment of text and whether" - " or not a text is harmful. Input should be text." + " or not a text is harmful." + "Input must be text (str)." ) def __init__( @@ -67,7 +70,8 @@ def __init__( be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'. content_safety_endpoint (Optional[str]): The endpoint URL for Azure Content Safety API. If not provided, it - will be fetched from the environment variable 'CONTENT_SAFETY_ENDPOINT'. + will be fetched from the environment variable + 'CONTENT_SAFETY_ENDPOINT'. Raises: ImportError: If the 'azure-ai-contentsafety' package is not installed. From fe863b3aa841364c19389e86d5726f24caf65d51 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 18:35:35 +1200 Subject: [PATCH 07/20] reformat file with ruff --- .../content_safety.py | 23 ++++++++----------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index b410ae71977f5..25100caba15be 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -15,7 +15,7 @@ class AzureContentSafetyTextTool(BaseTool): A tool that interacts with the Azure AI Content Safety API. This tool queries the Azure AI Content Safety API to analyze text for harmful - content and identify sentiment. It requires an API key and endpoint, + content and identify sentiment. 
It requires an API key and endpoint, which can be set up as described in the following guide: https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python @@ -34,7 +34,7 @@ class AzureContentSafetyTextTool(BaseTool): Analyzes the provided text to assess its sentiment and safety, returning the analysis results. - _run(query: str, + _run(query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str: Uses the tool to analyze the given query and returns the result. Raises a RuntimeError if an exception occurs. @@ -48,7 +48,7 @@ class AzureContentSafetyTextTool(BaseTool): description: str = ( "A wrapper around Azure AI Content Safety. " "Useful for when you need to identify the sentiment of text and whether" - " or not a text is harmful." + " or not a text is harmful." "Input must be text (str)." ) @@ -70,7 +70,7 @@ def __init__( be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'. content_safety_endpoint (Optional[str]): The endpoint URL for Azure Content Safety API. If not provided, it - will be fetched from the environment variable + will be fetched from the environment variable 'CONTENT_SAFETY_ENDPOINT'. Raises: @@ -78,12 +78,10 @@ def __init__( ValueError: If API key or endpoint is not provided and environment variables are missing. """ - content_safety_key = content_safety_key or os.environ[ - "CONTENT_SAFETY_API_KEY" - ] - content_safety_endpoint = content_safety_endpoint or os.environ[ - "CONTENT_SAFETY_ENDPOINT" - ] + content_safety_key = content_safety_key or os.environ["CONTENT_SAFETY_API_KEY"] + content_safety_endpoint = ( + content_safety_endpoint or os.environ["CONTENT_SAFETY_ENDPOINT"] + ) try: import azure.ai.contentsafety as sdk from azure.core.credentials import AzureKeyCredential @@ -150,7 +148,4 @@ def _run( try: return self._sentiment_analysis(query) except Exception as e: - raise RuntimeError( - f"Error while running AzureContentSafetyTextTool: {e}" - ) - + raise RuntimeError(f"Error while running AzureContentSafetyTextTool: {e}") From 28fb0e7ae79f136f407633388505f4ae29eb3f5c Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Sep 2024 18:39:41 +1200 Subject: [PATCH 08/20] change return type of function --- .../tools/azure_cognitive_services/content_safety.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py index 25100caba15be..9964760e7ccc5 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py @@ -126,7 +126,7 @@ def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> str: + ) -> Dict: """ Analyze the given query using the tool. 
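A minimal usage sketch of the tool as it stands after the patches above (an illustrative example, not part of the patch series; it assumes `azure-ai-contentsafety` is installed, that real Content Safety credentials are available, and it uses the pre-rename module path from PATCH 01-08):

    import os

    from langchain_community.tools.azure_cognitive_services.content_safety import (
        AzureContentSafetyTextTool,
    )

    # Credentials are keyword-only; if omitted, __init__ falls back to the
    # CONTENT_SAFETY_API_KEY / CONTENT_SAFETY_ENDPOINT environment variables.
    tool = AzureContentSafetyTextTool(
        content_safety_key=os.environ["CONTENT_SAFETY_API_KEY"],
        content_safety_endpoint=os.environ["CONTENT_SAFETY_ENDPOINT"],
    )

    # BaseTool exposes `invoke`; as of PATCH 08, `_run` returns the raw
    # categories_analysis result from the Content Safety client.
    print(tool.invoke("Text to screen for harmful content"))
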
From 61a815f0f60782a29fcb3632f95cf79efde7d723 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Fri, 4 Oct 2024 14:16:20 +1300 Subject: [PATCH 09/20] Update class to use new v3 `pydantic` validation methods --- .../content_safety.py | 74 ++++++------------- 1 file changed, 24 insertions(+), 50 deletions(-) rename libs/community/langchain_community/tools/{azure_cognitive_services => azure_ai_services}/content_safety.py (61%) diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py similarity index 61% rename from libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py rename to libs/community/langchain_community/tools/azure_ai_services/content_safety.py index 9964760e7ccc5..5c60c3291977b 100644 --- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -1,11 +1,12 @@ from __future__ import annotations import logging -import os from typing import Any, Dict, Optional from langchain_core.callbacks import CallbackManagerForToolRun from langchain_core.tools import BaseTool +from langchain_core.utils import get_from_dict_or_env +from pydantic import model_validator logger = logging.getLogger(__name__) @@ -28,16 +29,6 @@ class AzureContentSafetyTextTool(BaseTool): content_safety_client (Any): An instance of the Azure Content Safety Client used for making API requests. - - Methods: - _sentiment_analysis(text: str) -> Dict: - Analyzes the provided text to assess its sentiment and safety, - returning the analysis results. - - _run(query: str, - run_manager: Optional[CallbackManagerForToolRun] = None) -> str: - Uses the tool to analyze the given query and returns the result. - Raises a RuntimeError if an exception occurs. """ content_safety_key: str = "" #: :meta private: @@ -52,41 +43,20 @@ class AzureContentSafetyTextTool(BaseTool): "Input must be text (str)." ) - def __init__( - self, - *, - content_safety_key: Optional[str] = None, - content_safety_endpoint: Optional[str] = None, - ) -> None: - """ - Initialize the AzureContentSafetyTextTool with the given API key and endpoint. - - If not provided, the API key and endpoint are fetched from environment - variables. - - Args: - content_safety_key (Optional[str]): - The API key for Azure Content Safety API. If not provided, it will - be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'. - content_safety_endpoint (Optional[str]): - The endpoint URL for Azure Content Safety API. If not provided, it - will be fetched from the environment variable - 'CONTENT_SAFETY_ENDPOINT'. - - Raises: - ImportError: If the 'azure-ai-contentsafety' package is not installed. - ValueError: If API key or endpoint is not provided and environment - variables are missing. 
- """ - content_safety_key = content_safety_key or os.environ["CONTENT_SAFETY_API_KEY"] - content_safety_endpoint = ( - content_safety_endpoint or os.environ["CONTENT_SAFETY_ENDPOINT"] + @model_validator(mode="before") + @classmethod + def validate_environment(cls, values: Dict) -> Any: + content_safety_key = get_from_dict_or_env( + values, "content_safety_key", "CONTENT_SAFETY_API_KEY" + ) + content_safety_endpoint = get_from_dict_or_env( + values, "content_safety_endpoint", "CONTENT_SAFETY_ENDPOINT" ) try: import azure.ai.contentsafety as sdk from azure.core.credentials import AzureKeyCredential - content_safety_client = sdk.ContentSafetyClient( + values["content_safety_client"] = sdk.ContentSafetyClient( endpoint=content_safety_endpoint, credential=AzureKeyCredential(content_safety_key), ) @@ -96,15 +66,12 @@ def __init__( "azure-ai-contentsafety is not installed. " "Run `pip install azure-ai-contentsafety` to install." ) - super().__init__( - content_safety_key=content_safety_key, - content_safety_endpoint=content_safety_endpoint, - content_safety_client=content_safety_client, - ) - def _sentiment_analysis(self, text: str) -> Dict: + return values + + def _detect_harmful_content(self, text: str) -> list: """ - Perform sentiment analysis on the provided text. + Detect harful content in the provided text. This method uses the Azure Content Safety Client to analyze the text and determine its sentiment and safety categories. @@ -122,11 +89,17 @@ def _sentiment_analysis(self, text: str) -> Dict: result = response.categories_analysis return result + def _format_response(self, result: list) -> str: + formatted_result = "" + for c in result: + formatted_result += f"{c.category}: {c.severity}\n" + return formatted_result + def _run( self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None, - ) -> Dict: + ) -> str: """ Analyze the given query using the tool. @@ -146,6 +119,7 @@ def _run( RuntimeError: If an error occurs while running the tool. 
""" try: - return self._sentiment_analysis(query) + result = self._detect_harmful_content(query) + return self._format_response(result) except Exception as e: raise RuntimeError(f"Error while running AzureContentSafetyTextTool: {e}") From 2afbff54ba1c0c316385698f10356ff54604900b Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 15:34:14 +1300 Subject: [PATCH 10/20] Add unit tests and required dependencies --- libs/community/extended_testing_deps.txt | 1 + .../tools/azure_ai_services/content_safety.py | 2 +- .../azure_ai_services/test_content_safety.py | 72 +++++++++++++++++++ 3 files changed, 74 insertions(+), 1 deletion(-) create mode 100644 libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py diff --git a/libs/community/extended_testing_deps.txt b/libs/community/extended_testing_deps.txt index 0b2cea22981a6..e41a4103e2911 100644 --- a/libs/community/extended_testing_deps.txt +++ b/libs/community/extended_testing_deps.txt @@ -4,6 +4,7 @@ anthropic>=0.3.11,<0.4 arxiv>=1.4,<2 assemblyai>=0.17.0,<0.18 atlassian-python-api>=3.36.0,<4 +azure-ai-contentsafety>=1.0.0 azure-ai-documentintelligence>=1.0.0b1,<2 azure-identity>=1.15.0,<2 azure-search-documents==11.4.0 diff --git a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py index 5c60c3291977b..b96eb46672cce 100644 --- a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -92,7 +92,7 @@ def _detect_harmful_content(self, text: str) -> list: def _format_response(self, result: list) -> str: formatted_result = "" for c in result: - formatted_result += f"{c.category}: {c.severity}\n" + formatted_result += f"{c['category']}: {c['severity']}\n" return formatted_result def _run( diff --git a/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py new file mode 100644 index 0000000000000..11e414fa8e420 --- /dev/null +++ b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py @@ -0,0 +1,72 @@ +"""Tests for the Azure AI Content Safety Text Tool.""" + +from typing import Any + +import pytest + +from langchain_community.tools.azure_ai_services.content_safety import ( + AzureContentSafetyTextTool, +) + + +@pytest.mark.requires("azure.ai.contentsafety") +def test_content_safety(mocker: Any) -> None: + mocker.patch("azure.ai.contentsafety.ContentSafetyClient", autospec=True) + mocker.patch("azure.core.credentials.AzureKeyCredential", autospec=True) + + key = "key" + endpoint = "endpoint" + + tool = AzureContentSafetyTextTool( + content_safety_key=key, content_safety_endpoint=endpoint + ) + assert tool.content_safety_key == key + assert tool.content_safety_endpoint == endpoint + + +@pytest.mark.requires("azure.ai.contentsafety") +def test_harmful_content_detected(mocker: Any) -> None: + key = "key" + endpoint = "endpoint" + + mocker.patch("azure.core.credentials.AzureKeyCredential", autospec=True) + mocker.patch("azure.ai.contentsafety.ContentSafetyClient", autospec=True) + tool = AzureContentSafetyTextTool( + content_safety_key=key, content_safety_endpoint=endpoint + ) + + mock_content_client = mocker.Mock() + mock_content_client.analyze_text.return_value.categories_analysis = [ + {"category": "Harm", "severity": 1} + ] + + 
tool.content_safety_client = mock_content_client + + input = "This text contains harmful content" + output = "Harm: 1\n" + + result = tool._run(input) + assert result == output + + +@pytest.mark.requires("azure.ai.contentsafety") +def test_no_harmful_content_detected(mocker: Any) -> None: + key = "key" + endpoint = "endpoint" + + tool = AzureContentSafetyTextTool( + content_safety_key=key, content_safety_endpoint=endpoint + ) + + mock_content_client = mocker.Mock() + mock_content_client.analyze_text.return_value.categories_analysis = [ + {"category": "Harm", "severity": 0} + ] + + tool.content_safety_client = mock_content_client + + input = "This text contains harmful content" + output = "Harm: 0\n" + + result = tool._run(input) + assert result == output \ No newline at end of file From b1809ea7120f6444290576814758a24a88c17ed3 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 15:49:54 +1300 Subject: [PATCH 11/20] Add docs and lint files --- .../tools/azure_content_safety.ipynb | 143 ++++++++++++++++++ .../azure_ai_services/test_content_safety.py | 2 +- 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 docs/docs/integrations/tools/azure_content_safety.ipynb diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb new file mode 100644 index 0000000000000..9dbbc4275735f --- /dev/null +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -0,0 +1,143 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# `AzureContentSafetyTextTool`" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + ">The `AzureContentSafetyTextTool` acts as a wrapper around the Azure AI Content Safety Service/API.\n", + ">The Tool will detect harmful content according to Azure's Content Safety Policy." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Example\n", + "\n", + "Get the required dependencies" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from langchain import hub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will use a prompt to tell the model what to do. LangChain Prompts can be configured, however for sake of simplicity we will use a premade prompt from LangSmith. This requires an API key which can be setup [here](https://www.langchain.com/langsmith) after registration." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "LANGSMITH_KEY = os.environ[\"LANGSMITH_KEY\"]\n", + "prompt = hub.pull(\"hwchase17/structured-chat-agent\", api_key=LANGSMITH_KEY)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we can use the `AzureContentSafetyTextTool` combine with a model, using `create_structured_chat_agent`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from langchain.agents import AgentExecutor, create_structured_chat_agent\n", + "from langchain_community.tools.azure_ai_services.content_safety import (\n", + " AzureContentSafetyTextTool,\n", + ")\n", + "from langchain_openai import AzureChatOpenAI" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tools = [\n", + " AzureContentSafetyTextTool(\n", + " content_safety_key=os.environ[\"CONTENT_SAFETY_KEY\"],\n", + " content_safety_endpoint=os.environ[\"CONTENT_SAFETY_ENDPOINT\"],\n", + " )\n", + "]\n", + "\n", + "model = AzureChatOpenAI(\n", + " openai_api_version=os.environ[\"OPENAI_API_VERSION\"],\n", + " azure_deployment=os.environ[\"COMPLETIONS_MODEL\"],\n", + " azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n", + " api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n", + ")\n", + "\n", + "agent = create_structured_chat_agent(model, tools, prompt)\n", + "\n", + "agent_executor = AgentExecutor(\n", + " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Then by using `.invoke`, the model can be told what to do and assess if using the tools it was given would assist in it's response." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "input = \"I hate you\"\n", + "agent_executor.invoke(\n", + " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", + ")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py index 11e414fa8e420..d2971a8d464fd 100644 --- a/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py +++ b/libs/community/tests/unit_tests/tools/azure_ai_services/test_content_safety.py @@ -69,4 +69,4 @@ def test_no_harmful_content_detected(mocker: Any) -> None: output = "Harm: 0\n" result = tool._run(input) - assert result == output \ No newline at end of file + assert result == output From ef328e7478011395eb78c0136a2df051975fc731 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:43:26 +1300 Subject: [PATCH 12/20] Add missing headers to docs and update attributes in class --- .../tools/azure_content_safety.ipynb | 41 +++++++++++++++++-- .../tools/azure_ai_services/content_safety.py | 2 +- 2 files changed, 39 insertions(+), 4 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 9dbbc4275735f..648a41876e8cc 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -7,6 +7,13 @@ "# `AzureContentSafetyTextTool`" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Overview" + ] + }, { "cell_type": 
"markdown", "metadata": {}, @@ -19,7 +26,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Example\n", + "## Setup\n", "\n", "Get the required dependencies" ] @@ -72,6 +79,13 @@ "from langchain_openai import AzureChatOpenAI" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Credentials" + ] + }, { "cell_type": "code", "execution_count": null, @@ -90,8 +104,22 @@ " azure_deployment=os.environ[\"COMPLETIONS_MODEL\"],\n", " azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n", " api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n", - ")\n", - "\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "agent = create_structured_chat_agent(model, tools, prompt)\n", "\n", "agent_executor = AgentExecutor(\n", @@ -99,6 +127,13 @@ ")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Invocation" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py index b96eb46672cce..b4e87c150606b 100644 --- a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -33,7 +33,7 @@ class AzureContentSafetyTextTool(BaseTool): content_safety_key: str = "" #: :meta private: content_safety_endpoint: str = "" #: :meta private: - content_safety_client: Any #: :meta private: + content_safety_client: Any = None #: :meta private: name: str = "azure_content_safety_tool" description: str = ( From 7bc9d2ad9bdf8422e69476d79bfa33f35eae5e78 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:10:53 +1300 Subject: [PATCH 13/20] Add remaining missing headers according to CI --- .../tools/azure_content_safety.ipynb | 127 ++++++++++++++++-- .../tools/azure_ai_services/content_safety.py | 2 +- 2 files changed, 117 insertions(+), 12 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 648a41876e8cc..610cf2f865393 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -26,9 +26,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Setup\n", + "### Tool Features\n", "\n", - "Get the required dependencies" + "This integration allows for the detection of harmful or offensive content in text using Azure's Content Safety API. It supports four categories of harmful content: Sexual, Harm, Self-Harm, and Violence." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Integration details\n", + "\n", + "This section provides details about how the Azure AI Content Safety integration works, including setup." ] }, { @@ -46,7 +62,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will use a prompt to tell the model what to do. LangChain Prompts can be configured, however for sake of simplicity we will use a premade prompt from LangSmith. This requires an API key which can be setup [here](https://www.langchain.com/langsmith) after registration." + "We will use a prompt to instruct the model. 
LangChain prompts can be configured, but for simplicity, we will use a premade prompt from LangSmith. This requires an API key, which can be set up [here](https://www.langchain.com/langsmith) after registration." ] }, { @@ -86,18 +102,56 @@ "### Credentials" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Credentials can be set by being passed as parameters and should be stored locally as environment variables." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "content_endpoint = os.environ[\"CONTENT_SAFETY_ENDPOINT\"]\n", + "content_key = os.environ[\"CONTENT_SAFETY_KEY\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Instantiation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Credentials can be passed directly, but they can also be retrieved automatically by the constructor if environment variables named `CONTENT_SAFETY_ENDPOINT` and `CONTENT_SAFETY_KEY` are set." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cs = AzureContentSafetyTextTool(\n", + " content_safety_key=content_key,\n", + " content_safety_endpoint=content_endpoint,\n", + ")" + ] + }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "tools = [\n", - " AzureContentSafetyTextTool(\n", - " content_safety_key=os.environ[\"CONTENT_SAFETY_KEY\"],\n", - " content_safety_endpoint=os.environ[\"CONTENT_SAFETY_ENDPOINT\"],\n", - " )\n", - "]\n", + "tools = [cs]\n", "\n", "model = AzureChatOpenAI(\n", " openai_api_version=os.environ[\"OPENAI_API_VERSION\"],\n", @@ -111,7 +165,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Instantiation" + "## Chaining" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Creating an `AgentExecutor` chain allows a model to use tools to assist in it's response." ] }, { @@ -138,7 +199,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Then by using `.invoke`, the model can be told what to do and assess if using the tools it was given would assist in it's response." + "### [Invoke with ToolCall](/docs/concepts/#invoke-with-toolcall)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "By using `.invoke`, the model can be told what to do and assess if using the tools it was given would assist in it's response." ] }, { @@ -152,6 +220,43 @@ " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", ")" ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Additionally, the tool can be used by directly passing input as an argument. However, this is discouraged as the tool is intended to be used in an executor chain." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cs._run(input)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API reference" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "[Azure AI Content Safety Overview](https://learn.microsoft.com/azure/ai-services/content-safety/overview) | [Azure AI Content Safety Python SDK](https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python)" + ] } ], "metadata": { diff --git a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py index b4e87c150606b..67b6de44124f4 100644 --- a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -33,7 +33,7 @@ class AzureContentSafetyTextTool(BaseTool): content_safety_key: str = "" #: :meta private: content_safety_endpoint: str = "" #: :meta private: - content_safety_client: Any = None #: :meta private: + content_safety_client: Any = None #: :meta private: name: str = "azure_content_safety_tool" description: str = ( From ac350e36a8efd01417a363392524a641f48cb2b3 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:14:01 +1300 Subject: [PATCH 14/20] Rearrange headers to try fix CI error --- docs/docs/integrations/tools/azure_content_safety.ipynb | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 610cf2f865393..abec339b4a766 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -26,8 +26,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Tool Features\n", - "\n", + "### Tool Features" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ "This integration allows for the detection of harmful or offensive content in text using Azure's Content Safety API. It supports four categories of harmful content: Sexual, Harm, Self-Harm, and Violence." ] }, From 3fb48a504fbc0c79454eb1182de5413445bcaccb Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:21:35 +1300 Subject: [PATCH 15/20] Rearrange headers --- .../tools/azure_content_safety.ipynb | 51 ++++++++++++++----- 1 file changed, 38 insertions(+), 13 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index abec339b4a766..e3e4d6ff0402a 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -14,6 +14,13 @@ "## Overview" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Integration details" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -47,8 +54,6 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Integration details\n", - "\n", "This section provides details about how the Azure AI Content Safety integration works, including setup." 
] }, @@ -166,13 +171,6 @@ ")" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chaining" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -200,6 +198,29 @@ "## Invocation" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Firstly, the tool can be used by directly passing input as an argument. However, this is discouraged as the tool is intended to be used in an executor chain." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cs._run(input)" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -211,7 +232,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "By using `.invoke`, the model can be told what to do and assess if using the tools it was given would assist in it's response." + "By using `.invoke`, the model can be told what to do and assess if using the tools it was given would assist in it's response. This is the intended use." ] }, { @@ -230,14 +251,14 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### [Invoke directly with args](/docs/concepts/#invoke-with-just-the-arguments)" + "## Chaining" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Additionally, the tool can be used by directly passing input as an argument. However, this is discouraged as the tool is intended to be used in an executor chain." + "When creating an `AgentExecutor` as described earlier, an execution chain is formed. The sequence of events will be printed using the given prompt, and actions will occur in a chain-like manner." ] }, { @@ -246,7 +267,11 @@ "metadata": {}, "outputs": [], "source": [ - "cs._run(input)" + "agent = create_structured_chat_agent(model, tools, prompt)\n", + "\n", + "agent_executor = AgentExecutor(\n", + " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", + ")" ] }, { From 1f30d14dd5bb56d60ffd7bf62c312d53b121f9e9 Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 17:23:13 +1300 Subject: [PATCH 16/20] Change Tool Functions to Tool functions --- docs/docs/integrations/tools/azure_content_safety.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index e3e4d6ff0402a..0ba8f3e5a6f22 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -33,7 +33,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### Tool Features" + "### Tool features" ] }, { From e5fb3635edc85b7233ebee1ebd58131a9513258d Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 8 Oct 2024 19:52:30 +1300 Subject: [PATCH 17/20] Change order of cells --- .../tools/azure_content_safety.ipynb | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 0ba8f3e5a6f22..80392f351a232 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -89,7 +89,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now we can use the `AzureContentSafetyTextTool` 
combine with a model, using `create_structured_chat_agent`." + "Now we can use the `AzureContentSafetyTextTool` combined with a model, using `create_structured_chat_agent`." ] }, { @@ -198,6 +198,22 @@ "## Invocation" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Input must be in the form of a string (`str`)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "input = \"I hate you\"" + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -241,7 +257,6 @@ "metadata": {}, "outputs": [], "source": [ - "input = \"I hate you\"\n", "agent_executor.invoke(\n", " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", ")" From 4915fe093fb31ac8d8ccf7506e31f7593014cc4d Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Tue, 15 Oct 2024 01:25:37 +0000 Subject: [PATCH 18/20] Add outputs to docs --- .../tools/azure_content_safety.ipynb | 77 +++++++++++++++---- 1 file changed, 62 insertions(+), 15 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 80392f351a232..15012d6f0dec2 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -77,7 +77,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -121,7 +121,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -145,7 +145,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -157,7 +157,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ @@ -180,7 +180,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -207,7 +207,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -230,11 +230,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'Hate: 2\\nSelfHarm: 0\\nSexual: 0\\nViolence: 0\\n'" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "cs._run(input)" + "cs.invoke({\"query\": input})" ] }, { @@ -253,9 +264,45 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "\n", + "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", + "\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"azure_content_safety_tool\",\n", + " \"action_input\": {\n", + " \"query\": \"I hate you\"\n", + " }\n", + "}\u001b[0m\u001b[36;1m\u001b[1;3mHate: 2\n", + "SelfHarm: 0\n", + "Sexual: 0\n", + "Violence: 0\n", + "\u001b[0m\u001b[32;1m\u001b[1;3m{\n", + " \"action\": \"Final Answer\",\n", + " \"action_input\": \"The text contains hate sentiment with a severity level of 2. 
There is no indication of self-harm, sexual content, or violence.\"\n", + "}\u001b[0m\n", + "\n", + "\u001b[1m> Finished chain.\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "{'input': 'Can you check the following text for harmful content : I hate you',\n", + " 'output': 'The text contains hate sentiment with a severity level of 2. There is no indication of self-harm, sexual content, or violence.'}" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "agent_executor.invoke(\n", " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", @@ -278,7 +325,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": {}, "outputs": [], "source": [ @@ -320,7 +367,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.7" + "version": "3.11.4" } }, "nbformat": 4, From 71ae22153e6a6b5482ff194e5131b4fec7a92bbf Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:32:37 +1300 Subject: [PATCH 19/20] Add suggested changes to guide and class code --- .../tools/azure_content_safety.ipynb | 163 ++++++------------ .../tools/azure_ai_services/content_safety.py | 4 +- 2 files changed, 50 insertions(+), 117 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 15012d6f0dec2..5a678da1c1a68 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -64,7 +64,7 @@ "outputs": [], "source": [ "import os\n", - "\n", + "import getpass\n", "from langchain import hub" ] }, @@ -72,34 +72,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We will use a prompt to instruct the model. LangChain prompts can be configured, but for simplicity, we will use a premade prompt from LangSmith. This requires an API key, which can be set up [here](https://www.langchain.com/langsmith) after registration." + "Now we can use the `AzureContentSafetyTextTool` combined with a model, using `create_react_agent`." ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "LANGSMITH_KEY = os.environ[\"LANGSMITH_KEY\"]\n", - "prompt = hub.pull(\"hwchase17/structured-chat-agent\", api_key=LANGSMITH_KEY)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now we can use the `AzureContentSafetyTextTool` combined with a model, using `create_structured_chat_agent`." 
+ "from langgraph.prebuilt import create_react_agent" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ - "from langchain.agents import AgentExecutor, create_structured_chat_agent\n", - "from langchain_community.tools.azure_ai_services.content_safety import (\n", + "from libs.community.langchain_community.tools.azure_ai_services.content_safety import (\n", " AzureContentSafetyTextTool,\n", ")\n", "from langchain_openai import AzureChatOpenAI" @@ -121,12 +112,22 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "os.environ[\"CONTENT_SAFETY_ENDPOINT\"] = getpass.getpass()\n", + "os.environ[\"CONTENT_SAFETY_KEY\"] = getpass.getpass()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "content_endpoint = os.environ[\"CONTENT_SAFETY_ENDPOINT\"]\n", - "content_key = os.environ[\"CONTENT_SAFETY_KEY\"]" + "content_key = os.environ[\"CONTENT_SAFETY_API_KEY\"]" ] }, { @@ -145,37 +146,49 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "cs = AzureContentSafetyTextTool(\n", - " content_safety_key=content_key,\n", - " content_safety_endpoint=content_endpoint,\n", - ")" + "os.environ[\"AZURE_OPENAI_API_KEY\"] = getpass.getpass()\n", + "os.environ[\"OPENAI_API_VERSION\"] = getpass.getpass()\n", + "os.environ[\"GPT_MODEL\"] = getpass.getpass()\n", + "os.environ[\"AZURE_OPENAI_ENDPOINT\"] = getpass.getpass()" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ - "tools = [cs]\n", - "\n", "model = AzureChatOpenAI(\n", " openai_api_version=os.environ[\"OPENAI_API_VERSION\"],\n", - " azure_deployment=os.environ[\"COMPLETIONS_MODEL\"],\n", + " azure_deployment=os.environ[\"GPT_MODEL\"],\n", " azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n", " api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n", ")" ] }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "cs = AzureContentSafetyTextTool(\n", + " content_safety_key=content_key,\n", + " content_safety_endpoint=content_endpoint,\n", + ")\n", + "\n", + "tools = [cs]" + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Creating an `AgentExecutor` chain allows a model to use tools to assist in it's response." + "Create a react agent to invoke the tool. 
" ] }, { @@ -184,11 +197,7 @@ "metadata": {}, "outputs": [], "source": [ - "agent = create_structured_chat_agent(model, tools, prompt)\n", - "\n", - "agent_executor = AgentExecutor(\n", - " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", - ")" + "agent = create_react_agent(model, tools)" ] }, { @@ -230,20 +239,9 @@ }, { "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Hate: 2\\nSelfHarm: 0\\nSexual: 0\\nViolence: 0\\n'" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "cs.invoke({\"query\": input})" ] @@ -264,76 +262,11 @@ }, { "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "\n", - "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", - "\u001b[32;1m\u001b[1;3m{\n", - " \"action\": \"azure_content_safety_tool\",\n", - " \"action_input\": {\n", - " \"query\": \"I hate you\"\n", - " }\n", - "}\u001b[0m\u001b[36;1m\u001b[1;3mHate: 2\n", - "SelfHarm: 0\n", - "Sexual: 0\n", - "Violence: 0\n", - "\u001b[0m\u001b[32;1m\u001b[1;3m{\n", - " \"action\": \"Final Answer\",\n", - " \"action_input\": \"The text contains hate sentiment with a severity level of 2. There is no indication of self-harm, sexual content, or violence.\"\n", - "}\u001b[0m\n", - "\n", - "\u001b[1m> Finished chain.\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "{'input': 'Can you check the following text for harmful content : I hate you',\n", - " 'output': 'The text contains hate sentiment with a severity level of 2. There is no indication of self-harm, sexual content, or violence.'}" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "agent_executor.invoke(\n", - " {\"input\": f\"Can you check the following text for harmful content : {input}\"}\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Chaining" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When creating an `AgentExecutor` as described earlier, an execution chain is formed. The sequence of events will be printed using the given prompt, and actions will occur in a chain-like manner." 
- ] - }, - { - "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "agent = create_structured_chat_agent(model, tools, prompt)\n", - "\n", - "agent_executor = AgentExecutor(\n", - " agent=agent, tools=tools, verbose=True, handle_parsing_errors=True\n", - ")" + "agent.invoke({\"messages\": [(\"user\", f\"Can you check the following text for harmful content : {input}\")]})" ] }, { @@ -347,7 +280,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "[Azure AI Content Safety Overview](https://learn.microsoft.com/azure/ai-services/content-safety/overview) | [Azure AI Content Safety Python SDK](https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python)" + "[Azure AI Content Safety Overview](https://learn.microsoft.com/azure/ai-services/content-safety/overview) | [Azure AI Content Safety Python API](https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python)" ] } ], @@ -367,7 +300,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.12.8" } }, "nbformat": 4, diff --git a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py index 67b6de44124f4..2d478afb45678 100644 --- a/libs/community/langchain_community/tools/azure_ai_services/content_safety.py +++ b/libs/community/langchain_community/tools/azure_ai_services/content_safety.py @@ -31,8 +31,8 @@ class AzureContentSafetyTextTool(BaseTool): requests. """ - content_safety_key: str = "" #: :meta private: - content_safety_endpoint: str = "" #: :meta private: + content_safety_key: Optional[str] = None #: :meta private: + content_safety_endpoint: Optional[str] = None #: :meta private: content_safety_client: Any = None #: :meta private: name: str = "azure_content_safety_tool" From 75bcf2ad0afd1e08f6551c3d5d4a27257f72fc0b Mon Sep 17 00:00:00 2001 From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:35:36 +1300 Subject: [PATCH 20/20] Lint file --- .../tools/azure_content_safety.ipynb | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/docs/docs/integrations/tools/azure_content_safety.ipynb b/docs/docs/integrations/tools/azure_content_safety.ipynb index 5a678da1c1a68..73420ca5129de 100644 --- a/docs/docs/integrations/tools/azure_content_safety.ipynb +++ b/docs/docs/integrations/tools/azure_content_safety.ipynb @@ -63,9 +63,8 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import getpass\n", - "from langchain import hub" + "import os" ] }, { @@ -90,10 +89,11 @@ "metadata": {}, "outputs": [], "source": [ + "from langchain_openai import AzureChatOpenAI\n", + "\n", "from libs.community.langchain_community.tools.azure_ai_services.content_safety import (\n", " AzureContentSafetyTextTool,\n", - ")\n", - "from langchain_openai import AzureChatOpenAI" + ")" ] }, { @@ -266,7 +266,13 @@ "metadata": {}, "outputs": [], "source": [ - "agent.invoke({\"messages\": [(\"user\", f\"Can you check the following text for harmful content : {input}\")]})" + "agent.invoke(\n", + " {\n", + " \"messages\": [\n", + " (\"user\", f\"Can you check the following text for harmful content : {input}\")\n", + " ]\n", + " }\n", + ")" ] }, {
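
Taken together, the notebook that these last patches converge on uses the tool roughly as follows. This is a minimal sketch, not part of any patch in the series: it assumes the `CONTENT_SAFETY_API_KEY` and `CONTENT_SAFETY_ENDPOINT` environment variables are set as in the guide, and it imports from the public `langchain_community` package path rather than the repo-relative `libs.community.` path that PATCH 19/20 introduces.

    import os

    from langchain_community.tools.azure_ai_services.content_safety import (
        AzureContentSafetyTextTool,
    )

    # Credentials are read from the environment, mirroring the notebook cells above.
    cs = AzureContentSafetyTextTool(
        content_safety_key=os.environ["CONTENT_SAFETY_API_KEY"],
        content_safety_endpoint=os.environ["CONTENT_SAFETY_ENDPOINT"],
    )

    # Direct invocation returns the severity of each harm category as a string,
    # e.g. "Hate: 2\nSelfHarm: 0\nSexual: 0\nViolence: 0\n".
    print(cs.invoke({"query": "I hate you"}))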