From 9555ec938eceb8742594999f720d0dbe669bfe7d Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 15:05:27 +0200 Subject: [PATCH 01/21] added bedrock --- requirements.txt | 3 +- textgrad/engine/__init__.py | 9 ++++ textgrad/engine/bedrock.py | 89 +++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 1 deletion(-) create mode 100644 textgrad/engine/bedrock.py diff --git a/requirements.txt b/requirements.txt index 82b52e6..f83ecdd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,4 +6,5 @@ platformdirs>=3.11.0 datasets>=2.14.6 diskcache>=5.6.3 graphviz>=0.20.3 -gdown>=5.2.0 \ No newline at end of file +gdown>=5.2.0 +boto3>=1.34.133 \ No newline at end of file diff --git a/textgrad/engine/__init__.py b/textgrad/engine/__init__.py index c098afb..f0f7e06 100644 --- a/textgrad/engine/__init__.py +++ b/textgrad/engine/__init__.py @@ -5,6 +5,11 @@ "haiku": "claude-3-haiku-20240307", "sonnet": "claude-3-sonnet-20240229", "together-llama-3-70b": "together-meta-llama/Llama-3-70b-chat-hf", + "bedrock-sonnet-3": "bedrock-anthropic.claude-3-sonnet-20240229-v1:0", + "bedrock-sonnet-3.5": "bedrock-anthropic.claude-3-5-sonnet-20240620-v1:0", + "bedrock-opus": "bedrock-anthropic.claude-3-opus-20240229-v1:0", + "bedrock-haiku": "bedrock-anthropic.claude-3-haiku-20240307-v1:0", + "bedrock-mistral-large": "bedrock-mistral.mistral-large-2402-v1:0" } def get_engine(engine_name: str, **kwargs) -> EngineLM: @@ -30,5 +35,9 @@ def get_engine(engine_name: str, **kwargs) -> EngineLM: elif engine_name in ["command-r-plus", "command-r", "command", "command-light"]: from .cohere import ChatCohere return ChatCohere(model_string=engine_name, **kwargs) + elif "bedrock" in engine_name: + from .bedrock import ChatBedrock + engine_name = engine_name.replace("bedrock-", "") + return ChatBedrock(model_string=engine_name, **kwargs) else: raise ValueError(f"Engine {engine_name} not supported") \ No newline at end of file diff --git a/textgrad/engine/bedrock.py 
b/textgrad/engine/bedrock.py new file mode 100644 index 0000000..299a53d --- /dev/null +++ b/textgrad/engine/bedrock.py @@ -0,0 +1,89 @@ +try: + import boto3 + +except ImportError: + raise ImportError("If you'd like to use Amazon Bedrock models, please install the boto3 package by running `pip install boto3`") + +import os +import platformdirs +from tenacity import ( + retry, + stop_after_attempt, + wait_random_exponential, +) +from .base import EngineLM, CachedEngine + + +class ChatBedrock(EngineLM, CachedEngine): + SYSTEM_PROMPT = "You are a helpful, creative, and smart assistant." + + def __init__( + self, + model_string="anthropic.claude-3-sonnet-20240229-v1:0", + system_prompt=SYSTEM_PROMPT, + **kwargs + ): + + root = platformdirs.user_cache_dir("textgrad") + cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") + super().__init__(cache_path=cache_path) + + self.model_string = model_string + self.system_prompt = system_prompt + self.client = boto3.client(service_name='bedrock-runtime') + assert isinstance(self.system_prompt, str) + + @retry(wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(5)) + def __call__(self, prompt, **kwargs): + return self.generate(prompt, **kwargs) + + def generate_conversation(self, model_id="", system_prompt="", messages=[], temperature=0.5, top_k=200, top_p=0.99, max_tokens=4096): + """ + Sends messages to a model. + Args: + bedrock_client: The Boto3 Bedrock runtime client. + model_id (str): The model ID to use. + system_prompts (JSON) : The system prompts for the model to use. + messages (JSON) : The messages to send to the model. + + Returns: + response (JSON): The conversation that the model generated. + + """ + + # Base inference parameters to use. + inference_config = {"temperature": temperature, "top_p": top_p, "maxTokens": max_tokens} + # Additional inference parameters to use. + additional_model_fields = {"top_k": top_k} + + # Send the message. 
+ response = self.client.converse( + modelId=model_id, + messages=messages, + system=system_prompt, + inferenceConfig=inference_config, + additionalModelRequestFields=additional_model_fields + ) + + return response + + def generate( + self, prompt, system_prompt=None, temperature=0, max_tokens=4096, top_p=0.99 + ): + + sys_prompt_arg = system_prompt if system_prompt else self.system_prompt + cache_or_none = self._check_cache(sys_prompt_arg + prompt) + if cache_or_none is not None: + return cache_or_none + + messages = [{ + "role": "user", + "content": [{"text": prompt}] + }] + + response = self.generate_conversation(self.model_string, system_prompt=sys_prompt_arg, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) + + + response = response.text + self._save_cache(sys_prompt_arg + prompt, response) + return response \ No newline at end of file From fc32bc82871990bc56a95aa30e7c23451a4e96b3 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 15:19:28 +0200 Subject: [PATCH 02/21] added bedrock --- textgrad/engine/__init__.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/textgrad/engine/__init__.py b/textgrad/engine/__init__.py index f0f7e06..50f0819 100644 --- a/textgrad/engine/__init__.py +++ b/textgrad/engine/__init__.py @@ -22,6 +22,11 @@ def get_engine(engine_name: str, **kwargs) -> EngineLM: if (("gpt-4" in engine_name) or ("gpt-3.5" in engine_name)): from .openai import ChatOpenAI return ChatOpenAI(model_string=engine_name, **kwargs) + # bedrock incluedes most of the models so first check + elif "bedrock" in engine_name: + from .bedrock import ChatBedrock + engine_name = engine_name.replace("bedrock-", "") + return ChatBedrock(model_string=engine_name, **kwargs) elif "claude" in engine_name: from .anthropic import ChatAnthropic return ChatAnthropic(model_string=engine_name, **kwargs) @@ -35,9 +40,5 @@ def get_engine(engine_name: str, **kwargs) -> EngineLM: elif engine_name in ["command-r-plus", 
"command-r", "command", "command-light"]: from .cohere import ChatCohere return ChatCohere(model_string=engine_name, **kwargs) - elif "bedrock" in engine_name: - from .bedrock import ChatBedrock - engine_name = engine_name.replace("bedrock-", "") - return ChatBedrock(model_string=engine_name, **kwargs) else: raise ValueError(f"Engine {engine_name} not supported") \ No newline at end of file From eb12b2a1533b0664e81a8530846fdb673c413c90 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 15:27:22 +0200 Subject: [PATCH 03/21] added bedrock --- textgrad/engine/bedrock.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 299a53d..ed21817 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -37,7 +37,7 @@ def __init__( def __call__(self, prompt, **kwargs): return self.generate(prompt, **kwargs) - def generate_conversation(self, model_id="", system_prompt="", messages=[], temperature=0.5, top_k=200, top_p=0.99, max_tokens=4096): + def generate_conversation(self, model_id="", system_prompts=[], messages=[], temperature=0.5, top_k=200, top_p=0.99, max_tokens=4096): """ Sends messages to a model. Args: @@ -52,7 +52,7 @@ def generate_conversation(self, model_id="", system_prompt="", messages=[], temp """ # Base inference parameters to use. - inference_config = {"temperature": temperature, "top_p": top_p, "maxTokens": max_tokens} + inference_config = {"temperature": temperature, "topP": top_p, "maxTokens": max_tokens} # Additional inference parameters to use. 
additional_model_fields = {"top_k": top_k} @@ -60,7 +60,7 @@ def generate_conversation(self, model_id="", system_prompt="", messages=[], temp response = self.client.converse( modelId=model_id, messages=messages, - system=system_prompt, + system=system_prompts, inferenceConfig=inference_config, additionalModelRequestFields=additional_model_fields ) @@ -72,6 +72,7 @@ def generate( ): sys_prompt_arg = system_prompt if system_prompt else self.system_prompt + sys_prompt_args = [{"text": sys_prompt_arg}] cache_or_none = self._check_cache(sys_prompt_arg + prompt) if cache_or_none is not None: return cache_or_none @@ -81,7 +82,7 @@ def generate( "content": [{"text": prompt}] }] - response = self.generate_conversation(self.model_string, system_prompt=sys_prompt_arg, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) + response = self.generate_conversation(self.model_string, system_prompts=sys_prompt_args, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) response = response.text From 48219a6cab7b3e80bf7db57c33c595c7af83d838 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 15:32:58 +0200 Subject: [PATCH 04/21] added bedrock --- textgrad/engine/bedrock.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index ed21817..52b0247 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -84,7 +84,6 @@ def generate( response = self.generate_conversation(self.model_string, system_prompts=sys_prompt_args, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) - - response = response.text + response = response.choices[0].message.content self._save_cache(sys_prompt_arg + prompt, response) return response \ No newline at end of file From 7ed2bf6edb799ffd3384a7dd2c28984a602ad2a0 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 15:38:02 +0200 Subject: [PATCH 05/21] added bedrock --- 
textgrad/engine/bedrock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 52b0247..4d23302 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -84,6 +84,6 @@ def generate( response = self.generate_conversation(self.model_string, system_prompts=sys_prompt_args, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) - response = response.choices[0].message.content + response = response["output"]["message"]["content"][0]["text"] self._save_cache(sys_prompt_arg + prompt, response) return response \ No newline at end of file From 5dccda80a9c435d9c0c2b445c91e8e2b8979bbc1 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:02:38 +0200 Subject: [PATCH 06/21] added bedrock --- textgrad/engine/__init__.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/textgrad/engine/__init__.py b/textgrad/engine/__init__.py index 50f0819..fc90524 100644 --- a/textgrad/engine/__init__.py +++ b/textgrad/engine/__init__.py @@ -4,12 +4,7 @@ "opus": "claude-3-opus-20240229", "haiku": "claude-3-haiku-20240307", "sonnet": "claude-3-sonnet-20240229", - "together-llama-3-70b": "together-meta-llama/Llama-3-70b-chat-hf", - "bedrock-sonnet-3": "bedrock-anthropic.claude-3-sonnet-20240229-v1:0", - "bedrock-sonnet-3.5": "bedrock-anthropic.claude-3-5-sonnet-20240620-v1:0", - "bedrock-opus": "bedrock-anthropic.claude-3-opus-20240229-v1:0", - "bedrock-haiku": "bedrock-anthropic.claude-3-haiku-20240307-v1:0", - "bedrock-mistral-large": "bedrock-mistral.mistral-large-2402-v1:0" + "together-llama-3-70b": "together-meta-llama/Llama-3-70b-chat-hf" } def get_engine(engine_name: str, **kwargs) -> EngineLM: From 6d95c8525543f5f5778ad0dc456250ef8b0d75f0 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:19:22 +0200 Subject: [PATCH 07/21] added bedrock --- textgrad/engine/bedrock.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) 
diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 4d23302..e5e62cc 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -53,8 +53,11 @@ def generate_conversation(self, model_id="", system_prompts=[], messages=[], tem # Base inference parameters to use. inference_config = {"temperature": temperature, "topP": top_p, "maxTokens": max_tokens} - # Additional inference parameters to use. - additional_model_fields = {"top_k": top_k} + if("anthropic" in model_id): + # Additional inference parameters to use. + additional_model_fields = {"top_k": top_k} + else: + additional_model_fields = {} # Send the message. response = self.client.converse( From 1fbdb142cc770ce19d7e98dbe2246a7f5e097922 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:26:44 +0200 Subject: [PATCH 08/21] added bedrock --- textgrad/engine/bedrock.py | 54 ++++++++++++++++++++++++++++++-------- 1 file changed, 43 insertions(+), 11 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index e5e62cc..403031e 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -23,6 +23,20 @@ def __init__( system_prompt=SYSTEM_PROMPT, **kwargs ): + + if "anthropic" in model_string: + self.system_prompt_supported = True + if "meta" in model_string: + self.system_prompt_supported = True + if "cohere" in model_string: + self.system_prompt_supported = True + if "mistral" in model_string: + if "instruct" in model_string: + self.system_prompt_supported = False + else: + self.system_prompt_supported = True + if "amazon" in model_string: + self.system_prompt_supported = False root = platformdirs.user_cache_dir("textgrad") cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") @@ -60,13 +74,21 @@ def generate_conversation(self, model_id="", system_prompts=[], messages=[], tem additional_model_fields = {} # Send the message. 
- response = self.client.converse( - modelId=model_id, - messages=messages, - system=system_prompts, - inferenceConfig=inference_config, - additionalModelRequestFields=additional_model_fields - ) + if self.system_prompt_supported: + response = self.client.converse( + modelId=model_id, + messages=messages, + system=system_prompts, + inferenceConfig=inference_config, + additionalModelRequestFields=additional_model_fields + ) + else: + response = self.client.converse( + modelId=model_id, + messages=messages, + inferenceConfig=inference_config, + additionalModelRequestFields=additional_model_fields + ) return response @@ -80,10 +102,20 @@ def generate( if cache_or_none is not None: return cache_or_none - messages = [{ - "role": "user", - "content": [{"text": prompt}] - }] + if self.system_prompt_supported: + messages = [{ + "role": "user", + "content": [{"text": prompt}] + }] + else: + messages = [{ + "role": "user", + "content": [{"text": sys_prompt_arg}] + }, + { + "role": "user", + "content": [{"text": prompt}] + }] response = self.generate_conversation(self.model_string, system_prompts=sys_prompt_args, messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) From ca698e5f8970567ad10efb848ccd21db7b356eb9 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:31:51 +0200 Subject: [PATCH 09/21] added bedrock --- textgrad/engine/bedrock.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 403031e..c31fe44 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -108,13 +108,10 @@ def generate( "content": [{"text": prompt}] }] else: - messages = [{ - "role": "user", - "content": [{"text": sys_prompt_arg}] - }, + messages = [ { "role": "user", - "content": [{"text": prompt}] + "content": [{"text": sys_prompt_arg + "\n\n" + prompt}] }] response = self.generate_conversation(self.model_string, system_prompts=sys_prompt_args, 
messages=messages, temperature=temperature, top_p=top_p, max_tokens=max_tokens) From d27a95182e6ed49e163c5e7169126c770240124e Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:42:44 +0200 Subject: [PATCH 10/21] added bedrock --- textgrad/engine/bedrock.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index c31fe44..eb630de 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -37,6 +37,11 @@ def __init__( self.system_prompt_supported = True if "amazon" in model_string: self.system_prompt_supported = False + + if kwargs["max_tokens"]: + self.max_tokens = kwargs["max_tokens"] + if kwargs["region"]: + self.aws_region = kwargs["region"] root = platformdirs.user_cache_dir("textgrad") cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") @@ -66,7 +71,7 @@ def generate_conversation(self, model_id="", system_prompts=[], messages=[], tem """ # Base inference parameters to use. - inference_config = {"temperature": temperature, "topP": top_p, "maxTokens": max_tokens} + inference_config = {"temperature": temperature, "topP": top_p, "maxTokens": self.max_tokens if self.max_tokens else max_tokens} if("anthropic" in model_id): # Additional inference parameters to use. 
additional_model_fields = {"top_k": top_k} From e12560015109013c7e83c131cbb6d7029fec2e67 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:45:37 +0200 Subject: [PATCH 11/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index eb630de..c641ea4 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -38,9 +38,9 @@ def __init__( if "amazon" in model_string: self.system_prompt_supported = False - if kwargs["max_tokens"]: + if kwargs.get("max_tokens"): self.max_tokens = kwargs["max_tokens"] - if kwargs["region"]: + if kwargs.get("region"): self.aws_region = kwargs["region"] root = platformdirs.user_cache_dir("textgrad") From 1ccbe17f450b334c330378bb16593fe8bb970955 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 16:46:55 +0200 Subject: [PATCH 12/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index c641ea4..70b57d3 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -38,10 +38,8 @@ def __init__( if "amazon" in model_string: self.system_prompt_supported = False - if kwargs.get("max_tokens"): - self.max_tokens = kwargs["max_tokens"] - if kwargs.get("region"): - self.aws_region = kwargs["region"] + self.max_tokens = kwargs.get("max_tokens", None) + self.aws_region = kwargs.get("region", None) root = platformdirs.user_cache_dir("textgrad") cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") From 0967f3140c57d4a2e60202c3872cefad96787c19 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 18:14:08 +0200 Subject: [PATCH 13/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/textgrad/engine/bedrock.py 
b/textgrad/engine/bedrock.py index 70b57d3..f60da3d 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -1,5 +1,6 @@ try: import boto3 + from botocore.config import Config except ImportError: raise ImportError("If you'd like to use Amazon Bedrock models, please install the boto3 package by running `pip install boto3`") @@ -37,24 +38,30 @@ def __init__( self.system_prompt_supported = True if "amazon" in model_string: self.system_prompt_supported = False - + self.max_tokens = kwargs.get("max_tokens", None) self.aws_region = kwargs.get("region", None) + if self.aws_region: + self.my_config = Config(region_name = self.aws_region) + self.client = boto3.client(service_name='bedrock-runtime', config=my_config) + else: + self.client = boto3.client(service_name='bedrock-runtime') + root = platformdirs.user_cache_dir("textgrad") cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") super().__init__(cache_path=cache_path) self.model_string = model_string self.system_prompt = system_prompt - self.client = boto3.client(service_name='bedrock-runtime') + assert isinstance(self.system_prompt, str) @retry(wait=wait_random_exponential(min=1, max=5), stop=stop_after_attempt(5)) def __call__(self, prompt, **kwargs): return self.generate(prompt, **kwargs) - def generate_conversation(self, model_id="", system_prompts=[], messages=[], temperature=0.5, top_k=200, top_p=0.99, max_tokens=4096): + def generate_conversation(self, model_id="", system_prompts=[], messages=[], temperature=0.5, top_k=200, top_p=0.99, max_tokens=2048): """ Sends messages to a model. 
Args: @@ -96,7 +103,7 @@ def generate_conversation(self, model_id="", system_prompts=[], messages=[], tem return response def generate( - self, prompt, system_prompt=None, temperature=0, max_tokens=4096, top_p=0.99 + self, prompt, system_prompt=None, temperature=0, max_tokens=2048, top_p=0.99 ): sys_prompt_arg = system_prompt if system_prompt else self.system_prompt From 8e1621c74ceccbc720eff568c6995813128b72a2 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 18:16:26 +0200 Subject: [PATCH 14/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index f60da3d..71732b7 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -38,6 +38,9 @@ def __init__( self.system_prompt_supported = True if "amazon" in model_string: self.system_prompt_supported = False + if "ai21" in model_string: + self.system_prompt_supported = False + raise ValueError("ai21 not supported yet") self.max_tokens = kwargs.get("max_tokens", None) self.aws_region = kwargs.get("region", None) From 8805e290d588b003bb13407af82227f5a3fee2ca Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 18:25:57 +0200 Subject: [PATCH 15/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 71732b7..566e2e1 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -24,7 +24,7 @@ def __init__( system_prompt=SYSTEM_PROMPT, **kwargs ): - + self.system_prompt_supported = True if "anthropic" in model_string: self.system_prompt_supported = True if "meta" in model_string: @@ -38,6 +38,8 @@ def __init__( self.system_prompt_supported = True if "amazon" in model_string: self.system_prompt_supported = False + if "premier" in model_string: + raise ValueError("amazon-titan-premier not supported yet") if "ai21" in 
model_string: self.system_prompt_supported = False raise ValueError("ai21 not supported yet") From 96bc45a061c0dd7834d2473b094938b4a02a1d89 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 26 Jun 2024 18:29:27 +0200 Subject: [PATCH 16/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 566e2e1..2885e16 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -49,7 +49,7 @@ def __init__( if self.aws_region: self.my_config = Config(region_name = self.aws_region) - self.client = boto3.client(service_name='bedrock-runtime', config=my_config) + self.client = boto3.client(service_name='bedrock-runtime', config=self.my_config) else: self.client = boto3.client(service_name='bedrock-runtime') From 33e36bb9348ab92f5eb5a23e130e83af05cc8899 Mon Sep 17 00:00:00 2001 From: cerrix Date: Thu, 27 Jun 2024 15:23:30 +0200 Subject: [PATCH 17/21] amazon bedrock support added --- textgrad/engine/bedrock.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 2885e16..764b0a6 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -47,11 +47,31 @@ def __init__( self.max_tokens = kwargs.get("max_tokens", None) self.aws_region = kwargs.get("region", None) - if self.aws_region: - self.my_config = Config(region_name = self.aws_region) - self.client = boto3.client(service_name='bedrock-runtime', config=self.my_config) + if boto3._get_default_session().get_credentials() is not None: + if self.aws_region: + self.my_config = Config(region_name = self.aws_region) + self.client = boto3.client(service_name='bedrock-runtime', config=self.my_config) + else: + self.client = boto3.client(service_name='bedrock-runtime') else: - self.client = boto3.client(service_name='bedrock-runtime') + access_key_id = 
os.getenv("AWS_ACCESS_KEY_ID", None) + secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None) + session_token = os.getenv("AWS_SESSION_TOKEN", None) + if self.aws_region is None: + self.aws_region = os.getenv("AWS_DEFAULT_REGION", None) + if self.aws_region is None: + raise ValueError("AWS region not specified. Please add it in get_engine parameters or has AWS_DEFAULT_REGION var") + if access_key_id is None: + raise ValueError("AWS access key ID cannot be 'None'.") + if secret_access_key is None: + raise ValueError("AWS secret access key cannot be 'None'.") + session = boto3.Session( + aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + aws_session_token=session_token + ) + self.my_config = Config(region_name = self.aws_region) + self.client = session.client(service_name='bedrock-runtime', config=self.my_config) root = platformdirs.user_cache_dir("textgrad") cache_path = os.path.join(root, f"cache_bedrock_{model_string}.db") super().__init__(cache_path=cache_path) @@ -133,4 +153,4 @@ def generate( response = response["output"]["message"]["content"][0]["text"] self._save_cache(sys_prompt_arg + prompt, response) - return response \ No newline at end of file + return response From c4d8a342ddc1640828e2419a5665241f6bc76d51 Mon Sep 17 00:00:00 2001 From: cerrix Date: Thu, 27 Jun 2024 15:49:50 +0200 Subject: [PATCH 18/21] added bedrock support --- textgrad/engine/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/textgrad/engine/__init__.py b/textgrad/engine/__init__.py index fc90524..5f5204f 100644 --- a/textgrad/engine/__init__.py +++ b/textgrad/engine/__init__.py @@ -17,7 +17,7 @@ def get_engine(engine_name: str, **kwargs) -> EngineLM: if (("gpt-4" in engine_name) or ("gpt-3.5" in engine_name)): from .openai import ChatOpenAI return ChatOpenAI(model_string=engine_name, **kwargs) - # bedrock incluedes most of the models so first check + # bedrock includes most of the models so first check if the request is for it elif "bedrock" in engine_name: from
.bedrock import ChatBedrock engine_name = engine_name.replace("bedrock-", "") From a3a1290b9d2bb3c16e13d2fc659de492b8de0e5f Mon Sep 17 00:00:00 2001 From: cerrix Date: Thu, 27 Jun 2024 15:51:22 +0200 Subject: [PATCH 19/21] added bedrock support --- textgrad/engine/bedrock.py | 1 + 1 file changed, 1 insertion(+) diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 764b0a6..9c1c778 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -47,6 +47,7 @@ def __init__( self.max_tokens = kwargs.get("max_tokens", None) self.aws_region = kwargs.get("region", None) + # handle both AWS interaction options: with default credential or providing AWS ACCESS KEY and SECRET KEY if boto3._get_default_session().get_credentials() is not None: if self.aws_region: self.my_config = Config(region_name = self.aws_region) From 8cebc58fb75618dbdb5a8454de648960ec3ddb03 Mon Sep 17 00:00:00 2001 From: cerrix Date: Wed, 10 Jul 2024 11:05:19 +0200 Subject: [PATCH 20/21] removed boto3 from requirements, added clear error messages in bedrock.py --- requirements.txt | 3 +-- textgrad/engine/bedrock.py | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/requirements.txt b/requirements.txt index f83ecdd..82b52e6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -6,5 +6,4 @@ platformdirs>=3.11.0 datasets>=2.14.6 diskcache>=5.6.3 graphviz>=0.20.3 -gdown>=5.2.0 -boto3>=1.34.133 \ No newline at end of file +gdown>=5.2.0 \ No newline at end of file diff --git a/textgrad/engine/bedrock.py b/textgrad/engine/bedrock.py index 9c1c778..80b9d14 100644 --- a/textgrad/engine/bedrock.py +++ b/textgrad/engine/bedrock.py @@ -61,11 +61,11 @@ def __init__( if self.aws_region is None: self.aws_region = os.getenv("AWS_DEFAULT_REGION", None) if self.aws_region is None: - raise ValueError("AWS region not specified. Please add it in get_engine parameters or has AWS_DEFAULT_REGION var") + raise ValueError("AWS region not specified. 
Please add it in get_engine parameters or as AWS_DEFAULT_REGION env var. You can also provide an AWS role to this environment to use default session credentials") if access_key_id is None: - raise ValueError("AWS access key ID cannot be 'None'.") + raise ValueError("AWS access key ID cannot be 'None'. You can also provide an AWS role to this environment to use default session credentials") if secret_access_key is None: - raise ValueError("AWS secret access key cannot be 'None'.") + raise ValueError("AWS secret access key cannot be 'None'. You can also provide an AWS role to this environment to use default session credentials") session = boto3.Session( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, From e1a70ef432293b5de648d8fe5b4a2743c1202ccc Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Wed, 10 Jul 2024 09:45:43 +0000 Subject: [PATCH 21/21] docs(contributor): contrib-readme-action has updated readme --- README.md | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0f19824..3211e6c 100644 --- a/README.md +++ b/README.md @@ -321,6 +321,13 @@ We are grateful for all the help we got from our contributors! Mert Yuksekgonul + + + Cerrix +
+ Francesco +
+ sugatoray @@ -342,6 +349,8 @@ We are grateful for all the help we got from our contributors! David Ruan + + sanowl @@ -349,8 +358,6 @@ We are grateful for all the help we got from our contributors! San - - huangzhii