From e6dbe5aa73d339af0246ba3c65c9576bc2c3d365 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Mon, 29 Jul 2024 09:40:26 -0400
Subject: [PATCH 1/3] remove mixtral and update llama to 3.1

---
 .../tests/integration_tests/test_standard.py | 22 +------------------
 1 file changed, 1 insertion(+), 21 deletions(-)

diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py
index c43eb30cf7621..64cf895d95667 100644
--- a/libs/partners/groq/tests/integration_tests/test_standard.py
+++ b/libs/partners/groq/tests/integration_tests/test_standard.py
@@ -21,31 +21,11 @@ def test_tool_message_histories_list_content(self, model: BaseChatModel) -> None
         super().test_tool_message_histories_list_content(model)
 
 
-class TestGroqMixtral(BaseTestGroq):
-    @property
-    def chat_model_params(self) -> dict:
-        return {
-            "temperature": 0,
-        }
-
-    @pytest.mark.xfail(
-        reason=("Fails with 'Failed to call a function. Please adjust your prompt.'")
-    )
-    def test_structured_output(self, model: BaseChatModel) -> None:
-        super().test_structured_output(model)
-
-    @pytest.mark.xfail(
-        reason=("May pass arguments: {'properties': {}, 'type': 'object'}")
-    )
-    def test_tool_calling_with_no_arguments(self, model: BaseChatModel) -> None:
-        super().test_tool_calling_with_no_arguments(model)
-
-
 class TestGroqLlama(BaseTestGroq):
     @property
     def chat_model_params(self) -> dict:
         return {
-            "model": "llama3-8b-8192",
+            "model": "llama-3.1-8b-instant",
             "temperature": 0,
         }
 

From f7d139f07d05dc2358e1272d28e954edde8e1b56 Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Mon, 29 Jul 2024 09:42:35 -0400
Subject: [PATCH 2/3] .schema is deprecated

---
 .../langchain_standard_tests/integration_tests/chat_models.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py
index 4c141501b12ad..3e3fadb756c80 100644
--- a/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py
+++ b/libs/standard-tests/langchain_standard_tests/integration_tests/chat_models.py
@@ -237,7 +237,7 @@ class Joke(BaseModelProper):
             assert isinstance(chunk, Joke)
 
         # Schema
-        chat = model.with_structured_output(Joke.schema())
+        chat = model.with_structured_output(Joke.model_json_schema())
         result = chat.invoke("Tell me a joke about cats.")
         assert isinstance(result, dict)
         assert set(result.keys()) == {"setup", "punchline"}

From 8e057fbebedab2ed57165e0a8d35e8b49293a41a Mon Sep 17 00:00:00 2001
From: Chester Curme
Date: Mon, 29 Jul 2024 09:49:58 -0400
Subject: [PATCH 3/3] add rate limiter for llama 3.1

---
 .../groq/tests/integration_tests/test_standard.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/libs/partners/groq/tests/integration_tests/test_standard.py b/libs/partners/groq/tests/integration_tests/test_standard.py
index 64cf895d95667..82f23697a3d45 100644
--- a/libs/partners/groq/tests/integration_tests/test_standard.py
+++ b/libs/partners/groq/tests/integration_tests/test_standard.py
@@ -4,12 +4,15 @@
 
 import pytest
 from langchain_core.language_models import BaseChatModel
-from langchain_standard_tests.integration_tests import (  # type: ignore[import-not-found]
-    ChatModelIntegrationTests,  # type: ignore[import-not-found]
+from langchain_core.rate_limiters import InMemoryRateLimiter
+from langchain_standard_tests.integration_tests import (
+    ChatModelIntegrationTests,
 )
 
 from langchain_groq import ChatGroq
 
+rate_limiter = InMemoryRateLimiter(requests_per_second=0.45)
+
 
 class BaseTestGroq(ChatModelIntegrationTests):
     @property
@@ -27,6 +30,7 @@ def chat_model_params(self) -> dict:
         return {
             "model": "llama-3.1-8b-instant",
             "temperature": 0,
+            "rate_limiter": rate_limiter,
         }
 
     @pytest.mark.xfail(
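
For reviewers, a minimal sketch of how the pieces introduced by this series fit together outside the test harness. This is an illustration, not part of the patch: it assumes langchain-core and langchain-groq are installed and a valid GROQ_API_KEY is exported, and the 0.45 requests/second figure simply mirrors the value chosen in patch 3/3.

# Illustrative only -- not part of the patch series above.
# Assumes GROQ_API_KEY is set; 0.45 req/s mirrors the value used in the tests.
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_groq import ChatGroq

# Token-bucket limiter shared by every model instance that receives it,
# so concurrent callers collectively stay under the chosen request rate.
rate_limiter = InMemoryRateLimiter(requests_per_second=0.45)

llm = ChatGroq(
    model="llama-3.1-8b-instant",
    temperature=0,
    rate_limiter=rate_limiter,  # waits client-side before each request is sent
)

print(llm.invoke("Tell me a joke about cats.").content)

Defining the limiter at module scope, as the patch does, means all test cases built from chat_model_params draw from the same bucket, which is presumably intended to keep the integration suite under Groq's request limits for the new model.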