From 54a0573e7c0c047b37a70877cce8ff49f09924e8 Mon Sep 17 00:00:00 2001
From: Stefano Fiorucci
Date: Tue, 10 Dec 2024 14:55:18 +0100
Subject: [PATCH] chore: use text instead of content for ChatMessage in
 Llama.cpp, Langfuse and Mistral (#1238)

---
 integrations/langfuse/tests/test_tracing.py |  2 +-
 .../llama_cpp/chat/chat_generator.py        |  2 +-
 .../llama_cpp/tests/test_chat_generator.py  | 30 +++++++++----------
 .../tests/test_mistral_chat_generator.py    |  4 +--
 4 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/integrations/langfuse/tests/test_tracing.py b/integrations/langfuse/tests/test_tracing.py
index e5737b861..75c1b7a13 100644
--- a/integrations/langfuse/tests/test_tracing.py
+++ b/integrations/langfuse/tests/test_tracing.py
@@ -49,7 +49,7 @@ def test_tracing_integration(llm_class, env_var, expected_trace):
             "tracer": {"invocation_context": {"user_id": "user_42"}},
         }
     )
-    assert "Berlin" in response["llm"]["replies"][0].content
+    assert "Berlin" in response["llm"]["replies"][0].text
     assert response["tracer"]["trace_url"]
 
     trace_url = response["tracer"]["trace_url"]
diff --git a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py
index d43700215..014dd7169 100644
--- a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py
+++ b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py
@@ -17,7 +17,7 @@ def _convert_message_to_llamacpp_format(message: ChatMessage) -> Dict[str, str]:
     - `content`
     - `name` (optional)
     """
-    formatted_msg = {"role": message.role.value, "content": message.content}
+    formatted_msg = {"role": message.role.value, "content": message.text}
     if message.name:
         formatted_msg["name"] = message.name
 
diff --git a/integrations/llama_cpp/tests/test_chat_generator.py b/integrations/llama_cpp/tests/test_chat_generator.py
index 802fe9128..0ddd78c4f 100644
--- a/integrations/llama_cpp/tests/test_chat_generator.py
+++ b/integrations/llama_cpp/tests/test_chat_generator.py
@@ -163,7 +163,7 @@ def test_run_with_valid_message(self, generator_mock):
         assert isinstance(result["replies"], list)
         assert len(result["replies"]) == 1
         assert isinstance(result["replies"][0], ChatMessage)
-        assert result["replies"][0].content == "Generated text"
+        assert result["replies"][0].text == "Generated text"
         assert result["replies"][0].role == ChatRole.ASSISTANT
 
     def test_run_with_generation_kwargs(self, generator_mock):
@@ -183,7 +183,7 @@ def test_run_with_generation_kwargs(self, generator_mock):
         mock_model.create_chat_completion.return_value = mock_output
         generation_kwargs = {"max_tokens": 128}
         result = generator.run([ChatMessage.from_system("Write a 200 word paragraph.")], generation_kwargs)
-        assert result["replies"][0].content == "Generated text"
+        assert result["replies"][0].text == "Generated text"
         assert result["replies"][0].meta["finish_reason"] == "length"
 
     @pytest.mark.integration
@@ -206,7 +206,7 @@ def test_run(self, generator):
         assert "replies" in result
         assert isinstance(result["replies"], list)
         assert len(result["replies"]) > 0
-        assert any(answer.lower() in reply.content.lower() for reply in result["replies"])
+        assert any(answer.lower() in reply.text.lower() for reply in result["replies"])
 
     @pytest.mark.integration
     def test_run_rag_pipeline(self, generator):
@@ -270,7 +270,7 @@ def test_run_rag_pipeline(self, generator):
         replies = result["llm"]["replies"]
         assert len(replies) > 0
-        assert any("bioluminescent waves" in reply.content for reply in replies)
+        assert any("bioluminescent waves" in reply.text.lower() for reply in replies)
         assert all(reply.role == ChatRole.ASSISTANT for reply in replies)
 
     @pytest.mark.integration
@@ -308,15 +308,15 @@ def test_json_constraining(self, generator):
         assert len(result["replies"]) > 0
         assert all(reply.role == ChatRole.ASSISTANT for reply in result["replies"])
         for reply in result["replies"]:
-            assert json.loads(reply.content)
-            assert isinstance(json.loads(reply.content), dict)
-            assert "people" in json.loads(reply.content)
-            assert isinstance(json.loads(reply.content)["people"], list)
-            assert all(isinstance(person, dict) for person in json.loads(reply.content)["people"])
-            assert all("name" in person for person in json.loads(reply.content)["people"])
-            assert all("age" in person for person in json.loads(reply.content)["people"])
-            assert all(isinstance(person["name"], str) for person in json.loads(reply.content)["people"])
-            assert all(isinstance(person["age"], int) for person in json.loads(reply.content)["people"])
+            assert json.loads(reply.text)
+            assert isinstance(json.loads(reply.text), dict)
+            assert "people" in json.loads(reply.text)
+            assert isinstance(json.loads(reply.text)["people"], list)
+            assert all(isinstance(person, dict) for person in json.loads(reply.text)["people"])
+            assert all("name" in person for person in json.loads(reply.text)["people"])
+            assert all("age" in person for person in json.loads(reply.text)["people"])
+            assert all(isinstance(person["name"], str) for person in json.loads(reply.text)["people"])
+            assert all(isinstance(person["age"], int) for person in json.loads(reply.text)["people"])
 
 
 class TestLlamaCppChatGeneratorFunctionary:
@@ -431,8 +431,8 @@ def test_function_call_and_execute(self, generator):
         second_response = generator.run(messages=messages)
         assert "replies" in second_response
         assert len(second_response["replies"]) > 0
-        assert any("San Francisco" in reply.content for reply in second_response["replies"])
-        assert any("72" in reply.content for reply in second_response["replies"])
+        assert any("San Francisco" in reply.text for reply in second_response["replies"])
+        assert any("72" in reply.text for reply in second_response["replies"])
 
 
 class TestLlamaCppChatGeneratorChatML:
diff --git a/integrations/mistral/tests/test_mistral_chat_generator.py b/integrations/mistral/tests/test_mistral_chat_generator.py
index 3c95f19db..6277b9c36 100644
--- a/integrations/mistral/tests/test_mistral_chat_generator.py
+++ b/integrations/mistral/tests/test_mistral_chat_generator.py
@@ -214,7 +214,7 @@ def test_live_run(self):
         results = component.run(chat_messages)
         assert len(results["replies"]) == 1
         message: ChatMessage = results["replies"][0]
-        assert "Paris" in message.content
+        assert "Paris" in message.text
         assert "mistral-tiny" in message.meta["model"]
         assert message.meta["finish_reason"] == "stop"
 
@@ -249,7 +249,7 @@ def __call__(self, chunk: StreamingChunk) -> None:
 
         assert len(results["replies"]) == 1
         message: ChatMessage = results["replies"][0]
-        assert "Paris" in message.content
+        assert "Paris" in message.text
         assert "mistral-tiny" in message.meta["model"]
         assert message.meta["finish_reason"] == "stop"