From e9e9c04228b85a0dbe0d794eefb97fc145fe5cc5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 18:10:41 +0100 Subject: [PATCH 1/7] chore(deps): bump readmeio/rdme from 8 to 9 (#1234) Bumps [readmeio/rdme](https://github.com/readmeio/rdme) from 8 to 9. - [Release notes](https://github.com/readmeio/rdme/releases) - [Changelog](https://github.com/readmeio/rdme/blob/next/CHANGELOG.md) - [Commits](https://github.com/readmeio/rdme/compare/v8...v9) --- updated-dependencies: - dependency-name: readmeio/rdme dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/CI_readme_sync.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/CI_readme_sync.yml b/.github/workflows/CI_readme_sync.yml index c6204a9be..958cc12f6 100644 --- a/.github/workflows/CI_readme_sync.yml +++ b/.github/workflows/CI_readme_sync.yml @@ -81,6 +81,6 @@ jobs: ls tmp - name: Sync API docs with Haystack docs version ${{ matrix.hs-docs-version }} - uses: readmeio/rdme@v8 + uses: readmeio/rdme@v9 with: rdme: docs ${{ steps.pathfinder.outputs.project_path }}/tmp --key=${{ secrets.README_API_KEY }} --version=${{ matrix.hs-docs-version }} From 7a1297b483f93ee0fa669cfcbbdeb06531ee27b9 Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Mon, 9 Dec 2024 18:38:41 +0100 Subject: [PATCH 2/7] use text instead of content in Cohere and Anthropic (#1237) --- .../anthropic/tests/test_chat_generator.py | 14 +++++++------- .../anthropic/tests/test_vertex_chat_generator.py | 4 ++-- .../generators/cohere/chat/chat_generator.py | 6 +++--- .../components/generators/cohere/generator.py | 2 +- .../cohere/tests/test_cohere_chat_generator.py | 14 +++++++------- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/integrations/anthropic/tests/test_chat_generator.py b/integrations/anthropic/tests/test_chat_generator.py index 155cf7950..9a111fc9d 100644 --- a/integrations/anthropic/tests/test_chat_generator.py +++ b/integrations/anthropic/tests/test_chat_generator.py @@ -188,9 +188,9 @@ def test_default_inference_params(self, chat_messages): first_reply = replies[0] assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance" - assert first_reply.content, "First reply has no content" + assert first_reply.text, "First reply has no text" assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant" - assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'" + assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'" assert first_reply.meta, "First reply has no metadata" @pytest.mark.skipif( @@ -221,9 +221,9 @@ def streaming_callback(chunk: StreamingChunk): first_reply = replies[0] assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance" - assert first_reply.content, "First reply has no content" + assert first_reply.text, "First reply has no text" assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant" - assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'" + assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'" assert first_reply.meta, "First reply has no metadata" @pytest.mark.skipif( @@ -255,11 +255,11 @@ def 
test_tools_use(self): first_reply = replies[0] assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance" - assert first_reply.content, "First reply has no content" + assert first_reply.text, "First reply has no text" assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant" - assert "get_stock_price" in first_reply.content.lower(), "First reply does not contain get_stock_price" + assert "get_stock_price" in first_reply.text.lower(), "First reply does not contain get_stock_price" assert first_reply.meta, "First reply has no metadata" - fc_response = json.loads(first_reply.content) + fc_response = json.loads(first_reply.text) assert "name" in fc_response, "First reply does not contain name of the tool" assert "input" in fc_response, "First reply does not contain input of the tool" diff --git a/integrations/anthropic/tests/test_vertex_chat_generator.py b/integrations/anthropic/tests/test_vertex_chat_generator.py index a67e801ad..fefb508ac 100644 --- a/integrations/anthropic/tests/test_vertex_chat_generator.py +++ b/integrations/anthropic/tests/test_vertex_chat_generator.py @@ -188,9 +188,9 @@ def test_default_inference_params(self, chat_messages): first_reply = replies[0] assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance" - assert first_reply.content, "First reply has no content" + assert first_reply.text, "First reply has no text" assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant" - assert "paris" in first_reply.content.lower(), "First reply does not contain 'paris'" + assert "paris" in first_reply.text.lower(), "First reply does not contain 'paris'" assert first_reply.meta, "First reply has no metadata" # Anthropic messages API is similar for AnthropicVertex and Anthropic endpoint, diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py index e635e291c..3fae30baa 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/chat/chat_generator.py @@ -136,7 +136,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "CohereChatGenerator": def _message_to_dict(self, message: ChatMessage) -> Dict[str, str]: role = "User" if message.role == ChatRole.USER else "Chatbot" - chat_message = {"user_name": role, "text": message.content} + chat_message = {"user_name": role, "text": message.text} return chat_message @component.output_types(replies=List[ChatMessage]) @@ -157,7 +157,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str, chat_history = [self._message_to_dict(m) for m in messages[:-1]] if self.streaming_callback: response = self.client.chat_stream( - message=messages[-1].content, + message=messages[-1].text, model=self.model, chat_history=chat_history, **generation_kwargs, @@ -190,7 +190,7 @@ def run(self, messages: List[ChatMessage], generation_kwargs: Optional[Dict[str, ) else: response = self.client.chat( - message=messages[-1].content, + message=messages[-1].text, model=self.model, chat_history=chat_history, **generation_kwargs, diff --git a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py index 
e4eaf8670..4630962ba 100644 --- a/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py +++ b/integrations/cohere/src/haystack_integrations/components/generators/cohere/generator.py @@ -67,4 +67,4 @@ def run(self, prompt: str): chat_message = ChatMessage.from_user(prompt) # Note we have to call super() like this because of the way components are dynamically built with the decorator results = super(CohereGenerator, self).run([chat_message]) # noqa - return {"replies": [results["replies"][0].content], "meta": [results["replies"][0].meta]} + return {"replies": [results["replies"][0].text], "meta": [results["replies"][0].meta]} diff --git a/integrations/cohere/tests/test_cohere_chat_generator.py b/integrations/cohere/tests/test_cohere_chat_generator.py index b7cc0534a..09f3708eb 100644 --- a/integrations/cohere/tests/test_cohere_chat_generator.py +++ b/integrations/cohere/tests/test_cohere_chat_generator.py @@ -169,7 +169,7 @@ def test_live_run(self): results = component.run(chat_messages) assert len(results["replies"]) == 1 message: ChatMessage = results["replies"][0] - assert "Paris" in message.content + assert "Paris" in message.text assert "usage" in message.meta assert "prompt_tokens" in message.meta["usage"] assert "completion_tokens" in message.meta["usage"] @@ -205,7 +205,7 @@ def __call__(self, chunk: StreamingChunk) -> None: assert len(results["replies"]) == 1 message: ChatMessage = results["replies"][0] - assert "Paris" in message.content + assert "Paris" in message.text assert message.meta["finish_reason"] == "COMPLETE" @@ -227,7 +227,7 @@ def test_live_run_with_connector(self): results = component.run(chat_messages, generation_kwargs={"connectors": [{"id": "web-search"}]}) assert len(results["replies"]) == 1 message: ChatMessage = results["replies"][0] - assert "Paris" in message.content + assert "Paris" in message.text assert message.meta["documents"] is not None assert "citations" in message.meta # Citations might be None @@ -253,7 +253,7 @@ def __call__(self, chunk: StreamingChunk) -> None: assert len(results["replies"]) == 1 message: ChatMessage = results["replies"][0] - assert "Paris" in message.content + assert "Paris" in message.text assert message.meta["finish_reason"] == "COMPLETE" @@ -291,10 +291,10 @@ def test_tools_use(self): first_reply = replies[0] assert isinstance(first_reply, ChatMessage), "First reply is not a ChatMessage instance" - assert first_reply.content, "First reply has no content" + assert first_reply.text, "First reply has no text" assert ChatMessage.is_from(first_reply, ChatRole.ASSISTANT), "First reply is not from the assistant" - assert "get_stock_price" in first_reply.content.lower(), "First reply does not contain get_stock_price" + assert "get_stock_price" in first_reply.text.lower(), "First reply does not contain get_stock_price" assert first_reply.meta, "First reply has no metadata" - fc_response = json.loads(first_reply.content) + fc_response = json.loads(first_reply.text) assert "name" in fc_response, "First reply does not contain name of the tool" assert "parameters" in fc_response, "First reply does not contain parameters of the tool" From e62cf79e1e3e9351e85268992c3d12fdce0a8d24 Mon Sep 17 00:00:00 2001 From: HaystackBot Date: Mon, 9 Dec 2024 17:41:37 +0000 Subject: [PATCH 3/7] Update the changelog --- integrations/cohere/CHANGELOG.md | 96 ++++++++++++++++++++++++++------ 1 file changed, 80 insertions(+), 16 deletions(-) diff --git a/integrations/cohere/CHANGELOG.md b/integrations/cohere/CHANGELOG.md index 
3f36836cc..1d98408e9 100644
--- a/integrations/cohere/CHANGELOG.md
+++ b/integrations/cohere/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+## [integrations/cohere-v2.0.1] - 2024-12-09
+
+### ⚙️ CI
+
+- Adopt uv as installer (#1142)
+
+### 🧹 Chores
+
+- Update ruff linting scripts and settings (#1105)
+- Fix linting/isort (#1215)
+
+### 🌀 Miscellaneous
+
+- Chore: use class methods to create `ChatMessage` (#1222)
+- Chore: use `text` instead of `content` for `ChatMessage` in Cohere and Anthropic (#1237)
+
 ## [integrations/cohere-v2.0.0] - 2024-09-16
 
 ### 🚀 Features
 
 - 
 
 ### 🧪 Testing
 
 - Do not retry tests in `hatch run test` command (#954)
 
-### ⚙️ Miscellaneous Tasks
+### ⚙️ CI
 
 - Retry tests to reduce flakiness (#836)
+
+### 🧹 Chores
+
 - Update ruff invocation to include check parameter (#853)
 
-### Docs
+### 🌀 Miscellaneous
 
+- Ci: install `pytest-rerunfailures` where needed; add retry config to `test-cov` script (#845)
 - Update CohereChatGenerator docstrings (#958)
 - Update CohereGenerator docstrings (#960)
 
 ## [integrations/cohere-v1.1.1] - 2024-06-12
 
+### 🌀 Miscellaneous
+
+- Chore: `CohereGenerator` - remove warning about `generate` API (#805)
+
 ## [integrations/cohere-v1.1.0] - 2024-05-24
 
 ### 🐛 Bug Fixes
 
 - Remove support for generate API (#755)
 
+### 🌀 Miscellaneous
+
+- Chore: change the pydoc renderer class (#718)
+
 ## [integrations/cohere-v1.0.0] - 2024-05-03
 
+### 🌀 Miscellaneous
+
+- Follow up: update Cohere integration to use Cohere SDK v5 (#711)
+
 ## [integrations/cohere-v0.7.0] - 2024-05-02
 
+### 🌀 Miscellaneous
+
+- Chore: add license classifiers (#680)
+- Update Cohere integration to use Cohere SDK v5 (#702)
+
 ## [integrations/cohere-v0.6.0] - 2024-04-08
 
 ### 🚀 Features
 
 - 
 
 ## [integrations/cohere-v0.5.0] - 2024-03-29
 
+### 🌀 Miscellaneous
+
+- Add the Cohere client name to cohere requests (#362)
+
 ## [integrations/cohere-v0.4.1] - 2024-03-21
 
 ### 🐛 Bug Fixes
 
 - Fix order of API docs (#447)
-
-This PR will also push the docs to Readme
 - Fix tests (#561)
-* fix unit tests
-
-* try
-
-* remove flaky check
-
 ### 📚 Documentation
 
 - Update category slug (#442)
 - Small consistency improvements (#536)
 - Disable-class-def (#556)
 
-### ⚙️ Miscellaneous Tasks
+### 🧹 Chores
 
 - Update Cohere integration to use new generic callable (de)serializers for their callback handlers (#453)
 - Use `serialize_callable` instead of `serialize_callback_handler` in Cohere (#460)
 
-### Cohere
+### 🌀 Miscellaneous
 
+- Cohere - remove matching error message from tests (#419)
 - Fix linting (#509)
+- Make tests show coverage (#566)
+- Refactor tests (#574)
+- Test: relax test constraints (#591)
+- Remove references to Python 3.7 (#601)
+- Fix: Pin cohere version (#609)
 
 ## [integrations/cohere-v0.4.0] - 2024-02-12
 
 ### 🚀 Features
 
 - 
 
 ### 🐛 Bug Fixes
 
 - Fix failing `TestCohereChatGenerator.test_from_dict_fail_wo_env_var` test (#393)
 
-## [integrations/cohere-v0.3.0] - 2024-01-25
+### 🌀 Miscellaneous
 
-### 🐛 Bug Fixes
+- Cohere: generate api docs (#321)
+- Fix: update to latest haystack-ai version (#348)
 
-- Fix project urls (#96)
+## [integrations/cohere-v0.3.0] - 2024-01-25
+
+### 🐛 Bug Fixes
+
+- Fix project URLs (#96)
 - Cohere namespace reorg (#271)
 
 ### 🚜 Refactor
 
 - Use `hatch_vcs` to manage integrations versioning (#103)
 
-### ⚙️ Miscellaneous Tasks
+### 🧹 Chores
 
 - [**breaking**] Rename `model_name` to `model` in the Cohere integration (#222)
 - Cohere namespace change (#247)
 
+### 🌀 Miscellaneous
+
+- Cohere: remove unused constant (#91)
+- Change default 'input_type' for CohereTextEmbedder (#99)
+- Change metadata to meta (#152)
+- Add cohere chat generator (#88)
+- Optimize API key reading (#162)
+- Cohere - change metadata to meta (#178)
+
 ## [integrations/cohere-v0.2.0] - 2023-12-11
 
 ### 🚀 Features
 
 - Add support for V3 Embed models to CohereEmbedders (#89)
 
+### 🌀 Miscellaneous
+
+- Cohere: increase version to prepare release (#92)
+
 ## [integrations/cohere-v0.1.1] - 2023-12-07
 
+### 🌀 Miscellaneous
+
+- [cohere] Add text and document embedders (#80)
+- [cohere] fix cohere pypi version badge and add Embedder note (#86)
+
 ## [integrations/cohere-v0.0.1] - 2023-12-04
 
+### 🌀 Miscellaneous
+
+- Add `cohere_haystack` integration package (#75)
+

From 92830654d2db3350a48a7eabc337c8bc8075612b Mon Sep 17 00:00:00 2001
From: Stefano Fiorucci
Date: Mon, 9 Dec 2024 19:51:32 +0100
Subject: [PATCH 4/7] chroma: unpin tokenizers (#1233)

---
 integrations/chroma/pyproject.toml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/integrations/chroma/pyproject.toml b/integrations/chroma/pyproject.toml
index c91cc6cb0..40bc9a2b3 100644
--- a/integrations/chroma/pyproject.toml
+++ b/integrations/chroma/pyproject.toml
@@ -25,9 +25,8 @@ classifiers = [
 dependencies = [
   "haystack-ai",
   "chromadb>=0.5.17",
-  "typing_extensions>=4.8.0",
-  "tokenizers>=0.13.2,<=0.20.3" # TODO: remove when Chroma pins tokenizers internally
-]
+  "typing_extensions>=4.8.0"
+  ]
 
 [project.urls]
 Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/chroma#readme"

From d3677be7171425aab8d6523605bfb3715f0d640b Mon Sep 17 00:00:00 2001
From: isfuku <54598113+isfuku@users.noreply.github.com>
Date: Tue, 10 Dec 2024 10:47:44 -0300
Subject: [PATCH 5/7] feat: warn if LangfuseTracer initialized without tracing enabled (#1231)

* feat: warn if LangfuseTracer initialized without tracing enabled

* test: warn when langfuse tracer init with tracing disabled
---
 .../tracing/langfuse/tracer.py              | 11 +++++++++--
 integrations/langfuse/tests/test_tracer.py  | 16 ++++++++++++++++
 2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/integrations/langfuse/src/haystack_integrations/tracing/langfuse/tracer.py b/integrations/langfuse/src/haystack_integrations/tracing/langfuse/tracer.py
index d6f2535c7..6af05633e 100644
--- a/integrations/langfuse/src/haystack_integrations/tracing/langfuse/tracer.py
+++ b/integrations/langfuse/src/haystack_integrations/tracing/langfuse/tracer.py
@@ -7,7 +7,8 @@
 from haystack import logging
 from haystack.components.generators.openai_utils import _convert_message_to_openai_format
 from haystack.dataclasses import ChatMessage
-from haystack.tracing import Span, Tracer, tracer
+from haystack.tracing import Span, Tracer
+from haystack.tracing import tracer as proxy_tracer
 from haystack.tracing import utils as tracing_utils
 
 import langfuse
@@ -78,7 +79,7 @@ def set_content_tag(self, key: str, value: Any) -> None:
         :param key: The content tag key.
         :param value: The content tag value.
         """
-        if not tracer.is_content_tracing_enabled:
+        if not proxy_tracer.is_content_tracing_enabled:
             return
         if key.endswith(".input"):
             if "messages" in value:
@@ -126,6 +127,12 @@ def __init__(self, tracer: "langfuse.Langfuse", name: str = "Haystack", public:
             be publicly accessible to anyone with the tracing URL. If set to `False`,
             the tracing data will be private and only accessible to the Langfuse account owner.
""" + if not proxy_tracer.is_content_tracing_enabled: + logger.warning( + "Traces will not be logged to Langfuse because Haystack tracing is disabled. " + "To enable, set the HAYSTACK_CONTENT_TRACING_ENABLED environment variable to true " + "before importing Haystack." + ) self._tracer = tracer self._context: List[LangfuseSpan] = [] self._name = name diff --git a/integrations/langfuse/tests/test_tracer.py b/integrations/langfuse/tests/test_tracer.py index 42ae1d07d..d9790ea36 100644 --- a/integrations/langfuse/tests/test_tracer.py +++ b/integrations/langfuse/tests/test_tracer.py @@ -1,4 +1,6 @@ import datetime +import logging +import sys from unittest.mock import MagicMock, Mock, patch from haystack.dataclasses import ChatMessage @@ -149,3 +151,17 @@ def test_context_is_empty_after_tracing(self): pass assert tracer._context == [] + + def test_init_with_tracing_disabled(self, monkeypatch, caplog): + # Clear haystack modules because ProxyTracer is initialized whenever haystack is imported + modules_to_clear = [name for name in sys.modules if name.startswith('haystack')] + for name in modules_to_clear: + sys.modules.pop(name, None) + + # Re-import LangfuseTracer and instantiate it with tracing disabled + with caplog.at_level(logging.WARNING): + monkeypatch.setenv("HAYSTACK_CONTENT_TRACING_ENABLED", "false") + from haystack_integrations.tracing.langfuse import LangfuseTracer + + LangfuseTracer(tracer=MockTracer(), name="Haystack", public=False) + assert "tracing is disabled" in caplog.text From 54a0573e7c0c047b37a70877cce8ff49f09924e8 Mon Sep 17 00:00:00 2001 From: Stefano Fiorucci Date: Tue, 10 Dec 2024 14:55:18 +0100 Subject: [PATCH 6/7] chore: use text instead of content for ChatMessage in Llama.cpp, Langfuse and Mistral (#1238) --- integrations/langfuse/tests/test_tracing.py | 2 +- .../llama_cpp/chat/chat_generator.py | 2 +- .../llama_cpp/tests/test_chat_generator.py | 30 +++++++++---------- .../tests/test_mistral_chat_generator.py | 4 +-- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/integrations/langfuse/tests/test_tracing.py b/integrations/langfuse/tests/test_tracing.py index e5737b861..75c1b7a13 100644 --- a/integrations/langfuse/tests/test_tracing.py +++ b/integrations/langfuse/tests/test_tracing.py @@ -49,7 +49,7 @@ def test_tracing_integration(llm_class, env_var, expected_trace): "tracer": {"invocation_context": {"user_id": "user_42"}}, } ) - assert "Berlin" in response["llm"]["replies"][0].content + assert "Berlin" in response["llm"]["replies"][0].text assert response["tracer"]["trace_url"] trace_url = response["tracer"]["trace_url"] diff --git a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py index d43700215..014dd7169 100644 --- a/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py +++ b/integrations/llama_cpp/src/haystack_integrations/components/generators/llama_cpp/chat/chat_generator.py @@ -17,7 +17,7 @@ def _convert_message_to_llamacpp_format(message: ChatMessage) -> Dict[str, str]: - `content` - `name` (optional) """ - formatted_msg = {"role": message.role.value, "content": message.content} + formatted_msg = {"role": message.role.value, "content": message.text} if message.name: formatted_msg["name"] = message.name diff --git a/integrations/llama_cpp/tests/test_chat_generator.py b/integrations/llama_cpp/tests/test_chat_generator.py index 
802fe9128..0ddd78c4f 100644 --- a/integrations/llama_cpp/tests/test_chat_generator.py +++ b/integrations/llama_cpp/tests/test_chat_generator.py @@ -163,7 +163,7 @@ def test_run_with_valid_message(self, generator_mock): assert isinstance(result["replies"], list) assert len(result["replies"]) == 1 assert isinstance(result["replies"][0], ChatMessage) - assert result["replies"][0].content == "Generated text" + assert result["replies"][0].text == "Generated text" assert result["replies"][0].role == ChatRole.ASSISTANT def test_run_with_generation_kwargs(self, generator_mock): @@ -183,7 +183,7 @@ def test_run_with_generation_kwargs(self, generator_mock): mock_model.create_chat_completion.return_value = mock_output generation_kwargs = {"max_tokens": 128} result = generator.run([ChatMessage.from_system("Write a 200 word paragraph.")], generation_kwargs) - assert result["replies"][0].content == "Generated text" + assert result["replies"][0].text == "Generated text" assert result["replies"][0].meta["finish_reason"] == "length" @pytest.mark.integration @@ -206,7 +206,7 @@ def test_run(self, generator): assert "replies" in result assert isinstance(result["replies"], list) assert len(result["replies"]) > 0 - assert any(answer.lower() in reply.content.lower() for reply in result["replies"]) + assert any(answer.lower() in reply.text.lower() for reply in result["replies"]) @pytest.mark.integration def test_run_rag_pipeline(self, generator): @@ -270,7 +270,7 @@ def test_run_rag_pipeline(self, generator): replies = result["llm"]["replies"] assert len(replies) > 0 - assert any("bioluminescent waves" in reply.content for reply in replies) + assert any("bioluminescent waves" in reply.text.lower() for reply in replies) assert all(reply.role == ChatRole.ASSISTANT for reply in replies) @pytest.mark.integration @@ -308,15 +308,15 @@ def test_json_constraining(self, generator): assert len(result["replies"]) > 0 assert all(reply.role == ChatRole.ASSISTANT for reply in result["replies"]) for reply in result["replies"]: - assert json.loads(reply.content) - assert isinstance(json.loads(reply.content), dict) - assert "people" in json.loads(reply.content) - assert isinstance(json.loads(reply.content)["people"], list) - assert all(isinstance(person, dict) for person in json.loads(reply.content)["people"]) - assert all("name" in person for person in json.loads(reply.content)["people"]) - assert all("age" in person for person in json.loads(reply.content)["people"]) - assert all(isinstance(person["name"], str) for person in json.loads(reply.content)["people"]) - assert all(isinstance(person["age"], int) for person in json.loads(reply.content)["people"]) + assert json.loads(reply.text) + assert isinstance(json.loads(reply.text), dict) + assert "people" in json.loads(reply.text) + assert isinstance(json.loads(reply.text)["people"], list) + assert all(isinstance(person, dict) for person in json.loads(reply.text)["people"]) + assert all("name" in person for person in json.loads(reply.text)["people"]) + assert all("age" in person for person in json.loads(reply.text)["people"]) + assert all(isinstance(person["name"], str) for person in json.loads(reply.text)["people"]) + assert all(isinstance(person["age"], int) for person in json.loads(reply.text)["people"]) class TestLlamaCppChatGeneratorFunctionary: @@ -431,8 +431,8 @@ def test_function_call_and_execute(self, generator): second_response = generator.run(messages=messages) assert "replies" in second_response assert len(second_response["replies"]) > 0 - assert any("San Francisco" 
in reply.content for reply in second_response["replies"])
-        assert any("72" in reply.content for reply in second_response["replies"])
+        assert any("San Francisco" in reply.text for reply in second_response["replies"])
+        assert any("72" in reply.text for reply in second_response["replies"])
 
 
 class TestLlamaCppChatGeneratorChatML:
diff --git a/integrations/mistral/tests/test_mistral_chat_generator.py b/integrations/mistral/tests/test_mistral_chat_generator.py
index 3c95f19db..6277b9c36 100644
--- a/integrations/mistral/tests/test_mistral_chat_generator.py
+++ b/integrations/mistral/tests/test_mistral_chat_generator.py
@@ -214,7 +214,7 @@ def test_live_run(self):
         results = component.run(chat_messages)
         assert len(results["replies"]) == 1
         message: ChatMessage = results["replies"][0]
-        assert "Paris" in message.content
+        assert "Paris" in message.text
         assert "mistral-tiny" in message.meta["model"]
         assert message.meta["finish_reason"] == "stop"
@@ -249,7 +249,7 @@ def __call__(self, chunk: StreamingChunk) -> None:
 
         assert len(results["replies"]) == 1
         message: ChatMessage = results["replies"][0]
-        assert "Paris" in message.content
+        assert "Paris" in message.text
         assert "mistral-tiny" in message.meta["model"]
         assert message.meta["finish_reason"] == "stop"

From d22deba6ef45839cd382732fbde08aa313ac6fe4 Mon Sep 17 00:00:00 2001
From: HaystackBot
Date: Tue, 10 Dec 2024 13:56:41 +0000
Subject: [PATCH 7/7] Update the changelog

---
 integrations/llama_cpp/CHANGELOG.md | 59 +++++++++++++++++++++++++----
 1 file changed, 51 insertions(+), 8 deletions(-)

diff --git a/integrations/llama_cpp/CHANGELOG.md b/integrations/llama_cpp/CHANGELOG.md
index ea4c05e4d..2d4a8c86e 100644
--- a/integrations/llama_cpp/CHANGELOG.md
+++ b/integrations/llama_cpp/CHANGELOG.md
@@ -1,46 +1,89 @@
 # Changelog
 
+## [integrations/llama_cpp-v0.4.2] - 2024-12-10
+
+### 🧪 Testing
+
+- Do not retry tests in `hatch run test` command (#954)
+
+### ⚙️ CI
+
+- Adopt uv as installer (#1142)
+
+### 🧹 Chores
+
+- Update ruff linting scripts and settings (#1105)
+- Unpin `llama-cpp-python` (#1115)
+- Fix linting/isort (#1215)
+- Use text instead of content for ChatMessage in Llama.cpp, Langfuse and Mistral (#1238)
+
+### 🌀 Miscellaneous
+
+- Chore: llama_cpp - ruff update, don't ruff tests (#998)
+- Fix: pin `llama-cpp-python<0.3.0` (#1111)
+
 ## [integrations/llama_cpp-v0.4.1] - 2024-08-08
 
 ### 🐛 Bug Fixes
 
 - Replace DynamicChatPromptBuilder with ChatPromptBuilder (#940)
 
-### ⚙️ Miscellaneous Tasks
+### ⚙️ CI
 
 - Retry tests to reduce flakiness (#836)
+
+### 🧹 Chores
+
 - Update ruff invocation to include check parameter (#853)
 - Pin `llama-cpp-python>=0.2.87` (#955)
 
+### 🌀 Miscellaneous
+
+- Ci: install `pytest-rerunfailures` where needed; add retry config to `test-cov` script (#845)
+- Fix: pin llama-cpp-python to an older version (#943)
+- Refactor: introduce `_convert_message_to_llamacpp_format` utility function (#939)
+
 ## [integrations/llama_cpp-v0.4.0] - 2024-05-13
 
 ### 🐛 Bug Fixes
 
-- Fix commit (#436)
+- Llama.cpp: change wrong links and imports (#436)
 - Fix order of API docs (#447)
 
-This PR will also push the docs to Readme
-
 ### 📚 Documentation
 
 - Update category slug (#442)
 - Small consistency improvements (#536)
 - Disable-class-def (#556)
 
-### ⚙️ Miscellaneous Tasks
+### 🧹 Chores
 
 - [**breaking**] Rename model_path to model in the Llama.cpp integration (#243)
 
-### Llama.cpp
+### 🌀 Miscellaneous
 
 - Generate api docs (#353)
 - Model_name_or_path > model (#418)
+- Llama.cpp - review docstrings (#510)
+- Llama.cpp - update examples (#511)
+- Make tests show coverage (#566)
+- Remove references to Python 3.7 (#601)
+- Chore: add license classifiers (#680)
+- Chore: change the pydoc renderer class (#718)
+- Basic implementation of llama.cpp chat generation (#723)
 
 ## [integrations/llama_cpp-v0.2.1] - 2024-01-18
 
+### 🌀 Miscellaneous
+
+- Update import paths for beta5 (#233)
+
 ## [integrations/llama_cpp-v0.2.0] - 2024-01-17
 
+### 🌀 Miscellaneous
+
+- Mount llama_cpp in haystack_integrations (#217)
+
 ## [integrations/llama_cpp-v0.1.0] - 2024-01-09
 
 ### 🚀 Features
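
Taken together, patches 2 and 6 apply one mechanical migration across the Anthropic, Cohere, Llama.cpp, Langfuse, and Mistral integrations: read a `ChatMessage` through its `text` property instead of the `content` attribute. A minimal sketch of the change from a caller's perspective, assuming a haystack-ai release in which `ChatMessage.text` is available (which these patches require):

    from haystack.dataclasses import ChatMessage

    msg = ChatMessage.from_user("What's the capital of France?")

    # Before these patches, integrations read the attribute directly:
    #   answer = msg.content
    # After them, every call site goes through the `text` property:
    answer = msg.text
    assert answer == "What's the capital of France?"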
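Patch 5 only warns when content tracing is off; it does not turn tracing on. A sketch of the setup its warning asks for, assuming Langfuse credentials are supplied through the client's usual environment variables (the wiring around the constructor is illustrative, not taken from the patch):

    import os

    # The flag is read when haystack is first imported, which is why the
    # warning added in patch 5 says to set it before importing Haystack.
    os.environ["HAYSTACK_CONTENT_TRACING_ENABLED"] = "true"

    from langfuse import Langfuse  # noqa: E402
    from haystack_integrations.tracing.langfuse import LangfuseTracer  # noqa: E402

    # Constructor arguments match the signature shown in the tracer.py diff.
    tracer = LangfuseTracer(tracer=Langfuse(), name="Haystack", public=False)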
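Patch 6 leaves the llama.cpp wire format untouched; only the accessor changes. For reference, a standalone sketch of the conversion the patched helper performs (`to_llamacpp` is a hypothetical name here; the real helper is `_convert_message_to_llamacpp_format`):

    from haystack.dataclasses import ChatMessage

    def to_llamacpp(message: ChatMessage) -> dict:
        # Same shape the patched helper builds: the llama.cpp "content" key
        # is now filled from message.text rather than message.content.
        formatted = {"role": message.role.value, "content": message.text}
        if message.name:
            formatted["name"] = message.name
        return formatted

    print(to_llamacpp(ChatMessage.from_user("Hi")))  # {'role': 'user', 'content': 'Hi'}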