diff --git a/integrations/llama_cpp/pyproject.toml b/integrations/llama_cpp/pyproject.toml
index 673df575a..acf42d958 100644
--- a/integrations/llama_cpp/pyproject.toml
+++ b/integrations/llama_cpp/pyproject.toml
@@ -26,7 +26,7 @@ classifiers = [
   "Programming Language :: Python :: Implementation :: CPython",
   "Programming Language :: Python :: Implementation :: PyPy",
 ]
-dependencies = ["haystack-ai", "llama-cpp-python>=0.2.87,<0.3.0"]
+dependencies = ["haystack-ai", "llama-cpp-python>=0.2.87"]
 
 [project.urls]
 Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/llama_cpp#readme"
diff --git a/integrations/llama_cpp/tests/test_chat_generator.py b/integrations/llama_cpp/tests/test_chat_generator.py
index 1d4c9cf82..802fe9128 100644
--- a/integrations/llama_cpp/tests/test_chat_generator.py
+++ b/integrations/llama_cpp/tests/test_chat_generator.py
@@ -342,7 +342,7 @@ def generator(self, model_path, capsys):
         hf_tokenizer_path = "meetkai/functionary-small-v2.4-GGUF"
         generator = LlamaCppChatGenerator(
             model=model_path,
-            n_ctx=8192,
+            n_ctx=512,
             n_batch=512,
             model_kwargs={
                 "chat_format": "functionary-v2",
@@ -399,7 +399,6 @@ def test_function_call_and_execute(self, generator):
                                 "type": "string",
                                 "description": "The city and state, e.g. San Francisco, CA",
                             },
-                            "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                         },
                         "required": ["location"],
                     },
@@ -407,7 +406,8 @@ def test_function_call_and_execute(self, generator):
             }
         ]
 
-        response = generator.run(messages=messages, generation_kwargs={"tools": tools})
+        tool_choice = {"type": "function", "function": {"name": "get_current_temperature"}}
+        response = generator.run(messages=messages, generation_kwargs={"tools": tools, "tool_choice": tool_choice})
 
         available_functions = {
             "get_current_temperature": self.get_current_temperature,
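
For context, a minimal sketch (not part of the patch) of the forced tool-call pattern the updated test relies on. `LlamaCppChatGenerator.run` forwards `generation_kwargs` to llama-cpp-python's OpenAI-compatible `create_chat_completion`, so passing a `tool_choice` that names the function forces the model to emit that tool call rather than optionally answering in plain text. The GGUF filename and user prompt below are placeholders, and the tool description string is paraphrased; the schema mirrors the one in the test after the `unit` property is removed.

```python
# Sketch only: shows the tools + tool_choice combination the updated test uses.
# The model path is a placeholder for a local functionary GGUF file.
from haystack.dataclasses import ChatMessage
from haystack_integrations.components.generators.llama_cpp import LlamaCppChatGenerator

generator = LlamaCppChatGenerator(
    model="functionary-small-v2.4.Q4_0.gguf",  # placeholder local path
    n_ctx=512,
    n_batch=512,
    model_kwargs={
        "chat_format": "functionary-v2",
        "hf_tokenizer_path": "meetkai/functionary-small-v2.4-GGUF",
    },
)
generator.warm_up()

tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_temperature",
            "description": "Get the current temperature in a given location",  # paraphrased
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                },
                "required": ["location"],
            },
        },
    }
]

# Forcing the named tool makes the test deterministic: without tool_choice,
# the model may reply in prose and the function-call assertions can flake.
tool_choice = {"type": "function", "function": {"name": "get_current_temperature"}}

messages = [ChatMessage.from_user("What's the weather like in San Francisco?")]
response = generator.run(
    messages=messages,
    generation_kwargs={"tools": tools, "tool_choice": tool_choice},
)
```

The `n_ctx=512` reduction works here because forcing a single short tool call needs far less context than open-ended functionary chat, which keeps the CI model footprint small.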