From deb2f175a889bc2414116fdaea971ec91eabfd1b Mon Sep 17 00:00:00 2001
From: Daria Fokina
Date: Thu, 4 Jan 2024 13:55:13 +0100
Subject: [PATCH 1/2] ollama docstrings update (#171)

---
 .../ollama/src/ollama_haystack/generator.py | 24 ++++++++++---------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/integrations/ollama/src/ollama_haystack/generator.py b/integrations/ollama/src/ollama_haystack/generator.py
index 0c6d22391..f9731d5d3 100644
--- a/integrations/ollama/src/ollama_haystack/generator.py
+++ b/integrations/ollama/src/ollama_haystack/generator.py
@@ -28,12 +28,12 @@ def __init__(
         :param url: The URL of the generation endpoint of a running Ollama instance.
             Default is "http://localhost:11434/api/generate".
         :param generation_kwargs: Optional arguments to pass to the Ollama generation endpoint, such as temperature,
-            top_p, etc. See the
+            top_p, and others. See the available arguments in
             [Ollama docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
         :param system_prompt: Optional system message (overrides what is defined in the Ollama Modelfile).
         :param template: The full prompt template (overrides what is defined in the Ollama Modelfile).
         :param raw: If True, no formatting will be applied to the prompt. You may choose to use the raw parameter
-            if you are specifying a full templated prompt in your request to the API.
+            if you are specifying a full templated prompt in your API request.
         :param timeout: The number of seconds before throwing a timeout error from the Ollama API.
             Default is 30 seconds.
         """
@@ -47,10 +47,12 @@ def __init__(
 
     def _create_json_payload(self, prompt: str, generation_kwargs=None) -> Dict[str, Any]:
         """
-        Returns A dictionary of JSON arguments for a POST request to an Ollama service
-        :param prompt: the prompt to generate a response for
-        :param generation_kwargs:
-        :return: A dictionary of arguments for a POST request to an Ollama service
+        Returns a dictionary of JSON arguments for a POST request to an Ollama service.
+        :param prompt: The prompt to generate a response for.
+        :param generation_kwargs: Optional arguments to pass to the Ollama generation endpoint, such as temperature,
+            top_p, and others. See the available arguments in
+            [Ollama docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
+        :return: A dictionary of arguments for a POST request to an Ollama service.
         """
         generation_kwargs = generation_kwargs or {}
         return {
@@ -65,9 +67,9 @@ def __init__(
 
     def _convert_to_haystack_response(self, ollama_response: Response) -> Dict[str, List[Any]]:
         """
-        Convert a response from the Ollama API to the required Haystack format
-        :param ollama_response: A response (requests library) from the Ollama API
-        :return: A dictionary of the returned responses and metadata
+        Convert a response from the Ollama API to the required Haystack format.
+        :param ollama_response: A response (requests library) from the Ollama API.
+        :return: A dictionary of the returned responses and metadata.
         """
         resp_dict = ollama_response.json()
 
@@ -83,10 +85,10 @@ def run(
         self,
         prompt: str,
         generation_kwargs: Optional[Dict[str, Any]] = None,
     ):
         """
-        Run an Ollama Model on the a given prompt.
+        Run an Ollama Model on the given prompt.
         :param prompt: The prompt to generate a response for.
         :param generation_kwargs: Optional arguments to pass to the Ollama generation endpoint, such as temperature,
-            top_p, etc. See the
+            top_p, and others. See the available arguments in
             [Ollama docs](https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values).
         :return: A dictionary of the response and returned metadata
         """

From 03eca65af1ef5435807ec1c130004bb7dc50b7c5 Mon Sep 17 00:00:00 2001
From: Alistair Rogers <36593376+AlistairLR112@users.noreply.github.com>
Date: Thu, 4 Jan 2024 14:37:28 +0000
Subject: [PATCH 2/2] add example of OllamaGenerator (#170)

* add example of OllamaGenerator

* fix example with ruff

* change example to reference the greatest politician of all time - Super Mario

* add comments on how to set up and expected output

---
 integrations/ollama/example/example.py | 55 ++++++++++++++++++++++++++
 integrations/ollama/pyproject.toml     |  2 +
 2 files changed, 57 insertions(+)
 create mode 100644 integrations/ollama/example/example.py

diff --git a/integrations/ollama/example/example.py b/integrations/ollama/example/example.py
new file mode 100644
index 000000000..57486f8a4
--- /dev/null
+++ b/integrations/ollama/example/example.py
@@ -0,0 +1,55 @@
+# In order to run this example, you will need to have an instance of Ollama running with the
+# orca-mini model downloaded. We suggest you use the following commands to serve an orca-mini
+# model from Ollama
+#
+# docker run -d -p 11434:11434 --name ollama ollama/ollama:latest
+# docker exec ollama ollama pull orca-mini
+
+from haystack import Document, Pipeline
+from haystack.components.builders.prompt_builder import PromptBuilder
+from haystack.components.retrievers import InMemoryBM25Retriever
+from haystack.document_stores.in_memory import InMemoryDocumentStore
+
+from ollama_haystack import OllamaGenerator
+
+document_store = InMemoryDocumentStore()
+document_store.write_documents(
+    [
+        Document(content="Super Mario was an important politician"),
+        Document(content="Mario owns several castles and uses them to conduct important political business"),
+        Document(
+            content="Super Mario was a successful military leader who fought off several invasion attempts by "
+            "his arch rival - Bowser"
+        ),
+    ]
+)
+
+query = "Who is Super Mario?"
+
+template = """
+Given only the following information, answer the question.
+Ignore your own knowledge.
+
+Context:
+{% for document in documents %}
+    {{ document.content }}
+{% endfor %}
+
+Question: {{ query }}?
+"""
+pipe = Pipeline()
+
+pipe.add_component("retriever", InMemoryBM25Retriever(document_store=document_store))
+pipe.add_component("prompt_builder", PromptBuilder(template=template))
+pipe.add_component("llm", OllamaGenerator(model="orca-mini"))
+pipe.connect("retriever", "prompt_builder.documents")
+pipe.connect("prompt_builder", "llm")
+
+response = pipe.run({"prompt_builder": {"query": query}, "retriever": {"query": query}})
+
+print(response["llm"]["replies"])
+# An expected response - the output is not deterministic:
+# ['Based on the information provided, Super Mario is a successful military leader who fought
+# off several invasion attempts by his arch rival - Bowser. He is also an important politician and owns several
+# castles where he conducts political business. Therefore, it can be inferred that Super Mario is a combination of
+# both a military leader and an important politician.']
diff --git a/integrations/ollama/pyproject.toml b/integrations/ollama/pyproject.toml
index 4505138d9..551ee299b 100644
--- a/integrations/ollama/pyproject.toml
+++ b/integrations/ollama/pyproject.toml
@@ -145,6 +145,8 @@ ban-relative-imports = "all"
 
 [tool.ruff.per-file-ignores]
 # Tests can use magic values, assertions, and relative imports
 "tests/**/*" = ["PLR2004", "S101", "TID252"]
+# Examples can print their output
+"example/**" = ["T201"]
 
 [tool.coverage.run]
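Beyond the pipeline example added above, a rough standalone sketch of how the parameters documented in generator.py might be used, assuming a local Ollama instance serving the orca-mini model at the default URL (the temperature and top_p values below are illustrative, not taken from the patches):

    from ollama_haystack import OllamaGenerator

    # temperature and top_p are examples of the generation_kwargs documented in the
    # updated docstrings; any option from the linked Ollama docs could be passed instead.
    generator = OllamaGenerator(
        model="orca-mini",
        url="http://localhost:11434/api/generate",
        generation_kwargs={"temperature": 0.7, "top_p": 0.9},
    )

    # run() returns a dictionary of the response and returned metadata;
    # the generated text is available under "replies", as in example.py.
    result = generator.run(prompt="Who is Super Mario?")
    print(result["replies"])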