From 4cbe113fd10f193d449d9bf78e8f699de56dc2b2 Mon Sep 17 00:00:00 2001 From: Tuana Celik Date: Fri, 29 Dec 2023 16:56:14 +0300 Subject: [PATCH 01/11] draft for first bit of gemini work with makersuite --- integrations/gemini-haystack/LICENSE.txt | 73 +++++++ integrations/gemini-haystack/README.md | 21 ++ integrations/gemini-haystack/example.ipynb | 199 ++++++++++++++++++ integrations/gemini-haystack/pyproject.toml | 165 +++++++++++++++ .../src/gemini_haystack/__init__.py | 3 + .../gemini_haystack/generators/__init__.py | 3 + .../gemini_haystack/generators/chat/gemini.py | 170 +++++++++++++++ .../src/gemini_haystack/generators/gemini.py | 130 ++++++++++++ .../gemini-haystack/tests/__init__.py | 3 + 9 files changed, 767 insertions(+) create mode 100644 integrations/gemini-haystack/LICENSE.txt create mode 100644 integrations/gemini-haystack/README.md create mode 100644 integrations/gemini-haystack/example.ipynb create mode 100644 integrations/gemini-haystack/pyproject.toml create mode 100644 integrations/gemini-haystack/src/gemini_haystack/__init__.py create mode 100644 integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py create mode 100644 integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py create mode 100644 integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py create mode 100644 integrations/gemini-haystack/tests/__init__.py diff --git a/integrations/gemini-haystack/LICENSE.txt b/integrations/gemini-haystack/LICENSE.txt new file mode 100644 index 000000000..137069b82 --- /dev/null +++ b/integrations/gemini-haystack/LICENSE.txt @@ -0,0 +1,73 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + + You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + +To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/integrations/gemini-haystack/README.md b/integrations/gemini-haystack/README.md new file mode 100644 index 000000000..603457372 --- /dev/null +++ b/integrations/gemini-haystack/README.md @@ -0,0 +1,21 @@ +# gemini-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/gemini-haystack.svg)](https://pypi.org/project/gemini-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/gemini-haystack.svg)](https://pypi.org/project/gemini-haystack) + +----- + +**Table of Contents** + +- [Installation](#installation) +- [License](#license) + +## Installation + +```console +pip install gemini-haystack +``` + +## License + +`gemini-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. 
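Since the README stops at installation, here is a minimal usage sketch for the draft `GeminiGenerator` API that this patch introduces; it mirrors the flow of the example notebook below. This sketch is an editorial addition, not part of the patch: the API key and project ID are placeholders, and the `project_id` parameter is removed from the constructor later in this series (patches 04 and 05).

```python
# Minimal sketch of the draft API from this patch; credential values are placeholders.
from gemini_haystack.generators.gemini import GeminiGenerator

generator = GeminiGenerator(
    api_key="YOUR_MAKERSUITE_API_KEY",  # placeholder MakerSuite key
    project_id="your-project-id",       # placeholder; dropped in patch 04
    model="gemini-pro",                 # the constructor default is "gemini-pro-vision"
)

# run() accepts text parts (and ByteStream images for the vision model)
# and returns a dict with an "answers" list.
result = generator.run(parts="What's the meaning of life?")
print(result["answers"][0])
```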
diff --git a/integrations/gemini-haystack/example.ipynb b/integrations/gemini-haystack/example.ipynb new file mode 100644 index 000000000..89c80c29c --- /dev/null +++ b/integrations/gemini-haystack/example.ipynb @@ -0,0 +1,199 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import getpass\n", + "\n", + "makersuite_key = getpass.getpass(\"Your Makersuite API Key\")" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from gemini_haystack.generators.gemini import GeminiGenerator\n", + "\n", + "text_generator = GeminiGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro\")\n", + "text_image_generator = GeminiGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro-vision\")" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'answers': [\"1. **To Find Happiness and Fulfillment**: Many people believe that the meaning of life is to find happiness and fulfillment. This can be achieved through various means, such as pursuing passions, building meaningful relationships, and contributing positively to society.\\n2. **To Achieve Self-Actualization**: According to psychologist Abraham Maslow, self-actualization is the highest level of human development, where individuals fully realize their potential and become the best version of themselves. For some, achieving self-actualization is the ultimate goal of life.\\n3. **To Make a Difference**: Some individuals find meaning in life by making a positive impact on the world. This can involve engaging in acts of kindness and service, working towards social justice, or pursuing scientific or artistic endeavors that benefit society.\\n4. **To Experience and Appreciate Life**: Others believe that the meaning of life lies in simply experiencing and appreciating the wonders of the world. This can include traveling, learning new things, connecting with nature, and seeking moments of awe and inspiration.\\n5. **To Connect with Something Greater**: Many people find meaning in life through spiritual or religious beliefs. This can involve connecting with a higher power, practicing meditation or mindfulness, or engaging in rituals and traditions that provide a sense of purpose and belonging.\\n6. **To Leave a Legacy**: Some individuals believe that the meaning of life is to leave behind a lasting legacy. This can involve raising a family, creating works of art or literature, or making significant contributions to one's field of work or society as a whole.\\n7. **To Grow and Evolve**: For some, the meaning of life is to continually grow and evolve as a person. This involves learning from experiences, challenging oneself, and embracing new perspectives, leading to a deeper understanding of oneself and the world around them.\\n8. **To Find Purpose**: Many people seek meaning in life by finding a purpose or calling. This can involve identifying a specific goal or mission that drives their actions and provides them with a sense of direction and significance.\\n\\nUltimately, the meaning of life is a personal and subjective question, and there is no single answer that applies to everyone. 
The search for meaning is an ongoing journey, and individuals may find different meanings at different stages of their lives.\"]}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "text_generator.run(parts=\"What's the meaning of life?\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " The first image is of C-3PO and R2-D2 from the Star Wars franchise. They are protocol and astromech droids, respectively.\n", + "\n", + "The second image is of Maria from the 1927 film Metropolis. She is a robot created by Dr. Rotwang to be a perfect replica of his dead wife, Hel.\n", + "\n", + "The third image is of Gort from the 1951 film The Day the Earth Stood Still. He is a robot sent to Earth to warn humanity about the dangers of nuclear war.\n", + "\n", + "The fourth image is of Marvin from the 1980 film The Hitchhiker's Guide to the Galaxy. He is a paranoid android who is constantly depressed and hates life.\n" + ] + } + ], + "source": [ + "import requests\n", + "from haystack.dataclasses.byte_stream import ByteStream\n", + "\n", + "URLS = [\n", + " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot1.jpg\",\n", + " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot2.jpg\",\n", + " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot3.jpg\",\n", + " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot4.jpg\"\n", + "]\n", + "images = [\n", + " ByteStream(data=requests.get(url).content, mime_type=\"image/jpeg\")\n", + " for url in URLS\n", + "]\n", + "\n", + "result = text_image_generator.run(parts = [\"What can you tell me about these robots?\", *images])\n", + "for answer in result[\"answers\"]:\n", + " print(answer)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from google.ai.generativelanguage import FunctionDeclaration, Tool\n", + "\n", + "def get_current_weather(location: str, unit: str = \"celsius\"):\n", + " return {\"weather\": \"sunny\", \"temperature\": 21.8, \"unit\": unit}\n", + "\n", + "get_current_weather_func = FunctionDeclaration(name=\"get_current_weather\",\n", + " description= \"Get the current weather in a given location\",\n", + " parameters= {\n", + " \"type_\": \"OBJECT\",\n", + " \"properties\": {\n", + " \"location\": {\"type_\": \"STRING\", \"description\": \"The city and state, e.g. San Francisco, CA\"},\n", + " \"unit\": {\n", + " \"type_\": \"STRING\",\n", + " \"enum\": [\n", + " \"celsius\",\n", + " \"fahrenheit\",\n", + " ],\n", + " },\n", + " },\n", + " \"required\": [\"location\"],\n", + " }\n", + " )\n", + "\n", + "tool = Tool(function_declarations=[get_current_weather_func])" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/tuanacelik/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets.
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" + ] + } + ], + "source": [ + "from gemini_haystack.generators.chat.gemini import GeminiChatGenerator\n", + "\n", + "chat_generator = GeminiChatGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro\", tools=[tool])" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "ename": "InternalServerError", + "evalue": "500 An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31m_InactiveRpcError\u001b[0m Traceback (most recent call last)", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/grpc_helpers.py:79\u001b[0m, in \u001b[0;36m_wrap_unary_errors..error_remapped_callable\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m---> 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcallable_\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m grpc\u001b[38;5;241m.\u001b[39mRpcError \u001b[38;5;28;01mas\u001b[39;00m exc:\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/grpc/_channel.py:1160\u001b[0m, in \u001b[0;36m_UnaryUnaryMultiCallable.__call__\u001b[0;34m(self, request, timeout, metadata, credentials, wait_for_ready, compression)\u001b[0m\n\u001b[1;32m 1154\u001b[0m (\n\u001b[1;32m 1155\u001b[0m state,\n\u001b[1;32m 1156\u001b[0m call,\n\u001b[1;32m 1157\u001b[0m ) \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_blocking(\n\u001b[1;32m 1158\u001b[0m request, timeout, metadata, credentials, wait_for_ready, compression\n\u001b[1;32m 1159\u001b[0m )\n\u001b[0;32m-> 1160\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_end_unary_response_blocking\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcall\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/grpc/_channel.py:1003\u001b[0m, in \u001b[0;36m_end_unary_response_blocking\u001b[0;34m(state, call, with_call, deadline)\u001b[0m\n\u001b[1;32m 1002\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1003\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m _InactiveRpcError(state)\n", + "\u001b[0;31m_InactiveRpcError\u001b[0m: <_InactiveRpcError of RPC that terminated with:\n\tstatus = StatusCode.INTERNAL\n\tdetails = \"An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting\"\n\tdebug_error_string = \"UNKNOWN:Error received from peer ipv4:142.251.35.170:443 {created_time:\"2023-12-29T16:54:45.855416+03:00\", grpc_status:13, grpc_message:\"An internal error has occurred. 
Please retry or report in https://developers.generativeai.google/guide/troubleshooting\"}\"\n>", + "\nThe above exception was the direct cause of the following exception:\n", + "\u001b[0;31mInternalServerError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[4], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhaystack\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdataclasses\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChatMessage\n\u001b[1;32m 3\u001b[0m messages \u001b[38;5;241m=\u001b[39m [ChatMessage\u001b[38;5;241m.\u001b[39mfrom_user(content \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWhat is the temperature in celsius in Berlin?\u001b[39m\u001b[38;5;124m\"\u001b[39m)]\n\u001b[0;32m----> 4\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mchat_generator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmessages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmessages\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m res[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mreplies\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/gemini_haystack/generators/chat/gemini.py:149\u001b[0m, in \u001b[0;36mGeminiChatGenerator.run\u001b[0;34m(self, messages)\u001b[0m\n\u001b[1;32m 146\u001b[0m session \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_model\u001b[38;5;241m.\u001b[39mstart_chat(history\u001b[38;5;241m=\u001b[39mhistory)\n\u001b[1;32m 148\u001b[0m new_message \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_message_to_part(messages[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[0;32m--> 149\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend_message\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 150\u001b[0m \u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnew_message\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 151\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generation_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 152\u001b[0m \u001b[43m \u001b[49m\u001b[43msafety_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_safety_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 153\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 155\u001b[0m replies \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 156\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m candidate \u001b[38;5;129;01min\u001b[39;00m res\u001b[38;5;241m.\u001b[39mcandidates:\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/generativeai/generative_models.py:367\u001b[0m, in \u001b[0;36mChatSession.send_message\u001b[0;34m(self, content, generation_config, safety_settings, stream, **kwargs)\u001b[0m\n\u001b[1;32m 365\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m generation_config\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcandidate_count\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 366\u001b[0m 
\u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCan\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt chat with `candidate_count > 1`\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 367\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate_content\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 368\u001b[0m \u001b[43m \u001b[49m\u001b[43mcontents\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhistory\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 369\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgeneration_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 370\u001b[0m \u001b[43m \u001b[49m\u001b[43msafety_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msafety_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 371\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 372\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mprompt_feedback\u001b[38;5;241m.\u001b[39mblock_reason:\n\u001b[1;32m 376\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mBlockedPromptException(response\u001b[38;5;241m.\u001b[39mprompt_feedback)\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/generativeai/generative_models.py:248\u001b[0m, in \u001b[0;36mGenerativeModel.generate_content\u001b[0;34m(self, contents, generation_config, safety_settings, stream, **kwargs)\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mGenerateContentResponse\u001b[38;5;241m.\u001b[39mfrom_iterator(iterator)\n\u001b[1;32m 247\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 248\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate_content\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mGenerateContentResponse\u001b[38;5;241m.\u001b[39mfrom_response(response)\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/ai/generativelanguage_v1beta/services/generative_service/client.py:566\u001b[0m, in \u001b[0;36mGenerativeServiceClient.generate_content\u001b[0;34m(self, request, model, contents, retry, timeout, metadata)\u001b[0m\n\u001b[1;32m 561\u001b[0m metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mtuple\u001b[39m(metadata) \u001b[38;5;241m+\u001b[39m (\n\u001b[1;32m 562\u001b[0m gapic_v1\u001b[38;5;241m.\u001b[39mrouting_header\u001b[38;5;241m.\u001b[39mto_grpc_metadata(((\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m, request\u001b[38;5;241m.\u001b[39mmodel),)),\n\u001b[1;32m 563\u001b[0m )\n\u001b[1;32m 565\u001b[0m \u001b[38;5;66;03m# Send the request.\u001b[39;00m\n\u001b[0;32m--> 
566\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mrpc\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 567\u001b[0m \u001b[43m \u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 568\u001b[0m \u001b[43m \u001b[49m\u001b[43mretry\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretry\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 569\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 570\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 571\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 573\u001b[0m \u001b[38;5;66;03m# Done; return the response.\u001b[39;00m\n\u001b[1;32m 574\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/gapic_v1/method.py:131\u001b[0m, in \u001b[0;36m_GapicCallable.__call__\u001b[0;34m(self, timeout, retry, compression, *args, **kwargs)\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compression \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 129\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcompression\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m compression\n\u001b[0;32m--> 131\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mwrapped_func\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/retry.py:372\u001b[0m, in \u001b[0;36mRetry.__call__..retry_wrapped_func\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 368\u001b[0m target \u001b[38;5;241m=\u001b[39m functools\u001b[38;5;241m.\u001b[39mpartial(func, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 369\u001b[0m sleep_generator \u001b[38;5;241m=\u001b[39m exponential_sleep_generator(\n\u001b[1;32m 370\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_initial, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maximum, multiplier\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_multiplier\n\u001b[1;32m 371\u001b[0m )\n\u001b[0;32m--> 372\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mretry_target\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m \u001b[49m\u001b[43mtarget\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 374\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_predicate\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 375\u001b[0m \u001b[43m \u001b[49m\u001b[43msleep_generator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 377\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_error\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_error\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 378\u001b[0m 
\u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/retry.py:207\u001b[0m, in \u001b[0;36mretry_target\u001b[0;34m(target, predicate, sleep_generator, timeout, on_error, **kwargs)\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m sleep \u001b[38;5;129;01min\u001b[39;00m sleep_generator:\n\u001b[1;32m 206\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 207\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mtarget\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 208\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39misawaitable(result):\n\u001b[1;32m 209\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(_ASYNC_RETRY_WARNING)\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/timeout.py:120\u001b[0m, in \u001b[0;36mTimeToDeadlineTimeout.__call__..func_with_timeout\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[38;5;66;03m# Avoid setting negative timeout\u001b[39;00m\n\u001b[1;32m 118\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mmax\u001b[39m(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout \u001b[38;5;241m-\u001b[39m time_since_first_attempt)\n\u001b[0;32m--> 120\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", + "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/grpc_helpers.py:81\u001b[0m, in \u001b[0;36m_wrap_unary_errors..error_remapped_callable\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m callable_(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m grpc\u001b[38;5;241m.\u001b[39mRpcError \u001b[38;5;28;01mas\u001b[39;00m exc:\n\u001b[0;32m---> 81\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exceptions\u001b[38;5;241m.\u001b[39mfrom_grpc_error(exc) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mexc\u001b[39;00m\n", + "\u001b[0;31mInternalServerError\u001b[0m: 500 An internal error has occurred. 
Please retry or report in https://developers.generativeai.google/guide/troubleshooting" + ] + } + ], + "source": [ + "from haystack.dataclasses import ChatMessage\n", + "\n", + "messages = [ChatMessage.from_user(content = \"What is the temperature in celsius in Berlin?\")]\n", + "res = chat_generator.run(messages=messages)\n", + "res[\"replies\"]" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "gemini", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/integrations/gemini-haystack/pyproject.toml b/integrations/gemini-haystack/pyproject.toml new file mode 100644 index 000000000..1b74691fa --- /dev/null +++ b/integrations/gemini-haystack/pyproject.toml @@ -0,0 +1,165 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + +[project] +name = "gemini-haystack" +dynamic = ["version"] +description = 'Use models like Gemini via Makersuite' +readme = "README.md" +requires-python = ">=3.7" +license = "Apache-2.0" +keywords = [] +authors = [ + { name = "deepset GmbH", email = "info@deepset.ai" }, +] +classifiers = [ + "Development Status :: 4 - Beta", + "Programming Language :: Python", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dependencies = [ + "haystack-ai", + "google-generativeai>=0.3.1" +] + +[project.urls] +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack#readme" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack/issues" +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack" + +[tool.hatch.version] +source = "vcs" +tag-pattern = 'integrations\/gemini-v(?P.*)' + +[tool.hatch.version.raw-options] +root = "../.." 
+git_describe_command = 'git describe --tags --match="integrations/gemini-v[0-9]*"' + +[tool.hatch.envs.default] +dependencies = [ + "coverage[toml]>=6.5", + "pytest", +] +[tool.hatch.envs.default.scripts] +test = "pytest {args:tests}" +test-cov = "coverage run -m pytest {args:tests}" +cov-report = [ + "- coverage combine", + "coverage report", +] +cov = [ + "test-cov", + "cov-report", +] + +[[tool.hatch.envs.all.matrix]] +python = ["3.7", "3.8", "3.9", "3.10", "3.11"] + +[tool.hatch.envs.lint] +detached = true +dependencies = [ + "black>=23.1.0", + "mypy>=1.0.0", + "ruff>=0.0.243", +] +[tool.hatch.envs.lint.scripts] +typing = "mypy --install-types --non-interactive {args:src/gemini_haystack tests}" +style = [ + "ruff {args:.}", + "black --check --diff {args:.}", +] +fmt = [ + "black {args:.}", + "ruff --fix {args:.}", + "style", +] +all = [ + "style", + "typing", +] + +[tool.black] +target-version = ["py37"] +line-length = 120 +skip-string-normalization = true + +[tool.ruff] +target-version = "py37" +line-length = 120 +select = [ + "A", + "ARG", + "B", + "C", + "DTZ", + "E", + "EM", + "F", + "FBT", + "I", + "ICN", + "ISC", + "N", + "PLC", + "PLE", + "PLR", + "PLW", + "Q", + "RUF", + "S", + "T", + "TID", + "UP", + "W", + "YTT", +] +ignore = [ + # Allow non-abstract empty methods in abstract base classes + "B027", + # Allow boolean positional values in function calls, like `dict.get(... True)` + "FBT003", + # Ignore checks for possible passwords + "S105", "S106", "S107", + # Ignore complexity + "C901", "PLR0911", "PLR0912", "PLR0913", "PLR0915", +] +unfixable = [ + # Don't touch unused imports + "F401", +] + +[tool.ruff.isort] +known-first-party = ["gemini_haystack"] + +[tool.ruff.flake8-tidy-imports] +ban-relative-imports = "all" + +[tool.ruff.per-file-ignores] +# Tests can use magic values, assertions, and relative imports +"tests/**/*" = ["PLR2004", "S101", "TID252"] + +[tool.coverage.run] +source_pkgs = ["gemini_haystack", "tests"] +branch = true +parallel = true +omit = [ + "src/gemini_haystack/__about__.py", +] + +[tool.coverage.paths] +gemini_haystack = ["src/gemini_haystack", "*/gemini-haystack/src/gemini_haystack"] +tests = ["tests", "*/gemini-haystack/tests"] + +[tool.coverage.report] +exclude_lines = [ + "no cov", + "if __name__ == .__main__.:", + "if TYPE_CHECKING:", +] diff --git a/integrations/gemini-haystack/src/gemini_haystack/__init__.py b/integrations/gemini-haystack/src/gemini_haystack/__init__.py new file mode 100644 index 000000000..e873bc332 --- /dev/null +++ b/integrations/gemini-haystack/src/gemini_haystack/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py b/integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py new file mode 100644 index 000000000..49fd5f144 --- /dev/null +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 \ No newline at end of file diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py new file mode 100644 index 000000000..54c6abc8b --- /dev/null +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py @@ -0,0 +1,170 @@ +import logging +from typing import Any, Dict, List, Optional, Union + +import 
google.generativeai as genai +from haystack.core.component import component +from haystack.core.component.types import Variadic +from haystack.core.serialization import default_from_dict, default_to_dict +from haystack.dataclasses.byte_stream import ByteStream +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory +from haystack.dataclasses.chat_message import ChatMessage, ChatRole +from google.ai.generativelanguage import FunctionDeclaration, Tool, Part, Content + +logger = logging.getLogger(__name__) + + +@component +class GeminiChatGenerator: + def __init__( + self, + *, + api_key: str, + model: str = "gemini-pro-vision", + project_id: str, + generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None, + safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None, + tools: Optional[List[Tool]] = None, + ): + """ + Multi modal generator using Gemini model via Makersuite + """ + + # Login to GCP. This will fail if user has not set up their gcloud SDK + genai.configure(api_key=api_key) + + self._model_name = model + self._project_id = project_id + + self._generation_config = generation_config + self._safety_settings = safety_settings + self._tools = tools + self._model = GenerativeModel(self._model_name, tools=self._tools) + + def _function_to_dict(self, function: FunctionDeclaration) -> Dict[str, Any]: + return { + "name": function.name, + "parameters": function.parameters, + "description": function.description, + } + + def _tool_to_dict(self, tool: Tool) -> Dict[str, Any]: + return { + "function_declarations": [self._function_to_dict(f) for f in tool.function_declarations], + } + + def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, Any]]) -> Dict[str, Any]: + if isinstance(config, dict): + return config + return { + "temperature": config.temperature, + "top_p": config.top_p, + "top_k": config.top_k, + "candidate_count": config.candidate_count, + "max_output_tokens": config.max_output_tokens, + "stop_sequences": config.stop_sequences, + } + + def to_dict(self) -> Dict[str, Any]: + data = default_to_dict( + self, + model=self._model_name, + project_id=self._project_id, + generation_config=self._generation_config, + safety_settings=self._safety_settings, + tools=self._tools, + ) + if (tools := data["init_parameters"].get("tools")) is not None: + data["init_parameters"]["tools"] = [self._tool_to_dict(t) for t in tools] + if (generation_config := data["init_parameters"].get("generation_config")) is not None: + data["init_parameters"]["generation_config"] = self._generation_config_to_dict(generation_config) + return data + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "GeminiGenerator": + if (tools := data["init_parameters"].get("tools")) is not None: + data["init_parameters"]["tools"] = [Tool(t) for t in tools] + if (generation_config := data["init_parameters"].get("generation_config")) is not None: + data["init_parameters"]["generation_config"] = GenerationConfig.from_dict(generation_config) + + return default_from_dict(cls, data) + + def _convert_part(self, part: Union[str, ByteStream, Part]) -> Part: + if isinstance(part, str): + converted_part = Part() + converted_part.text = part + return converted_part + elif isinstance(part, ByteStream): + converted_part = Part() + converted_part.inline_data.data = part.data + converted_part.inline_data.mime_type = part.mime_type + return converted_part + elif isinstance(part, Part): + return part + 
else: + msg = f"Unsupported type {type(part)} for part {part}" + raise ValueError(msg) + + def _message_to_part(self, message: ChatMessage) -> Part: + if message.role == ChatRole.SYSTEM and message.name: + p = Part() + p.function_call.name = message.name + p.function_call.args = {} + for k, v in message.content.items(): + p.function_call.args[k] = v + return p + elif message.role == ChatRole.SYSTEM: + return Part().text(message.content) + elif message.role == ChatRole.FUNCTION: + return Part().function_response(name=message.name, response=message.content) + elif message.role == ChatRole.USER: + return self._convert_part(message.content) + + def _message_to_content(self, message: ChatMessage) -> Content: + if message.role == ChatRole.SYSTEM and message.name: + part = Part() + part.function_call.name = message.name + part.function_call.args = {} + for k, v in message.content.items(): + part.function_call.args[k] = v + elif message.role == ChatRole.SYSTEM: + part = Part().text=message.content + elif message.role == ChatRole.FUNCTION: + part = Part().function_response(name=message.name, response=message.content) + elif message.role == ChatRole.USER: + part = self._convert_part(message.content) + else: + msg = f"Unsupported message role {message.role}" + raise ValueError(msg) + role = "user" if message.role in [ChatRole.USER, ChatRole.FUNCTION] else "model" + return Content(parts=[part], role=role) + + + @component.output_types(replies=List[ChatMessage]) + def run(self, messages: List[ChatMessage]): + history = [self._message_to_content(m) for m in messages[:-1]] + session = self._model.start_chat(history=history) + + new_message = self._message_to_part(messages[-1]) + res = session.send_message( + content=new_message, + generation_config=self._generation_config, + safety_settings=self._safety_settings, + ) + + replies = [] + for candidate in res.candidates: + for part in candidate.content.parts: + if part.text != "": + replies.append(ChatMessage.from_system(part.text)) + elif part.function_call is not None: + replies.append( + ChatMessage( + content=dict(part.function_call.args.items()), + role=ChatRole.SYSTEM, + name=part.function_call.name, + ) + ) + + return {"replies": replies} + diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py new file mode 100644 index 000000000..f5e7ca801 --- /dev/null +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py @@ -0,0 +1,130 @@ +import logging +from typing import Any, Dict, List, Optional, Union + +import google.generativeai as genai +from haystack.core.component import component +from haystack.core.component.types import Variadic +from haystack.core.serialization import default_from_dict, default_to_dict +from haystack.dataclasses.byte_stream import ByteStream +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory +from google.ai.generativelanguage import FunctionDeclaration, Tool, Part, Content + +logger = logging.getLogger(__name__) + + +@component +class GeminiGenerator: + def __init__( + self, + *, + api_key: str, + model: str = "gemini-pro-vision", + project_id: str, + generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None, + safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None, + tools: Optional[List[Tool]] = None, + ): + """ + Multi modal generator using Gemini model via Makersuite + """ + + # Login to 
GCP. This will fail if user has not set up their gcloud SDK + genai.configure(api_key=api_key) + + self._model_name = model + self._project_id = project_id + + self._generation_config = generation_config + self._safety_settings = safety_settings + self._tools = tools + self._model = GenerativeModel(self._model_name, tools=self._tools) + + def _function_to_dict(self, function: FunctionDeclaration) -> Dict[str, Any]: + return { + "name": function.name, + "parameters": function.parameters, + "description": function.description, + } + + def _tool_to_dict(self, tool: Tool) -> Dict[str, Any]: + return { + "function_declarations": [self._function_to_dict(f) for f in tool.function_declarations], + } + + def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, Any]]) -> Dict[str, Any]: + if isinstance(config, dict): + return config + return { + "temperature": config.temperature, + "top_p": config.top_p, + "top_k": config.top_k, + "candidate_count": config.candidate_count, + "max_output_tokens": config.max_output_tokens, + "stop_sequences": config.stop_sequences, + } + + def to_dict(self) -> Dict[str, Any]: + data = default_to_dict( + self, + model=self._model_name, + project_id=self._project_id, + generation_config=self._generation_config, + safety_settings=self._safety_settings, + tools=self._tools, + ) + if (tools := data["init_parameters"].get("tools")) is not None: + data["init_parameters"]["tools"] = [self._tool_to_dict(t) for t in tools] + if (generation_config := data["init_parameters"].get("generation_config")) is not None: + data["init_parameters"]["generation_config"] = self._generation_config_to_dict(generation_config) + return data + + @classmethod + def from_dict(cls, data: Dict[str, Any]) -> "GeminiGenerator": + if (tools := data["init_parameters"].get("tools")) is not None: + data["init_parameters"]["tools"] = [Tool(t) for t in tools] + if (generation_config := data["init_parameters"].get("generation_config")) is not None: + data["init_parameters"]["generation_config"] = GenerationConfig.from_dict(generation_config) + + return default_from_dict(cls, data) + + def _convert_part(self, part: Union[str, ByteStream, Part]) -> Part: + if isinstance(part, str): + converted_part = Part() + converted_part.text = part + return converted_part + elif isinstance(part, ByteStream): + converted_part = Part() + converted_part.inline_data.data = part.data + converted_part.inline_data.mime_type = part.mime_type + return converted_part + elif isinstance(part, Part): + return part + else: + msg = f"Unsupported type {type(part)} for part {part}" + raise ValueError(msg) + + @component.output_types(answers=List[Union[str, Dict[str, str]]]) + def run(self, parts: Variadic[Union[str, ByteStream, Part]]): + converted_parts = [self._convert_part(p) for p in parts] + + contents = [Content(parts=converted_parts, role="user")] + res = self._model.generate_content( + contents=contents, + generation_config=self._generation_config, + safety_settings=self._safety_settings, + ) + self._model.start_chat() + answers = [] + for candidate in res.candidates: + for part in candidate.content.parts: + if part.text != "": + answers.append(part.text) + elif part.function_call is not None: + function_call = { + "name": part.function_call.name, + "args": dict(part.function_call.args.items()), + } + answers.append(function_call) + + return {"answers": answers} diff --git a/integrations/gemini-haystack/tests/__init__.py b/integrations/gemini-haystack/tests/__init__.py new file mode 100644 index 
000000000..e873bc332 --- /dev/null +++ b/integrations/gemini-haystack/tests/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: 2023-present deepset GmbH +# +# SPDX-License-Identifier: Apache-2.0 From d75264b6f43140390cbaa41c441276071d366e21 Mon Sep 17 00:00:00 2001 From: Tuana Celik Date: Fri, 29 Dec 2023 17:17:09 +0300 Subject: [PATCH 02/11] updating function response --- .../gemini_haystack/generators/chat/gemini.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py index 54c6abc8b..59d9b531c 100644 --- a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py @@ -114,9 +114,14 @@ def _message_to_part(self, message: ChatMessage) -> Part: p.function_call.args[k] = v return p elif message.role == ChatRole.SYSTEM: - return Part().text(message.content) + p = Part() + p.text = message.content + return p elif message.role == ChatRole.FUNCTION: - return Part().function_response(name=message.name, response=message.content) + p = Part() + p.function_response.name = message.name + p.function_response.response = message.content + return p elif message.role == ChatRole.USER: return self._convert_part(message.content) @@ -128,9 +133,14 @@ def _message_to_content(self, message: ChatMessage) -> Content: for k, v in message.content.items(): part.function_call.args[k] = v elif message.role == ChatRole.SYSTEM: - part = Part().text=message.content + part = Part() + part.text = message.content + return part elif message.role == ChatRole.FUNCTION: - part = Part().function_response(name=message.name, response=message.content) + part = Part() + part.function_response.name = message.name + part.function_response.response = message.content + return part elif message.role == ChatRole.USER: part = self._convert_part(message.content) else: From d7f3e0ba2858f78c90fd82bba5cbab5997e7f0c7 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Tue, 2 Jan 2024 17:08:58 +0100 Subject: [PATCH 03/11] Rename Gemini generators --- .../src/gemini_haystack/generators/chat/gemini.py | 4 ++-- .../gemini-haystack/src/gemini_haystack/generators/gemini.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py index 59d9b531c..df1d15f10 100644 --- a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py @@ -15,7 +15,7 @@ @component -class GeminiChatGenerator: +class GoogleAIGeminiChatGenerator: def __init__( self, *, @@ -81,7 +81,7 @@ def to_dict(self) -> Dict[str, Any]: return data @classmethod - def from_dict(cls, data: Dict[str, Any]) -> "GeminiGenerator": + def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiChatGenerator": if (tools := data["init_parameters"].get("tools")) is not None: data["init_parameters"]["tools"] = [Tool(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py index f5e7ca801..43999e271 100644 --- 
a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py @@ -14,7 +14,7 @@ @@ -class GeminiGenerator: +class GoogleAIGeminiGenerator: def __init__( self, *, @@ -80,7 +80,7 @@ def to_dict(self) -> Dict[str, Any]: return data @classmethod - def from_dict(cls, data: Dict[str, Any]) -> "GeminiGenerator": + def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiGenerator": if (tools := data["init_parameters"].get("tools")) is not None: data["init_parameters"]["tools"] = [Tool(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: From 2020498e514f6040950098779412e82a1cdd1f22 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Tue, 2 Jan 2024 17:09:37 +0100 Subject: [PATCH 04/11] Enhance GoogleAIGeminiGenerator serialization and remove unnecessary parameters --- .../src/gemini_haystack/generators/gemini.py | 34 +++++++------------ 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py index 43999e271..c32698ca8 100644 --- a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py @@ -8,7 +8,7 @@ from haystack.dataclasses.byte_stream import ByteStream from google.generativeai import GenerationConfig, GenerativeModel from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google.ai.generativelanguage import FunctionDeclaration, Tool, Part, Content +from google.ai.generativelanguage import Tool, Part, Content logger = logging.getLogger(__name__) @@ -18,9 +18,8 @@ class GoogleAIGeminiGenerator: def __init__( self, *, - api_key: str, + api_key: Optional[str] = None, model: str = "gemini-pro-vision", - project_id: str, generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None, safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None, tools: Optional[List[Tool]] = None, @@ -29,29 +28,15 @@ def __init__( Multi modal generator using Gemini model via Makersuite """ - # Login to GCP.
This will fail if user has not set up their gcloud SDK + # Authenticate, if api_key is None it will use the GOOGLE_API_KEY env variable genai.configure(api_key=api_key) self._model_name = model - self._project_id = project_id - self._generation_config = generation_config self._safety_settings = safety_settings self._tools = tools self._model = GenerativeModel(self._model_name, tools=self._tools) - def _function_to_dict(self, function: FunctionDeclaration) -> Dict[str, Any]: - return { - "name": function.name, - "parameters": function.parameters, - "description": function.description, - } - - def _tool_to_dict(self, tool: Tool) -> Dict[str, Any]: - return { - "function_declarations": [self._function_to_dict(f) for f in tool.function_declarations], - } - def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, Any]]) -> Dict[str, Any]: if isinstance(config, dict): return config @@ -68,23 +53,28 @@ def to_dict(self) -> Dict[str, Any]: data = default_to_dict( self, model=self._model_name, - project_id=self._project_id, generation_config=self._generation_config, safety_settings=self._safety_settings, tools=self._tools, ) if (tools := data["init_parameters"].get("tools")) is not None: - data["init_parameters"]["tools"] = [self._tool_to_dict(t) for t in tools] + data["init_parameters"]["tools"] = [Tool.serialize(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: data["init_parameters"]["generation_config"] = self._generation_config_to_dict(generation_config) + if (safety_settings := data["init_parameters"].get("safety_settings")) is not None: + data["init_parameters"]["safety_settings"] = {k.value: v.value for k, v in safety_settings.items()} return data @classmethod def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiGenerator": if (tools := data["init_parameters"].get("tools")) is not None: - data["init_parameters"]["tools"] = [Tool(t) for t in tools] + data["init_parameters"]["tools"] = [Tool.deserialize(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: - data["init_parameters"]["generation_config"] = GenerationConfig.from_dict(generation_config) + data["init_parameters"]["generation_config"] = GenerationConfig(**generation_config) + if (safety_settings := data["init_parameters"].get("safety_settings")) is not None: + data["init_parameters"]["safety_settings"] = { + HarmCategory(k): HarmBlockThreshold(v) for k, v in safety_settings.items() + } return default_from_dict(cls, data) From b2d5269958e9f253e1117d438e300761cc168039 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Tue, 2 Jan 2024 18:50:04 +0100 Subject: [PATCH 05/11] Enhance GoogleAIGeminiChatGenerator serialization and remove unnecessary parameters --- .../gemini_haystack/generators/chat/gemini.py | 42 +++++++------------ 1 file changed, 14 insertions(+), 28 deletions(-) diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py index df1d15f10..4e71b784d 100644 --- a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py +++ b/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py @@ -3,13 +3,12 @@ import google.generativeai as genai from haystack.core.component import component -from haystack.core.component.types import Variadic from haystack.core.serialization import default_from_dict, default_to_dict from haystack.dataclasses.byte_stream import 
ByteStream +from haystack.dataclasses.chat_message import ChatMessage, ChatRole from google.generativeai import GenerationConfig, GenerativeModel from google.generativeai.types import HarmBlockThreshold, HarmCategory -from haystack.dataclasses.chat_message import ChatMessage, ChatRole -from google.ai.generativelanguage import FunctionDeclaration, Tool, Part, Content +from google.ai.generativelanguage import Tool, Part, Content logger = logging.getLogger(__name__) @@ -19,9 +18,8 @@ class GoogleAIGeminiChatGenerator: def __init__( self, *, - api_key: str, + api_key: Optional[str] = None, model: str = "gemini-pro-vision", - project_id: str, generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None, safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None, tools: Optional[List[Tool]] = None, @@ -30,29 +28,15 @@ def __init__( Multi modal generator using Gemini model via Makersuite """ - # Login to GCP. This will fail if user has not set up their gcloud SDK + # Authenticate, if api_key is None it will use the GOOGLE_API_KEY env variable genai.configure(api_key=api_key) self._model_name = model - self._project_id = project_id - self._generation_config = generation_config self._safety_settings = safety_settings self._tools = tools self._model = GenerativeModel(self._model_name, tools=self._tools) - def _function_to_dict(self, function: FunctionDeclaration) -> Dict[str, Any]: - return { - "name": function.name, - "parameters": function.parameters, - "description": function.description, - } - - def _tool_to_dict(self, tool: Tool) -> Dict[str, Any]: - return { - "function_declarations": [self._function_to_dict(f) for f in tool.function_declarations], - } - def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, Any]]) -> Dict[str, Any]: if isinstance(config, dict): return config @@ -69,24 +53,28 @@ def to_dict(self) -> Dict[str, Any]: data = default_to_dict( self, model=self._model_name, - project_id=self._project_id, generation_config=self._generation_config, safety_settings=self._safety_settings, tools=self._tools, ) if (tools := data["init_parameters"].get("tools")) is not None: - data["init_parameters"]["tools"] = [self._tool_to_dict(t) for t in tools] + data["init_parameters"]["tools"] = [Tool.serialize(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: data["init_parameters"]["generation_config"] = self._generation_config_to_dict(generation_config) + if (safety_settings := data["init_parameters"].get("safety_settings")) is not None: + data["init_parameters"]["safety_settings"] = {k.value: v.value for k, v in safety_settings.items()} return data @classmethod def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiChatGenerator": if (tools := data["init_parameters"].get("tools")) is not None: - data["init_parameters"]["tools"] = [Tool(t) for t in tools] + data["init_parameters"]["tools"] = [Tool.deserialize(t) for t in tools] if (generation_config := data["init_parameters"].get("generation_config")) is not None: - data["init_parameters"]["generation_config"] = GenerationConfig.from_dict(generation_config) - + data["init_parameters"]["generation_config"] = GenerationConfig(**generation_config) + if (safety_settings := data["init_parameters"].get("safety_settings")) is not None: + data["init_parameters"]["safety_settings"] = { + HarmCategory(k): HarmBlockThreshold(v) for k, v in safety_settings.items() + } return default_from_dict(cls, data) def _convert_part(self, part: Union[str, 
ByteStream, Part]) -> Part: @@ -104,7 +92,7 @@ def _convert_part(self, part: Union[str, ByteStream, Part]) -> Part: else: msg = f"Unsupported type {type(part)} for part {part}" raise ValueError(msg) - + def _message_to_part(self, message: ChatMessage) -> Part: if message.role == ChatRole.SYSTEM and message.name: p = Part() @@ -149,7 +137,6 @@ def _message_to_content(self, message: ChatMessage) -> Content: role = "user" if message.role in [ChatRole.USER, ChatRole.FUNCTION] else "model" return Content(parts=[part], role=role) - @component.output_types(replies=List[ChatMessage]) def run(self, messages: List[ChatMessage]): history = [self._message_to_content(m) for m in messages[:-1]] @@ -177,4 +164,3 @@ def run(self, messages: List[ChatMessage]): ) return {"replies": replies} - From 6f640adf130d15c0830102055b4934d148e31e2b Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Tue, 2 Jan 2024 18:50:12 +0100 Subject: [PATCH 06/11] Add tests --- .../tests/generators/chat/test_chat_gemini.py | 211 ++++++++++++++++++ .../tests/generators/test_gemini.py | 180 +++++++++++++++ 2 files changed, 391 insertions(+) create mode 100644 integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py create mode 100644 integrations/gemini-haystack/tests/generators/test_gemini.py diff --git a/integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py b/integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py new file mode 100644 index 000000000..c3a2d259b --- /dev/null +++ b/integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py @@ -0,0 +1,211 @@ +import os +from unittest.mock import patch + +from haystack.dataclasses.chat_message import ChatMessage +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory +from google.ai.generativelanguage import FunctionDeclaration, Tool +import pytest + +from gemini_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator + + +def test_init(): + generation_config = GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + safety_settings = {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + get_current_weather_func = FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": {"type_": "STRING", "description": "The city and state, e.g. 
San Francisco, CA"}, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + + tool = Tool(function_declarations=[get_current_weather_func]) + with patch("gemini_haystack.generators.chat.gemini.genai.configure") as mock_genai_configure: + gemini = GoogleAIGeminiChatGenerator( + generation_config=generation_config, + safety_settings=safety_settings, + tools=[tool], + ) + mock_genai_configure.assert_called_once_with(api_key=None) + assert gemini._model_name == "gemini-pro-vision" + assert gemini._generation_config == generation_config + assert gemini._safety_settings == safety_settings + assert gemini._tools == [tool] + assert isinstance(gemini._model, GenerativeModel) + + +def test_to_dict(): + generation_config = GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + safety_settings = {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + get_current_weather_func = FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": {"type_": "STRING", "description": "The city and state, e.g. San Francisco, CA"}, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + + tool = Tool(function_declarations=[get_current_weather_func]) + + with patch("gemini_haystack.generators.chat.gemini.genai.configure"): + gemini = GoogleAIGeminiChatGenerator( + generation_config=generation_config, + safety_settings=safety_settings, + tools=[tool], + ) + assert gemini.to_dict() == { + "type": "gemini_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "init_parameters": { + "model": "gemini-pro-vision", + "generation_config": { + "temperature": 0.5, + "top_p": 0.5, + "top_k": 0.5, + "candidate_count": 1, + "max_output_tokens": 10, + "stop_sequences": ["stop"], + }, + "safety_settings": {6: 3}, + "tools": [ + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + ], + }, + } + + +def test_from_dict(): + with patch("gemini_haystack.generators.chat.gemini.genai.configure"): + gemini = GoogleAIGeminiChatGenerator.from_dict( + { + "type": "gemini_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "init_parameters": { + "model": "gemini-pro-vision", + "generation_config": { + "temperature": 0.5, + "top_p": 0.5, + "top_k": 0.5, + "candidate_count": 1, + "max_output_tokens": 10, + "stop_sequences": ["stop"], + }, + "safety_settings": {6: 3}, + "tools": [ + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. 
San Francisco, CAB\x08location" + ], + }, + } + ) + + assert gemini._model_name == "gemini-pro-vision" + assert gemini._generation_config == GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + assert gemini._safety_settings == {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + assert gemini._tools == [ + Tool( + function_declarations=[ + FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": { + "type_": "STRING", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + ] + ) + ] + assert isinstance(gemini._model, GenerativeModel) + + +@pytest.mark.skipif("GOOGLE_API_KEY" not in os.environ, reason="GOOGLE_API_KEY not set") +def test_run(): + def get_current_weather(location: str, unit: str = "celsius"): + return {"weather": "sunny", "temperature": 21.8, "unit": unit} + + get_current_weather_func = FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": {"type_": "STRING", "description": "The city and state, e.g. San Francisco, CA"}, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + + tool = Tool(function_declarations=[get_current_weather_func]) + gemini_chat = GoogleAIGeminiChatGenerator(model="gemini-pro", tools=[tool]) + messages = [ChatMessage.from_user(content="What is the temperature in celsius in Berlin?")] + res = gemini_chat.run(messages=messages) + assert len(res["replies"]) > 0 + + weather = get_current_weather(**res["replies"][0].content) + messages += res["replies"] + [ChatMessage.from_function(content=weather, name="get_current_weather")] + + res = gemini_chat.run(messages=messages) + assert len(res["replies"]) > 0 diff --git a/integrations/gemini-haystack/tests/generators/test_gemini.py b/integrations/gemini-haystack/tests/generators/test_gemini.py new file mode 100644 index 000000000..a010b1fbf --- /dev/null +++ b/integrations/gemini-haystack/tests/generators/test_gemini.py @@ -0,0 +1,180 @@ +import os +from unittest.mock import patch + +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory +from google.ai.generativelanguage import FunctionDeclaration, Tool +import pytest + +from gemini_haystack.generators.gemini import GoogleAIGeminiGenerator + + +def test_init(): + generation_config = GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + safety_settings = {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + get_current_weather_func = FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": {"type_": "STRING", "description": "The city and state, e.g. 
San Francisco, CA"}, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + + tool = Tool(function_declarations=[get_current_weather_func]) + with patch("gemini_haystack.generators.gemini.genai.configure") as mock_genai_configure: + gemini = GoogleAIGeminiGenerator( + generation_config=generation_config, + safety_settings=safety_settings, + tools=[tool], + ) + mock_genai_configure.assert_called_once_with(api_key=None) + assert gemini._model_name == "gemini-pro-vision" + assert gemini._generation_config == generation_config + assert gemini._safety_settings == safety_settings + assert gemini._tools == [tool] + assert isinstance(gemini._model, GenerativeModel) + + +def test_to_dict(): + generation_config = GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + safety_settings = {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + get_current_weather_func = FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": {"type_": "STRING", "description": "The city and state, e.g. San Francisco, CA"}, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + + tool = Tool(function_declarations=[get_current_weather_func]) + + with patch("gemini_haystack.generators.gemini.genai.configure"): + gemini = GoogleAIGeminiGenerator( + generation_config=generation_config, + safety_settings=safety_settings, + tools=[tool], + ) + assert gemini.to_dict() == { + "type": "gemini_haystack.generators.gemini.GoogleAIGeminiGenerator", + "init_parameters": { + "model": "gemini-pro-vision", + "generation_config": { + "temperature": 0.5, + "top_p": 0.5, + "top_k": 0.5, + "candidate_count": 1, + "max_output_tokens": 10, + "stop_sequences": ["stop"], + }, + "safety_settings": {6: 3}, + "tools": [ + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + ], + }, + } + + +def test_from_dict(): + with patch("gemini_haystack.generators.gemini.genai.configure"): + gemini = GoogleAIGeminiGenerator.from_dict( + { + "type": "gemini_haystack.generators.gemini.GoogleAIGeminiGenerator", + "init_parameters": { + "model": "gemini-pro-vision", + "generation_config": { + "temperature": 0.5, + "top_p": 0.5, + "top_k": 0.5, + "candidate_count": 1, + "max_output_tokens": 10, + "stop_sequences": ["stop"], + }, + "safety_settings": {6: 3}, + "tools": [ + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. 
San Francisco, CAB\x08location" + ], + }, + } + ) + + assert gemini._model_name == "gemini-pro-vision" + assert gemini._generation_config == GenerationConfig( + candidate_count=1, + stop_sequences=["stop"], + max_output_tokens=10, + temperature=0.5, + top_p=0.5, + top_k=0.5, + ) + assert gemini._safety_settings == {HarmCategory.HARM_CATEGORY_DANGEROUS: HarmBlockThreshold.BLOCK_ONLY_HIGH} + assert gemini._tools == [ + Tool( + function_declarations=[ + FunctionDeclaration( + name="get_current_weather", + description="Get the current weather in a given location", + parameters={ + "type_": "OBJECT", + "properties": { + "location": { + "type_": "STRING", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": { + "type_": "STRING", + "enum": [ + "celsius", + "fahrenheit", + ], + }, + }, + "required": ["location"], + }, + ) + ] + ) + ] + assert isinstance(gemini._model, GenerativeModel) + + +@pytest.mark.skipif("GOOGLE_API_KEY" not in os.environ, reason="GOOGLE_API_KEY not set") +def test_run(): + gemini = GoogleAIGeminiGenerator(model="gemini-pro") + res = gemini.run("Tell me something cool") + assert len(res["answers"]) > 0 From 47d991a897a46cb5a4e9a1102253db861e993d77 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Tue, 2 Jan 2024 18:56:14 +0100 Subject: [PATCH 07/11] Rename package from gemini-haystack to google_ai_haystack --- integrations/gemini-haystack/README.md | 21 ---------------- .../LICENSE.txt | 0 integrations/google_ai/README.md | 21 ++++++++++++++++ .../example.ipynb | 0 .../pyproject.toml | 24 +++++++++---------- .../src/google_ai_haystack}/__init__.py | 0 .../generators/__init__.py | 0 .../generators/chat/gemini.py | 0 .../google_ai_haystack}/generators/gemini.py | 0 .../tests/__init__.py | 0 .../tests/generators/chat/test_chat_gemini.py | 12 +++++----- .../tests/generators/test_gemini.py | 12 +++++----- 12 files changed, 45 insertions(+), 45 deletions(-) delete mode 100644 integrations/gemini-haystack/README.md rename integrations/{gemini-haystack => google_ai}/LICENSE.txt (100%) create mode 100644 integrations/google_ai/README.md rename integrations/{gemini-haystack => google_ai}/example.ipynb (100%) rename integrations/{gemini-haystack => google_ai}/pyproject.toml (82%) rename integrations/{gemini-haystack/src/gemini_haystack => google_ai/src/google_ai_haystack}/__init__.py (100%) rename integrations/{gemini-haystack/src/gemini_haystack => google_ai/src/google_ai_haystack}/generators/__init__.py (100%) rename integrations/{gemini-haystack/src/gemini_haystack => google_ai/src/google_ai_haystack}/generators/chat/gemini.py (100%) rename integrations/{gemini-haystack/src/gemini_haystack => google_ai/src/google_ai_haystack}/generators/gemini.py (100%) rename integrations/{gemini-haystack => google_ai}/tests/__init__.py (100%) rename integrations/{gemini-haystack => google_ai}/tests/generators/chat/test_chat_gemini.py (93%) rename integrations/{gemini-haystack => google_ai}/tests/generators/test_gemini.py (92%) diff --git a/integrations/gemini-haystack/README.md b/integrations/gemini-haystack/README.md deleted file mode 100644 index 603457372..000000000 --- a/integrations/gemini-haystack/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# gemini-haystack - -[![PyPI - Version](https://img.shields.io/pypi/v/gemini-haystack.svg)](https://pypi.org/project/gemini-haystack) -[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/gemini-haystack.svg)](https://pypi.org/project/gemini-haystack) - ------ - -**Table of Contents** - -- [Installation](#installation) 
-- [License](#license) - -## Installation - -```console -pip install gemini-haystack -``` - -## License - -`gemini-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/gemini-haystack/LICENSE.txt b/integrations/google_ai/LICENSE.txt similarity index 100% rename from integrations/gemini-haystack/LICENSE.txt rename to integrations/google_ai/LICENSE.txt diff --git a/integrations/google_ai/README.md b/integrations/google_ai/README.md new file mode 100644 index 000000000..34ddefc79 --- /dev/null +++ b/integrations/google_ai/README.md @@ -0,0 +1,21 @@ +# google-ai-haystack + +[![PyPI - Version](https://img.shields.io/pypi/v/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/google-ai-haystack.svg)](https://pypi.org/project/google-ai-haystack) + +--- + +**Table of Contents** + +- [Installation](#installation) +- [License](#license) + +## Installation + +```console +pip install google-ai-haystack +``` + +## License + +`google-ai-haystack` is distributed under the terms of the [Apache-2.0](https://spdx.org/licenses/Apache-2.0.html) license. diff --git a/integrations/gemini-haystack/example.ipynb b/integrations/google_ai/example.ipynb similarity index 100% rename from integrations/gemini-haystack/example.ipynb rename to integrations/google_ai/example.ipynb diff --git a/integrations/gemini-haystack/pyproject.toml b/integrations/google_ai/pyproject.toml similarity index 82% rename from integrations/gemini-haystack/pyproject.toml rename to integrations/google_ai/pyproject.toml index 1b74691fa..06ef23134 100644 --- a/integrations/gemini-haystack/pyproject.toml +++ b/integrations/google_ai/pyproject.toml @@ -3,7 +3,7 @@ requires = ["hatchling", "hatch-vcs"] build-backend = "hatchling.build" [project] -name = "gemini-haystack" +name = "google-ai-haystack" dynamic = ["version"] description = 'Use models like Gemini via Makersuite' readme = "README.md" @@ -30,17 +30,17 @@ dependencies = [ ] [project.urls] -Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack#readme" -Issues = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack/issues" -Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/gemini-haystack" +Documentation = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_ai_haystack#readme" +Issues = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_ai_haystack/issues" +Source = "https://github.com/deepset-ai/haystack-core-integrations/tree/main/integrations/google_ai_haystack" [tool.hatch.version] source = "vcs" -tag-pattern = 'integrations\/gemini-v(?P.*)' +tag-pattern = 'integrations\/google-ai-v(?P.*)' [tool.hatch.version.raw-options] root = "../.." 
-git_describe_command = 'git describe --tags --match="integrations/gemini-v[0-9]*"' +git_describe_command = 'git describe --tags --match="integrations/google-ai-v[0-9]*"' [tool.hatch.envs.default] dependencies = [ @@ -70,7 +70,7 @@ dependencies = [ "ruff>=0.0.243", ] [tool.hatch.envs.lint.scripts] -typing = "mypy --install-types --non-interactive {args:src/gemini_haystack tests}" +typing = "mypy --install-types --non-interactive {args:src/google_ai_haystack tests}" style = [ "ruff {args:.}", "black --check --diff {args:.}", @@ -136,7 +136,7 @@ unfixable = [ ] [tool.ruff.isort] -known-first-party = ["gemini_haystack"] +known-first-party = ["google_ai_haystack"] [tool.ruff.flake8-tidy-imports] ban-relative-imports = "all" @@ -146,16 +146,16 @@ ban-relative-imports = "all" "tests/**/*" = ["PLR2004", "S101", "TID252"] [tool.coverage.run] -source_pkgs = ["gemini_haystack", "tests"] +source_pkgs = ["google_ai_haystack", "tests"] branch = true parallel = true omit = [ - "src/gemini_haystack/__about__.py", + "src/google_ai_haystack/__about__.py", ] [tool.coverage.paths] -gemini_haystack = ["src/gemini_haystack", "*/gemini-haystack/src/gemini_haystack"] -tests = ["tests", "*/gemini-haystack/tests"] +google_ai_haystack = ["src/google_ai_haystack", "*/google_ai_haystack/src/google_ai_haystack"] +tests = ["tests", "*/google_ai_haystack/tests"] [tool.coverage.report] exclude_lines = [ diff --git a/integrations/gemini-haystack/src/gemini_haystack/__init__.py b/integrations/google_ai/src/google_ai_haystack/__init__.py similarity index 100% rename from integrations/gemini-haystack/src/gemini_haystack/__init__.py rename to integrations/google_ai/src/google_ai_haystack/__init__.py diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py b/integrations/google_ai/src/google_ai_haystack/generators/__init__.py similarity index 100% rename from integrations/gemini-haystack/src/gemini_haystack/generators/__init__.py rename to integrations/google_ai/src/google_ai_haystack/generators/__init__.py diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py b/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py similarity index 100% rename from integrations/gemini-haystack/src/gemini_haystack/generators/chat/gemini.py rename to integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py diff --git a/integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py b/integrations/google_ai/src/google_ai_haystack/generators/gemini.py similarity index 100% rename from integrations/gemini-haystack/src/gemini_haystack/generators/gemini.py rename to integrations/google_ai/src/google_ai_haystack/generators/gemini.py diff --git a/integrations/gemini-haystack/tests/__init__.py b/integrations/google_ai/tests/__init__.py similarity index 100% rename from integrations/gemini-haystack/tests/__init__.py rename to integrations/google_ai/tests/__init__.py diff --git a/integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py similarity index 93% rename from integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py rename to integrations/google_ai/tests/generators/chat/test_chat_gemini.py index c3a2d259b..8522af819 100644 --- a/integrations/gemini-haystack/tests/generators/chat/test_chat_gemini.py +++ b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py @@ -7,7 +7,7 @@ from google.ai.generativelanguage import FunctionDeclaration, Tool 
import pytest -from gemini_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator +from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator def test_init(): @@ -40,7 +40,7 @@ def test_init(): ) tool = Tool(function_declarations=[get_current_weather_func]) - with patch("gemini_haystack.generators.chat.gemini.genai.configure") as mock_genai_configure: + with patch("google_ai_haystack.generators.chat.gemini.genai.configure") as mock_genai_configure: gemini = GoogleAIGeminiChatGenerator( generation_config=generation_config, safety_settings=safety_settings, @@ -85,14 +85,14 @@ def test_to_dict(): tool = Tool(function_declarations=[get_current_weather_func]) - with patch("gemini_haystack.generators.chat.gemini.genai.configure"): + with patch("google_ai_haystack.generators.chat.gemini.genai.configure"): gemini = GoogleAIGeminiChatGenerator( generation_config=generation_config, safety_settings=safety_settings, tools=[tool], ) assert gemini.to_dict() == { - "type": "gemini_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "type": "google_ai_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { @@ -112,10 +112,10 @@ def test_to_dict(): def test_from_dict(): - with patch("gemini_haystack.generators.chat.gemini.genai.configure"): + with patch("google_ai_haystack.generators.chat.gemini.genai.configure"): gemini = GoogleAIGeminiChatGenerator.from_dict( { - "type": "gemini_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", + "type": "google_ai_haystack.generators.chat.gemini.GoogleAIGeminiChatGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { diff --git a/integrations/gemini-haystack/tests/generators/test_gemini.py b/integrations/google_ai/tests/generators/test_gemini.py similarity index 92% rename from integrations/gemini-haystack/tests/generators/test_gemini.py rename to integrations/google_ai/tests/generators/test_gemini.py index a010b1fbf..2f410862c 100644 --- a/integrations/gemini-haystack/tests/generators/test_gemini.py +++ b/integrations/google_ai/tests/generators/test_gemini.py @@ -6,7 +6,7 @@ from google.ai.generativelanguage import FunctionDeclaration, Tool import pytest -from gemini_haystack.generators.gemini import GoogleAIGeminiGenerator +from google_ai_haystack.generators.gemini import GoogleAIGeminiGenerator def test_init(): @@ -39,7 +39,7 @@ def test_init(): ) tool = Tool(function_declarations=[get_current_weather_func]) - with patch("gemini_haystack.generators.gemini.genai.configure") as mock_genai_configure: + with patch("google_ai_haystack.generators.gemini.genai.configure") as mock_genai_configure: gemini = GoogleAIGeminiGenerator( generation_config=generation_config, safety_settings=safety_settings, @@ -84,14 +84,14 @@ def test_to_dict(): tool = Tool(function_declarations=[get_current_weather_func]) - with patch("gemini_haystack.generators.gemini.genai.configure"): + with patch("google_ai_haystack.generators.gemini.genai.configure"): gemini = GoogleAIGeminiGenerator( generation_config=generation_config, safety_settings=safety_settings, tools=[tool], ) assert gemini.to_dict() == { - "type": "gemini_haystack.generators.gemini.GoogleAIGeminiGenerator", + "type": "google_ai_haystack.generators.gemini.GoogleAIGeminiGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { @@ -111,10 +111,10 @@ def test_to_dict(): def test_from_dict(): - with 
patch("gemini_haystack.generators.gemini.genai.configure"): + with patch("google_ai_haystack.generators.gemini.genai.configure"): gemini = GoogleAIGeminiGenerator.from_dict( { - "type": "gemini_haystack.generators.gemini.GoogleAIGeminiGenerator", + "type": "google_ai_haystack.generators.gemini.GoogleAIGeminiGenerator", "init_parameters": { "model": "gemini-pro-vision", "generation_config": { From 624879ee1cffe4561d406c084011dc027b81e757 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Wed, 3 Jan 2024 11:27:46 +0100 Subject: [PATCH 08/11] Add missing new line --- .../google_ai/src/google_ai_haystack/generators/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integrations/google_ai/src/google_ai_haystack/generators/__init__.py b/integrations/google_ai/src/google_ai_haystack/generators/__init__.py index 49fd5f144..e873bc332 100644 --- a/integrations/google_ai/src/google_ai_haystack/generators/__init__.py +++ b/integrations/google_ai/src/google_ai_haystack/generators/__init__.py @@ -1,3 +1,3 @@ # SPDX-FileCopyrightText: 2023-present deepset GmbH # -# SPDX-License-Identifier: Apache-2.0 \ No newline at end of file +# SPDX-License-Identifier: Apache-2.0 From 9cbe5e6a28d19645da785eee51168b3f7ff48ad7 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Wed, 3 Jan 2024 11:27:57 +0100 Subject: [PATCH 09/11] Add tests workflow --- .github/workflows/google_ai.yml | 57 +++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 .github/workflows/google_ai.yml diff --git a/.github/workflows/google_ai.yml b/.github/workflows/google_ai.yml new file mode 100644 index 000000000..46a871a76 --- /dev/null +++ b/.github/workflows/google_ai.yml @@ -0,0 +1,57 @@ +# This workflow comes from https://github.com/ofek/hatch-mypyc +# https://github.com/ofek/hatch-mypyc/blob/5a198c0ba8660494d02716cfc9d79ce4adfb1442/.github/workflows/test.yml +name: Test / google-ai + +on: + schedule: + - cron: "0 0 * * *" + pull_request: + paths: + - "integrations/google_ai/**" + - ".github/workflows/google_ai.yml" + +defaults: + run: + working-directory: integrations/google_ai + +concurrency: + group: google-vertex-${{ github.head_ref }} + cancel-in-progress: true + +env: + PYTHONUNBUFFERED: "1" + FORCE_COLOR: "1" + GOOGLE_API_KEY: "${{ secrets.GOOGLE_API_KEY }}" + +jobs: + run: + name: Python ${{ matrix.python-version }} on ${{ startsWith(matrix.os, 'macos-') && 'macOS' || startsWith(matrix.os, 'windows-') && 'Windows' || 'Linux' }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, windows-latest, macos-latest] + python-version: ["3.9", "3.10"] + + steps: + - name: Support longpaths + if: matrix.os == 'windows-latest' + working-directory: . 
+ run: git config --system core.longpaths true + + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install Hatch + run: pip install --upgrade hatch + + - name: Lint + if: matrix.python-version == '3.9' && runner.os == 'Linux' + run: hatch run lint:all + + - name: Run tests + run: hatch run cov From d2a1dba6600a45d2e409ec00c08096e204667ae4 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Wed, 3 Jan 2024 11:28:18 +0100 Subject: [PATCH 10/11] Remove example notebook --- integrations/google_ai/example.ipynb | 199 --------------------------- 1 file changed, 199 deletions(-) delete mode 100644 integrations/google_ai/example.ipynb diff --git a/integrations/google_ai/example.ipynb b/integrations/google_ai/example.ipynb deleted file mode 100644 index 89c80c29c..000000000 --- a/integrations/google_ai/example.ipynb +++ /dev/null @@ -1,199 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import getpass\n", - "\n", - "makersuite_key = getpass.getpass(\"Your Makersuite API Key\")" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "from gemini_haystack.generators.gemini import GeminiGenerator\n", - "\n", - "text_generator = GeminiGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro\")\n", - "text_image_generator = GeminiGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro-vision\")" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'answers': [\"1. **To Find Happiness and Fulfillment**: Many people believe that the meaning of life is to find happiness and fulfillment. This can be achieved through various means, such as pursuing passions, building meaningful relationships, and contributing positively to society.\\n2. **To Achieve Self-Actualization**: According to psychologist Abraham Maslow, self-actualization is the highest level of human development, where individuals fully realize their potential and become the best version of themselves. For some, achieving self-actualization is the ultimate goal of life.\\n3. **To Make a Difference**: Some individuals find meaning in life by making a positive impact on the world. This can involve engaging in acts of kindness and service, working towards social justice, or pursuing scientific or artistic endeavors that benefit society.\\n4. **To Experience and Appreciate Life**: Others believe that the meaning of life lies in simply experiencing and appreciating the wonders of the world. This can include traveling, learning new things, connecting with nature, and seeking moments of awe and inspiration.\\n5. **To Connect with Something Greater**: Many people find meaning in life through spiritual or religious beliefs. This can involve connecting with a higher power, practicing meditation or mindfulness, or engaging in rituals and traditions that provide a sense of purpose and belonging.\\n6. **To Leave a Legacy**: Some individuals believe that the meaning of life is to leave behind a lasting legacy. This can involve raising a family, creating works of art or literature, or making significant contributions to one's field of work or society as a whole.\\n7. **To Grow and Evolve**: For some, the meaning of life is to continually grow and evolve as a person. 
This involves learning from experiences, challenging oneself, and embracing new perspectives, leading to a deeper understanding of oneself and the world around them.\\n8. **To Find Purpose**: Many people seek meaning in life by finding a purpose or calling. This can involve identifying a specific goal or mission that drives their actions and provides them with a sense of direction and significance.\\n\\nUltimately, the meaning of life is a personal and subjective question, and there is no single answer that applies to everyone. The search for meaning is an ongoing journey, and individuals may find different meanings at different stages of their lives.\"]}" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "text_generator.run(parts=\"What's the meaning of life?\")" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " The first image is of C-3PO and R2-D2 from the Star Wars franchise. They are protocol and astromech droids, respectively.\n", - "\n", - "The second image is of Maria from the 1927 film Metropolis. She is a robot created by Dr. Rotwang to be a perfect replica of his dead wife, Hel.\n", - "\n", - "The third image is of Gort from the 1951 film The Day the Earth Stood Still. He is a robot sent to Earth to warn humanity about the dangers of nuclear war.\n", - "\n", - "The fourth image is of Marvin from the 1980 film The Hitchhiker's Guide to the Galaxy. He is a paranoid android who is constantly depressed and hates life.\n" - ] - } - ], - "source": [ - "import requests\n", - "from haystack.dataclasses.byte_stream import ByteStream\n", - "\n", - "URLS = [\n", - " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot1.jpg\",\n", - " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot2.jpg\",\n", - " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot3.jpg\",\n", - " \"https://raw.githubusercontent.com/silvanocerza/robots/main/robot4.jpg\"\n", - "]\n", - "images = [\n", - " ByteStream(data=requests.get(url).content, mime_type=\"image/jpeg\")\n", - " for url in URLS\n", - "]\n", - "\n", - "result = text_image_generator.run(parts = [\"What can you tell me about this robots?\", *images])\n", - "for answer in result[\"answers\"]:\n", - " print(answer)" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from google.ai.generativelanguage import FunctionDeclaration, Tool\n", - "\n", - "def get_current_weather(location: str, unit: str = \"celsius\"):\n", - " return {\"weather\": \"sunny\", \"temperature\": 21.8, \"unit\": unit}\n", - "\n", - "get_current_weather_func = FunctionDeclaration(name=\"get_current_weather\",\n", - " description= \"Get the current weather in a given location\",\n", - " parameters= {\n", - " \"type_\": \"OBJECT\",\n", - " \"properties\": {\n", - " \"location\": {\"type_\": \"STRING\", \"description\": \"The city and state, e.g. 
San Francisco, CA\"},\n", - " \"unit\": {\n", - " \"type_\": \"STRING\",\n", - " \"enum\": [\n", - " \"celsius\",\n", - " \"fahrenheit\",\n", - " ],\n", - " },\n", - " },\n", - " \"required\": [\"location\"],\n", - " }\n", - " )\n", - "\n", - "tool = Tool(function_declarations=[get_current_weather_func])" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/tuanacelik/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", - " from .autonotebook import tqdm as notebook_tqdm\n" - ] - } - ], - "source": [ - "from gemini_haystack.generators.chat.gemini import GeminiChatGenerator\n", - "\n", - "chat_generator = GeminiChatGenerator(api_key=makersuite_key, project_id=\"deepset-general\", model=\"gemini-pro\", tools=[tool])" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "ename": "InternalServerError", - "evalue": "500 An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31m_InactiveRpcError\u001b[0m Traceback (most recent call last)", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/grpc_helpers.py:79\u001b[0m, in \u001b[0;36m_wrap_unary_errors..error_remapped_callable\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 78\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m---> 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcallable_\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m grpc\u001b[38;5;241m.\u001b[39mRpcError \u001b[38;5;28;01mas\u001b[39;00m exc:\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/grpc/_channel.py:1160\u001b[0m, in \u001b[0;36m_UnaryUnaryMultiCallable.__call__\u001b[0;34m(self, request, timeout, metadata, credentials, wait_for_ready, compression)\u001b[0m\n\u001b[1;32m 1154\u001b[0m (\n\u001b[1;32m 1155\u001b[0m state,\n\u001b[1;32m 1156\u001b[0m call,\n\u001b[1;32m 1157\u001b[0m ) \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_blocking(\n\u001b[1;32m 1158\u001b[0m request, timeout, metadata, credentials, wait_for_ready, compression\n\u001b[1;32m 1159\u001b[0m )\n\u001b[0;32m-> 1160\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_end_unary_response_blocking\u001b[49m\u001b[43m(\u001b[49m\u001b[43mstate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcall\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mNone\u001b[39;49;00m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/grpc/_channel.py:1003\u001b[0m, in \u001b[0;36m_end_unary_response_blocking\u001b[0;34m(state, call, with_call, deadline)\u001b[0m\n\u001b[1;32m 1002\u001b[0m 
\u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 1003\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m _InactiveRpcError(state)\n", - "\u001b[0;31m_InactiveRpcError\u001b[0m: <_InactiveRpcError of RPC that terminated with:\n\tstatus = StatusCode.INTERNAL\n\tdetails = \"An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting\"\n\tdebug_error_string = \"UNKNOWN:Error received from peer ipv4:142.251.35.170:443 {created_time:\"2023-12-29T16:54:45.855416+03:00\", grpc_status:13, grpc_message:\"An internal error has occurred. Please retry or report in https://developers.generativeai.google/guide/troubleshooting\"}\"\n>", - "\nThe above exception was the direct cause of the following exception:\n", - "\u001b[0;31mInternalServerError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[4], line 4\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mhaystack\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mdataclasses\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m ChatMessage\n\u001b[1;32m 3\u001b[0m messages \u001b[38;5;241m=\u001b[39m [ChatMessage\u001b[38;5;241m.\u001b[39mfrom_user(content \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mWhat is the temperature in celsius in Berlin?\u001b[39m\u001b[38;5;124m\"\u001b[39m)]\n\u001b[0;32m----> 4\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mchat_generator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmessages\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmessages\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 5\u001b[0m res[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mreplies\u001b[39m\u001b[38;5;124m\"\u001b[39m]\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/gemini_haystack/generators/chat/gemini.py:149\u001b[0m, in \u001b[0;36mGeminiChatGenerator.run\u001b[0;34m(self, messages)\u001b[0m\n\u001b[1;32m 146\u001b[0m session \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_model\u001b[38;5;241m.\u001b[39mstart_chat(history\u001b[38;5;241m=\u001b[39mhistory)\n\u001b[1;32m 148\u001b[0m new_message \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_message_to_part(messages[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m])\n\u001b[0;32m--> 149\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43msession\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msend_message\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 150\u001b[0m \u001b[43m \u001b[49m\u001b[43mcontent\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnew_message\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 151\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_generation_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 152\u001b[0m \u001b[43m \u001b[49m\u001b[43msafety_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_safety_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 153\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 155\u001b[0m replies \u001b[38;5;241m=\u001b[39m []\n\u001b[1;32m 156\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m candidate \u001b[38;5;129;01min\u001b[39;00m res\u001b[38;5;241m.\u001b[39mcandidates:\n", - "File 
\u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/generativeai/generative_models.py:367\u001b[0m, in \u001b[0;36mChatSession.send_message\u001b[0;34m(self, content, generation_config, safety_settings, stream, **kwargs)\u001b[0m\n\u001b[1;32m 365\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m generation_config\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcandidate_count\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;241m1\u001b[39m) \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m1\u001b[39m:\n\u001b[1;32m 366\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCan\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mt chat with `candidate_count > 1`\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m--> 367\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate_content\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 368\u001b[0m \u001b[43m \u001b[49m\u001b[43mcontents\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mhistory\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 369\u001b[0m \u001b[43m \u001b[49m\u001b[43mgeneration_config\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mgeneration_config\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 370\u001b[0m \u001b[43m \u001b[49m\u001b[43msafety_settings\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msafety_settings\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 371\u001b[0m \u001b[43m \u001b[49m\u001b[43mstream\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mstream\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 372\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 375\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m response\u001b[38;5;241m.\u001b[39mprompt_feedback\u001b[38;5;241m.\u001b[39mblock_reason:\n\u001b[1;32m 376\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mBlockedPromptException(response\u001b[38;5;241m.\u001b[39mprompt_feedback)\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/generativeai/generative_models.py:248\u001b[0m, in \u001b[0;36mGenerativeModel.generate_content\u001b[0;34m(self, contents, generation_config, safety_settings, stream, **kwargs)\u001b[0m\n\u001b[1;32m 246\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mGenerateContentResponse\u001b[38;5;241m.\u001b[39mfrom_iterator(iterator)\n\u001b[1;32m 247\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m--> 248\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_client\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mgenerate_content\u001b[49m\u001b[43m(\u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 249\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m generation_types\u001b[38;5;241m.\u001b[39mGenerateContentResponse\u001b[38;5;241m.\u001b[39mfrom_response(response)\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/ai/generativelanguage_v1beta/services/generative_service/client.py:566\u001b[0m, in \u001b[0;36mGenerativeServiceClient.generate_content\u001b[0;34m(self, 
request, model, contents, retry, timeout, metadata)\u001b[0m\n\u001b[1;32m 561\u001b[0m metadata \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mtuple\u001b[39m(metadata) \u001b[38;5;241m+\u001b[39m (\n\u001b[1;32m 562\u001b[0m gapic_v1\u001b[38;5;241m.\u001b[39mrouting_header\u001b[38;5;241m.\u001b[39mto_grpc_metadata(((\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel\u001b[39m\u001b[38;5;124m\"\u001b[39m, request\u001b[38;5;241m.\u001b[39mmodel),)),\n\u001b[1;32m 563\u001b[0m )\n\u001b[1;32m 565\u001b[0m \u001b[38;5;66;03m# Send the request.\u001b[39;00m\n\u001b[0;32m--> 566\u001b[0m response \u001b[38;5;241m=\u001b[39m \u001b[43mrpc\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 567\u001b[0m \u001b[43m \u001b[49m\u001b[43mrequest\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 568\u001b[0m \u001b[43m \u001b[49m\u001b[43mretry\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretry\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 569\u001b[0m \u001b[43m \u001b[49m\u001b[43mtimeout\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mtimeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 570\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetadata\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetadata\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 571\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 573\u001b[0m \u001b[38;5;66;03m# Done; return the response.\u001b[39;00m\n\u001b[1;32m 574\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m response\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/gapic_v1/method.py:131\u001b[0m, in \u001b[0;36m_GapicCallable.__call__\u001b[0;34m(self, timeout, retry, compression, *args, **kwargs)\u001b[0m\n\u001b[1;32m 128\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_compression \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 129\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcompression\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m compression\n\u001b[0;32m--> 131\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mwrapped_func\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/retry.py:372\u001b[0m, in \u001b[0;36mRetry.__call__..retry_wrapped_func\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 368\u001b[0m target \u001b[38;5;241m=\u001b[39m functools\u001b[38;5;241m.\u001b[39mpartial(func, \u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 369\u001b[0m sleep_generator \u001b[38;5;241m=\u001b[39m exponential_sleep_generator(\n\u001b[1;32m 370\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_initial, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_maximum, multiplier\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_multiplier\n\u001b[1;32m 371\u001b[0m )\n\u001b[0;32m--> 372\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mretry_target\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 373\u001b[0m \u001b[43m \u001b[49m\u001b[43mtarget\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 374\u001b[0m \u001b[43m 
\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_predicate\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 375\u001b[0m \u001b[43m \u001b[49m\u001b[43msleep_generator\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 376\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_timeout\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 377\u001b[0m \u001b[43m \u001b[49m\u001b[43mon_error\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mon_error\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 378\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/retry.py:207\u001b[0m, in \u001b[0;36mretry_target\u001b[0;34m(target, predicate, sleep_generator, timeout, on_error, **kwargs)\u001b[0m\n\u001b[1;32m 205\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m sleep \u001b[38;5;129;01min\u001b[39;00m sleep_generator:\n\u001b[1;32m 206\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[0;32m--> 207\u001b[0m result \u001b[38;5;241m=\u001b[39m \u001b[43mtarget\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 208\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m inspect\u001b[38;5;241m.\u001b[39misawaitable(result):\n\u001b[1;32m 209\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(_ASYNC_RETRY_WARNING)\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/timeout.py:120\u001b[0m, in \u001b[0;36mTimeToDeadlineTimeout.__call__..func_with_timeout\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 117\u001b[0m \u001b[38;5;66;03m# Avoid setting negative timeout\u001b[39;00m\n\u001b[1;32m 118\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtimeout\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mmax\u001b[39m(\u001b[38;5;241m0\u001b[39m, \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_timeout \u001b[38;5;241m-\u001b[39m time_since_first_attempt)\n\u001b[0;32m--> 120\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n", - "File \u001b[0;32m~/opt/anaconda3/envs/gemini/lib/python3.12/site-packages/google/api_core/grpc_helpers.py:81\u001b[0m, in \u001b[0;36m_wrap_unary_errors..error_remapped_callable\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 79\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m callable_(\u001b[38;5;241m*\u001b[39margs, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs)\n\u001b[1;32m 80\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m grpc\u001b[38;5;241m.\u001b[39mRpcError \u001b[38;5;28;01mas\u001b[39;00m exc:\n\u001b[0;32m---> 81\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m exceptions\u001b[38;5;241m.\u001b[39mfrom_grpc_error(exc) \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mexc\u001b[39;00m\n", - "\u001b[0;31mInternalServerError\u001b[0m: 500 An internal error has occurred. 
Please retry or report in https://developers.generativeai.google/guide/troubleshooting" - ] - } - ], - "source": [ - "from haystack.dataclasses import ChatMessage\n", - "\n", - "messages = [ChatMessage.from_user(content = \"What is the temperature in celsius in Berlin?\")]\n", - "res = chat_generator.run(messages=messages)\n", - "res[\"replies\"]" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "gemini", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.12.0" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 08028f3f559db25c0ceeb9c35047e8e02f7e8140 Mon Sep 17 00:00:00 2001 From: Silvano Cerza Date: Wed, 3 Jan 2024 11:39:38 +0100 Subject: [PATCH 11/11] Fix linting --- integrations/google_ai/pyproject.toml | 8 ++++++++ .../generators/chat/gemini.py | 6 +++--- .../google_ai_haystack/generators/gemini.py | 6 +++--- .../tests/generators/chat/test_chat_gemini.py | 18 ++++++++++++------ .../google_ai/tests/generators/test_gemini.py | 12 ++++++++---- 5 files changed, 34 insertions(+), 16 deletions(-) diff --git a/integrations/google_ai/pyproject.toml b/integrations/google_ai/pyproject.toml index 06ef23134..b04949592 100644 --- a/integrations/google_ai/pyproject.toml +++ b/integrations/google_ai/pyproject.toml @@ -163,3 +163,11 @@ exclude_lines = [ "if __name__ == .__main__.:", "if TYPE_CHECKING:", ] +[[tool.mypy.overrides]] +module = [ + "google.*", + "haystack.*", + "pytest.*", + "numpy.*", +] +ignore_missing_imports = true \ No newline at end of file diff --git a/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py b/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py index 4e71b784d..1f5557e0d 100644 --- a/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py +++ b/integrations/google_ai/src/google_ai_haystack/generators/chat/gemini.py @@ -2,13 +2,13 @@ from typing import Any, Dict, List, Optional, Union import google.generativeai as genai +from google.ai.generativelanguage import Content, Part, Tool +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory from haystack.core.component import component from haystack.core.serialization import default_from_dict, default_to_dict from haystack.dataclasses.byte_stream import ByteStream from haystack.dataclasses.chat_message import ChatMessage, ChatRole -from google.generativeai import GenerationConfig, GenerativeModel -from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google.ai.generativelanguage import Tool, Part, Content logger = logging.getLogger(__name__) diff --git a/integrations/google_ai/src/google_ai_haystack/generators/gemini.py b/integrations/google_ai/src/google_ai_haystack/generators/gemini.py index c32698ca8..d05d99c60 100644 --- a/integrations/google_ai/src/google_ai_haystack/generators/gemini.py +++ b/integrations/google_ai/src/google_ai_haystack/generators/gemini.py @@ -2,13 +2,13 @@ from typing import Any, Dict, List, Optional, Union import google.generativeai as genai +from google.ai.generativelanguage import Content, Part, Tool +from google.generativeai import GenerationConfig, GenerativeModel +from google.generativeai.types import HarmBlockThreshold, HarmCategory from haystack.core.component import 
component from haystack.core.component.types import Variadic from haystack.core.serialization import default_from_dict, default_to_dict from haystack.dataclasses.byte_stream import ByteStream -from google.generativeai import GenerationConfig, GenerativeModel -from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google.ai.generativelanguage import Tool, Part, Content logger = logging.getLogger(__name__) diff --git a/integrations/google_ai/tests/generators/chat/test_chat_gemini.py b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py index 8522af819..16a2af236 100644 --- a/integrations/google_ai/tests/generators/chat/test_chat_gemini.py +++ b/integrations/google_ai/tests/generators/chat/test_chat_gemini.py @@ -1,11 +1,11 @@ import os from unittest.mock import patch -from haystack.dataclasses.chat_message import ChatMessage +import pytest +from google.ai.generativelanguage import FunctionDeclaration, Tool from google.generativeai import GenerationConfig, GenerativeModel from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google.ai.generativelanguage import FunctionDeclaration, Tool -import pytest +from haystack.dataclasses.chat_message import ChatMessage from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator @@ -105,7 +105,9 @@ def test_to_dict(): }, "safety_settings": {6: 3}, "tools": [ - b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai" + b"\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08" + b"\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" ], }, } @@ -128,7 +130,9 @@ def test_from_dict(): }, "safety_settings": {6: 3}, "tools": [ - b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai" + b"\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08" + b"\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" ], }, } @@ -176,7 +180,9 @@ def test_from_dict(): @pytest.mark.skipif("GOOGLE_API_KEY" not in os.environ, reason="GOOGLE_API_KEY not set") def test_run(): - def get_current_weather(location: str, unit: str = "celsius"): + # We're ignoring the unused function argument check since we must have that argument for the test + # to run successfully, but we don't actually use it. 
+ def get_current_weather(location: str, unit: str = "celsius"): # noqa: ARG001 return {"weather": "sunny", "temperature": 21.8, "unit": unit} get_current_weather_func = FunctionDeclaration( diff --git a/integrations/google_ai/tests/generators/test_gemini.py b/integrations/google_ai/tests/generators/test_gemini.py index 2f410862c..c01c8b158 100644 --- a/integrations/google_ai/tests/generators/test_gemini.py +++ b/integrations/google_ai/tests/generators/test_gemini.py @@ -1,10 +1,10 @@ import os from unittest.mock import patch +import pytest +from google.ai.generativelanguage import FunctionDeclaration, Tool from google.generativeai import GenerationConfig, GenerativeModel from google.generativeai.types import HarmBlockThreshold, HarmCategory -from google.ai.generativelanguage import FunctionDeclaration, Tool -import pytest from google_ai_haystack.generators.gemini import GoogleAIGeminiGenerator @@ -104,7 +104,9 @@ def test_to_dict(): }, "safety_settings": {6: 3}, "tools": [ - b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai" + b"\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08" + b"\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" ], }, } @@ -127,7 +129,9 @@ def test_from_dict(): }, "safety_settings": {6: 3}, "tools": [ - b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" + b"\n\xad\x01\n\x13get_current_weather\x12+Get the current weather in a given location\x1ai" + b"\x08\x06:\x1f\n\x04unit\x12\x17\x08\x01*\x07celsius*\nfahrenheit::\n\x08location\x12.\x08" + b"\x01\x1a*The city and state, e.g. San Francisco, CAB\x08location" ], }, }
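
For reference, here is a minimal usage sketch of the chat generator these tests exercise, reconstructed from the test fixtures and the example notebook removed earlier in this series. The api_key and tools parameters are inferred from the tests rather than confirmed against the component's final signature, so treat this as an assumption-laden sketch, not the canonical API:

    import os

    from google.ai.generativelanguage import FunctionDeclaration, Tool
    from haystack.dataclasses.chat_message import ChatMessage

    from google_ai_haystack.generators.chat.gemini import GoogleAIGeminiChatGenerator

    # Function declaration mirroring the get_current_weather tool serialized in the tests.
    get_current_weather_func = FunctionDeclaration(
        name="get_current_weather",
        description="Get the current weather in a given location",
        parameters={
            "type_": "OBJECT",
            "properties": {
                "location": {
                    "type_": "STRING",
                    "description": "The city and state, e.g. San Francisco, CA",
                },
                "unit": {"type_": "STRING", "enum": ["celsius", "fahrenheit"]},
            },
            "required": ["location"],
        },
    )
    tool = Tool(function_declarations=[get_current_weather_func])

    # Passing api_key directly is an assumption based on the GOOGLE_API_KEY guard in the tests.
    chat_generator = GoogleAIGeminiChatGenerator(
        api_key=os.environ["GOOGLE_API_KEY"],
        tools=[tool],
    )

    # Same prompt as the removed example notebook; run() returns a dict with a "replies" key.
    messages = [ChatMessage.from_user(content="What is the temperature in celsius in Berlin?")]
    res = chat_generator.run(messages=messages)
    print(res["replies"])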