Commit

working on adding gemini

dougollerenshaw committed Oct 9, 2024
1 parent 790c656 commit 3a3d46e
Showing 5 changed files with 188 additions and 3 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/python-tests.yml
@@ -11,7 +11,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: [3.8, 3.9, '3.10']
python-version: [3.9, '3.10', '3.11', '3.12']

steps:
- uses: actions/checkout@v2
@@ -32,4 +32,4 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y xvfb
xvfb-run -a pytest -v tests/ui/test_chat_window.py || echo "UI tests failed but continuing"
xvfb-run -a pytest -v tests/ui/test_chat_window.py || echo "UI tests failed but continuing"
2 changes: 1 addition & 1 deletion README.md
@@ -26,7 +26,7 @@ https://github.com/user-attachments/assets/8aa729ff-c431-4a61-a9ef-d17050a27d02

### Prerequisites

- Python 3.8 or higher
- Python 3.9 or higher

### Setup

2 changes: 2 additions & 0 deletions requirements.txt
@@ -1,6 +1,8 @@
anthropic==0.34.2
google-generativeai==0.8.3
python-decouple==3.8
virtualenv==20.16.2
numpy==1.26.4
openai
hjson
pyyaml
144 changes: 144 additions & 0 deletions sandbox/gemini_sandbox.ipynb
@@ -0,0 +1,144 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"genai.configure(api_key=config('GEMINI_API_KEY'))\n",
"\n",
"model = genai.GenerativeModel(\"gemini-1.5-pro\")"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[0;31mSignature:\u001b[0m\n",
"\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgenerate_content\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mcontents\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.ContentsType'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mgeneration_config\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'generation_types.GenerationConfigType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0msafety_settings\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'safety_types.SafetySettingOptions | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'bool'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mtools\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.FunctionLibraryType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mtool_config\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.ToolConfigType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m \u001b[0mrequest_options\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'helper_types.RequestOptionsType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n",
"\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;34m'generation_types.GenerateContentResponse'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mDocstring:\u001b[0m\n",
"A multipurpose function to generate responses from the model.\n",
"\n",
"This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn\n",
"conversations.\n",
"\n",
">>> model = genai.GenerativeModel('models/gemini-pro')\n",
">>> response = model.generate_content('Tell me a story about a magic backpack')\n",
">>> response.text\n",
"\n",
"### Streaming\n",
"\n",
"This method supports streaming with the `stream=True`. The result has the same type as the non streaming case,\n",
"but you can iterate over the response chunks as they become available:\n",
"\n",
">>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)\n",
">>> for chunk in response:\n",
"... print(chunk.text)\n",
"\n",
"### Multi-turn\n",
"\n",
"This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each\n",
"request. This takes some manual management but gives you complete control:\n",
"\n",
">>> messages = [{'role':'user', 'parts': ['hello']}]\n",
">>> response = model.generate_content(messages) # \"Hello, how can I help\"\n",
">>> messages.append(response.candidates[0].content)\n",
">>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})\n",
">>> response = model.generate_content(messages)\n",
"\n",
"For a simpler multi-turn interface see `GenerativeModel.start_chat`.\n",
"\n",
"### Input type flexibility\n",
"\n",
"While the underlying API strictly expects a `list[protos.Content]` objects, this method\n",
"will convert the user input into the correct type. The hierarchy of types that can be\n",
"converted is below. Any of these objects can be passed as an equivalent `dict`.\n",
"\n",
"* `Iterable[protos.Content]`\n",
"* `protos.Content`\n",
"* `Iterable[protos.Part]`\n",
"* `protos.Part`\n",
"* `str`, `Image`, or `protos.Blob`\n",
"\n",
"In an `Iterable[protos.Content]` each `content` is a separate message.\n",
"But note that an `Iterable[protos.Part]` is taken as the parts of a single message.\n",
"\n",
"Arguments:\n",
" contents: The contents serving as the model's prompt.\n",
" generation_config: Overrides for the model's generation config.\n",
" safety_settings: Overrides for the model's safety settings.\n",
" stream: If True, yield response chunks as they are generated.\n",
" tools: `protos.Tools` more info coming soon.\n",
" request_options: Options for the request.\n",
"\u001b[0;31mFile:\u001b[0m ~/opt/anaconda3/envs/codeaide/lib/python3.11/site-packages/google/generativeai/generative_models.py\n",
"\u001b[0;31mType:\u001b[0m method"
]
}
],
"source": [
"# Define the system prompt (context)\n",
"system_prompt = \"\"\"You are an AI assistant specialized in explaining complex topics in simple terms.\n",
"Your responses should be clear, concise, and easy to understand for a general audience.\"\"\"\n",
"\n",
"# Define the user's question\n",
"user_question = \"Can you explain quantum computing in simple terms?\"\n",
"\n",
"# Set up the chat\n",
"chat = model.start_chat(context=system_prompt)\n",
"\n",
"# Generate a response\n",
"response = chat.send_message(user_question)\n",
"\n",
"# Print the response\n",
"print(response.text)\n",
"\n",
"# You can continue the conversation\n",
"follow_up = chat.send_message(\"How does that compare to classical computing?\")\n",
"print(follow_up.text)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "codeaide",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.10"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
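
The captured docstring above describes streaming and stateless multi-turn usage. A minimal sketch of both, assuming the same google-generativeai==0.8.3 setup and a GEMINI_API_KEY in .env as elsewhere in this commit:

from decouple import config
import google.generativeai as genai

genai.configure(api_key=config("GEMINI_API_KEY"))
model = genai.GenerativeModel("gemini-1.5-flash")

# Streaming: same call, but iterate over chunks as they arrive.
response = model.generate_content("Tell me a story about a magic backpack", stream=True)
for chunk in response:
    print(chunk.text, end="")

# Multi-turn is stateless: resend the full history with each request.
messages = [{"role": "user", "parts": ["hello"]}]
reply = model.generate_content(messages)
messages.append(reply.candidates[0].content)
messages.append({"role": "user", "parts": ["How does quantum physics work?"]})
print(model.generate_content(messages).text)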
39 changes: 39 additions & 0 deletions sandbox/prototype_gemini.py
@@ -0,0 +1,39 @@
"""
This is a little sandbox script to test out the Gemini API.
"""

import argparse
from decouple import config
import google.generativeai as genai

genai.configure(api_key=config("GEMINI_API_KEY"))

model = genai.GenerativeModel("gemini-1.5-flash")


def generate_a_story():
response = model.generate_content("Write a story about a magic backpack.")
print(response.text)


def request_code():
response = model.generate_content(
"Write a Python function to calculate the Fibonacci sequence."
)
print(response.text)


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Gemini API prototype script")
parser.add_argument(
"action",
choices=["story", "code"],
help="Action to perform: generate a story or request code",
)

args = parser.parse_args()

if args.action == "story":
generate_a_story()
elif args.action == "code":
request_code()
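
Run from the repository root, for example:

python sandbox/prototype_gemini.py story
python sandbox/prototype_gemini.py code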
