Commit 3a3d46e (1 parent: 790c656)

Showing 5 changed files with 188 additions and 3 deletions.
@@ -0,0 +1,144 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from decouple import config\n",
    "import google.generativeai as genai\n",
    "\n",
    "genai.configure(api_key=config('GEMINI_API_KEY'))\n",
    "\n",
    "model = genai.GenerativeModel(\"gemini-1.5-pro\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
"\u001b[0;31mSignature:\u001b[0m\n", | ||
"\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mgenerate_content\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mcontents\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.ContentsType'\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mgeneration_config\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'generation_types.GenerationConfigType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0msafety_settings\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'safety_types.SafetySettingOptions | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'bool'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mtools\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.FunctionLibraryType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mtool_config\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'content_types.ToolConfigType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m \u001b[0mrequest_options\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m'helper_types.RequestOptionsType | None'\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\n", | ||
"\u001b[0;34m\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;34m'generation_types.GenerateContentResponse'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||
"\u001b[0;31mDocstring:\u001b[0m\n", | ||
"A multipurpose function to generate responses from the model.\n", | ||
"\n", | ||
"This `GenerativeModel.generate_content` method can handle multimodal input, and multi-turn\n", | ||
"conversations.\n", | ||
"\n", | ||
">>> model = genai.GenerativeModel('models/gemini-pro')\n", | ||
">>> response = model.generate_content('Tell me a story about a magic backpack')\n", | ||
">>> response.text\n", | ||
"\n", | ||
"### Streaming\n", | ||
"\n", | ||
"This method supports streaming with the `stream=True`. The result has the same type as the non streaming case,\n", | ||
"but you can iterate over the response chunks as they become available:\n", | ||
"\n", | ||
">>> response = model.generate_content('Tell me a story about a magic backpack', stream=True)\n", | ||
">>> for chunk in response:\n", | ||
"... print(chunk.text)\n", | ||
"\n", | ||
"### Multi-turn\n", | ||
"\n", | ||
"This method supports multi-turn chats but is **stateless**: the entire conversation history needs to be sent with each\n", | ||
"request. This takes some manual management but gives you complete control:\n", | ||
"\n", | ||
">>> messages = [{'role':'user', 'parts': ['hello']}]\n", | ||
">>> response = model.generate_content(messages) # \"Hello, how can I help\"\n", | ||
">>> messages.append(response.candidates[0].content)\n", | ||
">>> messages.append({'role':'user', 'parts': ['How does quantum physics work?']})\n", | ||
">>> response = model.generate_content(messages)\n", | ||
"\n", | ||
"For a simpler multi-turn interface see `GenerativeModel.start_chat`.\n", | ||
"\n", | ||
"### Input type flexibility\n", | ||
"\n", | ||
"While the underlying API strictly expects a `list[protos.Content]` objects, this method\n", | ||
"will convert the user input into the correct type. The hierarchy of types that can be\n", | ||
"converted is below. Any of these objects can be passed as an equivalent `dict`.\n", | ||
"\n", | ||
"* `Iterable[protos.Content]`\n", | ||
"* `protos.Content`\n", | ||
"* `Iterable[protos.Part]`\n", | ||
"* `protos.Part`\n", | ||
"* `str`, `Image`, or `protos.Blob`\n", | ||
"\n", | ||
"In an `Iterable[protos.Content]` each `content` is a separate message.\n", | ||
"But note that an `Iterable[protos.Part]` is taken as the parts of a single message.\n", | ||
"\n", | ||
"Arguments:\n", | ||
" contents: The contents serving as the model's prompt.\n", | ||
" generation_config: Overrides for the model's generation config.\n", | ||
" safety_settings: Overrides for the model's safety settings.\n", | ||
" stream: If True, yield response chunks as they are generated.\n", | ||
" tools: `protos.Tools` more info coming soon.\n", | ||
" request_options: Options for the request.\n", | ||
"\u001b[0;31mFile:\u001b[0m ~/opt/anaconda3/envs/codeaide/lib/python3.11/site-packages/google/generativeai/generative_models.py\n", | ||
"\u001b[0;31mType:\u001b[0m method" | ||
] | ||
} | ||
], | ||
"source": [ | ||
"# Define the system prompt (context)\n", | ||
"system_prompt = \"\"\"You are an AI assistant specialized in explaining complex topics in simple terms.\n", | ||
"Your responses should be clear, concise, and easy to understand for a general audience.\"\"\"\n", | ||
"\n", | ||
"# Define the user's question\n", | ||
"user_question = \"Can you explain quantum computing in simple terms?\"\n", | ||
"\n", | ||
"# Set up the chat\n", | ||
"chat = model.start_chat(context=system_prompt)\n", | ||
"\n", | ||
"# Generate a response\n", | ||
"response = chat.send_message(user_question)\n", | ||
"\n", | ||
"# Print the response\n", | ||
"print(response.text)\n", | ||
"\n", | ||
"# You can continue the conversation\n", | ||
"follow_up = chat.send_message(\"How does that compare to classical computing?\")\n", | ||
"print(follow_up.text)" | ||
] | ||
}, | ||
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "codeaide",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
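The `generate_content` docstring captured in the output above is worth pairing with a runnable version of its multi-turn example, since the method is stateless and the whole history must be re-sent on every call. A minimal sketch of that manual pattern, adapted from the docstring and assuming `genai` has already been configured with an API key as in the first cell:

    import google.generativeai as genai

    model = genai.GenerativeModel("gemini-1.5-pro")

    # generate_content keeps no server-side state: the caller owns the
    # history and re-sends all of it with every request.
    messages = [{"role": "user", "parts": ["hello"]}]
    response = model.generate_content(messages)

    # Append the model's reply, then the next user turn, and call again.
    messages.append(response.candidates[0].content)
    messages.append({"role": "user", "parts": ["How does quantum physics work?"]})
    response = model.generate_content(messages)
    print(response.text)

`start_chat`, used in the chat cell above, wraps exactly this bookkeeping behind a simpler interface.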
@@ -0,0 +1,39 @@
""" | ||
This is a little sandbox script to test out the Gemini API. | ||
""" | ||
|
||
import argparse | ||
from decouple import config | ||
import google.generativeai as genai | ||
|
||
genai.configure(api_key=config("GEMINI_API_KEY")) | ||
|
||
model = genai.GenerativeModel("gemini-1.5-flash") | ||
|
||
|
||
def generate_a_story(): | ||
response = model.generate_content("Write a story about a magic backpack.") | ||
print(response.text) | ||
|
||
|
||
def request_code(): | ||
response = model.generate_content( | ||
"Write a Python function to calculate the Fibonacci sequence." | ||
) | ||
print(response.text) | ||
|
||
|
||
if __name__ == "__main__": | ||
parser = argparse.ArgumentParser(description="Gemini API prototype script") | ||
parser.add_argument( | ||
"action", | ||
choices=["story", "code"], | ||
help="Action to perform: generate a story or request code", | ||
) | ||
|
||
args = parser.parse_args() | ||
|
||
if args.action == "story": | ||
generate_a_story() | ||
elif args.action == "code": | ||
request_code() |
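The `generate_content` docstring shown in the notebook also documents a `stream=True` flag. If the sandbox grows, a streaming variant of `generate_a_story` could print chunks as they arrive; the sketch below is illustrative, and `stream_a_story` is a hypothetical addition rather than part of the committed script:

    def stream_a_story():
        # stream=True yields response chunks as they are generated instead
        # of returning one finished response.
        response = model.generate_content(
            "Write a story about a magic backpack.", stream=True
        )
        for chunk in response:
            print(chunk.text, end="", flush=True)
        print()

Wiring this in would only require a third "stream" choice on the argparse argument and a matching branch at the bottom of the script.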