From 754a6fbbd8b6344f0edfd82b9797ad5d65886fd0 Mon Sep 17 00:00:00 2001 From: fern-api <115122769+fern-api[bot]@users.noreply.github.com> Date: Wed, 11 Sep 2024 13:28:16 +0000 Subject: [PATCH] Release 0.0.1-beta13 --- pyproject.toml | 2 +- reference.md | 2376 +++----- src/gooey/__init__.py | 165 +- src/gooey/client.py | 4797 ++++++++++------- src/gooey/copilot/__init__.py | 47 + .../client.py | 152 +- src/gooey/copilot/types/__init__.py | 49 + .../copilot_completion_request_asr_model.py} | 2 +- ...ilot_completion_request_citation_style.py} | 2 +- ...lot_completion_request_embedding_model.py} | 2 +- ...ilot_completion_request_functions_item.py} | 6 +- ...mpletion_request_functions_item_trigger.py | 5 + ...opilot_completion_request_lipsync_model.py | 5 + ...pilot_completion_request_messages_item.py} | 10 +- ...ompletion_request_messages_item_content.py | 8 + ...ion_request_messages_item_content_item.py} | 8 +- ...t_completion_request_messages_item_role.py | 5 + ...lot_completion_request_openai_tts_model.py | 5 + ...t_completion_request_openai_voice_name.py} | 2 +- ...completion_request_response_format_type.py | 5 + ..._completion_request_sadtalker_settings.py} | 8 +- ..._request_sadtalker_settings_preprocess.py} | 2 +- ...ilot_completion_request_selected_model.py} | 2 +- ...ot_completion_request_translation_model.py | 5 + ...opilot_completion_request_tts_provider.py} | 2 +- .../copilot_for_your_enterprise/__init__.py | 47 - .../types/__init__.py | 47 - ...deo_bots_request_functions_item_trigger.py | 5 - .../async_video_bots_request_lipsync_model.py | 5 - ...ideo_bots_request_messages_item_content.py | 6 - ...c_video_bots_request_messages_item_role.py | 5 - ...ync_video_bots_request_openai_tts_model.py | 5 - ...video_bots_request_response_format_type.py | 5 - ...nc_video_bots_request_translation_model.py | 5 - src/gooey/copilot_integrations/__init__.py | 29 - src/gooey/copilot_integrations/client.py | 921 ---- .../copilot_integrations/types/__init__.py | 27 - .../types/video_bots_stream_response.py | 10 - src/gooey/core/client_wrapper.py | 2 +- src/gooey/errors/payment_required_error.py | 4 +- src/gooey/evaluator/__init__.py | 21 - src/gooey/evaluator/client.py | 342 -- src/gooey/evaluator/types/__init__.py | 19 - src/gooey/functions/__init__.py | 2 - src/gooey/functions/client.py | 231 - src/gooey/lip_syncing/__init__.py | 17 - src/gooey/lip_syncing/client.py | 305 -- src/gooey/lip_syncing/types/__init__.py | 15 - ..._lipsync_request_functions_item_trigger.py | 5 - .../async_lipsync_request_selected_model.py | 5 - src/gooey/smart_gpt/__init__.py | 15 - src/gooey/smart_gpt/client.py | 324 -- src/gooey/smart_gpt/types/__init__.py | 13 - src/gooey/types/__init__.py | 54 + ...lk_eval_page_request_agg_functions_item.py | 4 +- ...age_request_agg_functions_item_function.py | 0 ...ulk_eval_page_request_eval_prompts_item.py | 4 +- .../bulk_eval_page_request_functions_item.py | 4 +- ...val_page_request_functions_item_trigger.py | 0 ..._eval_page_request_response_format_type.py | 0 .../bulk_eval_page_request_selected_model.py | 0 src/gooey/types/create_stream_request.py | 175 + .../types/create_stream_request_asr_model.py | 0 .../create_stream_request_citation_style.py | 0 .../create_stream_request_embedding_model.py | 0 .../create_stream_request_lipsync_model.py | 0 .../create_stream_request_openai_tts_model.py | 0 ...create_stream_request_openai_voice_name.py | 0 ...ate_stream_request_response_format_type.py | 0 .../create_stream_request_selected_model.py | 0 
...create_stream_request_translation_model.py | 0 .../create_stream_request_tts_provider.py | 0 .../lipsync_request_functions_item.py} | 10 +- .../lipsync_request_functions_item_trigger.py | 5 + .../lipsync_request_sadtalker_settings.py} | 10 +- ..._request_sadtalker_settings_preprocess.py} | 2 +- .../types/lipsync_request_selected_model.py | 5 + .../smart_gpt_page_request_functions_item.py | 4 +- ...gpt_page_request_functions_item_trigger.py | 0 ...t_gpt_page_request_response_format_type.py | 0 .../smart_gpt_page_request_selected_model.py | 0 81 files changed, 4361 insertions(+), 6025 deletions(-) create mode 100644 src/gooey/copilot/__init__.py rename src/gooey/{copilot_for_your_enterprise => copilot}/client.py (81%) create mode 100644 src/gooey/copilot/types/__init__.py rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py => copilot/types/copilot_completion_request_asr_model.py} (90%) rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py => copilot/types/copilot_completion_request_citation_style.py} (90%) rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py => copilot/types/copilot_completion_request_embedding_model.py} (86%) rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py => copilot/types/copilot_completion_request_functions_item.py} (71%) create mode 100644 src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py create mode 100644 src/gooey/copilot/types/copilot_completion_request_lipsync_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py => copilot/types/copilot_completion_request_messages_item.py} (59%) create mode 100644 src/gooey/copilot/types/copilot_completion_request_messages_item_content.py rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py => copilot/types/copilot_completion_request_messages_item_content_item.py} (75%) create mode 100644 src/gooey/copilot/types/copilot_completion_request_messages_item_role.py create mode 100644 src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py => copilot/types/copilot_completion_request_openai_voice_name.py} (74%) create mode 100644 src/gooey/copilot/types/copilot_completion_request_response_format_type.py rename src/gooey/{lip_syncing/types/async_lipsync_request_sadtalker_settings.py => copilot/types/copilot_completion_request_sadtalker_settings.py} (83%) rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py => copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py} (70%) rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py => copilot/types/copilot_completion_request_selected_model.py} (95%) create mode 100644 src/gooey/copilot/types/copilot_completion_request_translation_model.py rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py => copilot/types/copilot_completion_request_tts_provider.py} (78%) delete mode 100644 src/gooey/copilot_for_your_enterprise/__init__.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/__init__.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py delete mode 100644 
src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py delete mode 100644 src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py delete mode 100644 src/gooey/copilot_integrations/__init__.py delete mode 100644 src/gooey/copilot_integrations/client.py delete mode 100644 src/gooey/copilot_integrations/types/__init__.py delete mode 100644 src/gooey/copilot_integrations/types/video_bots_stream_response.py delete mode 100644 src/gooey/evaluator/__init__.py delete mode 100644 src/gooey/evaluator/client.py delete mode 100644 src/gooey/evaluator/types/__init__.py delete mode 100644 src/gooey/functions/__init__.py delete mode 100644 src/gooey/functions/client.py delete mode 100644 src/gooey/lip_syncing/__init__.py delete mode 100644 src/gooey/lip_syncing/client.py delete mode 100644 src/gooey/lip_syncing/types/__init__.py delete mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py delete mode 100644 src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py delete mode 100644 src/gooey/smart_gpt/__init__.py delete mode 100644 src/gooey/smart_gpt/client.py delete mode 100644 src/gooey/smart_gpt/types/__init__.py rename src/gooey/{evaluator => }/types/bulk_eval_page_request_agg_functions_item.py (85%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_agg_functions_item_function.py (100%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_eval_prompts_item.py (81%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_functions_item.py (86%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_functions_item_trigger.py (100%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_response_format_type.py (100%) rename src/gooey/{evaluator => }/types/bulk_eval_page_request_selected_model.py (100%) create mode 100644 src/gooey/types/create_stream_request.py rename src/gooey/{copilot_integrations => }/types/create_stream_request_asr_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_citation_style.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_embedding_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_lipsync_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_openai_tts_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_openai_voice_name.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_response_format_type.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_selected_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_translation_model.py (100%) rename src/gooey/{copilot_integrations => }/types/create_stream_request_tts_provider.py (100%) rename src/gooey/{lip_syncing/types/async_lipsync_request_functions_item.py => types/lipsync_request_functions_item.py} (59%) create mode 100644 src/gooey/types/lipsync_request_functions_item_trigger.py 
rename src/gooey/{copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py => types/lipsync_request_sadtalker_settings.py} (78%) rename src/gooey/{lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py => types/lipsync_request_sadtalker_settings_preprocess.py} (71%) create mode 100644 src/gooey/types/lipsync_request_selected_model.py rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_functions_item.py (86%) rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_functions_item_trigger.py (100%) rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_response_format_type.py (100%) rename src/gooey/{smart_gpt => }/types/smart_gpt_page_request_selected_model.py (100%) diff --git a/pyproject.toml b/pyproject.toml index 989ad22..52a220c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "gooeyai" -version = "0.0.1-beta12" +version = "0.0.1-beta13" description = "" readme = "README.md" authors = [] diff --git a/reference.md b/reference.md index b905fb3..6f7dda4 100644 --- a/reference.md +++ b/reference.md @@ -1496,6 +1496,181 @@ _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the result + + + + +
client.eval(...) +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.eval( + documents=["documents"], +) + +``` +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+
+**documents:** `typing.Sequence[str]` 
+
+
+Upload or link to a CSV or Google Sheet that contains your sample input data.
+For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+Remember to include header names in your CSV too.
+
+
+
+ +
+
+ +**example_id:** `typing.Optional[str]` + +
+
+ +
+
+ +**functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]]` + +
+
+ +
+
+ +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments + +
+
+ +
+
+ +**eval_prompts:** `typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]]` + + +Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. +_The `columns` dictionary can be used to reference the spreadsheet columns._ + + +
+
+ +
+
+ +**agg_functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]]` + + +Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + +
+
+ +
+
+ +**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]` + +
+
+ +
+
+ +**avoid_repetition:** `typing.Optional[bool]` + +
+
+ +
+
+ +**num_outputs:** `typing.Optional[int]` + +
+
+ +
+
+ +**quality:** `typing.Optional[float]` + +
+
+ +
+
+ +**max_tokens:** `typing.Optional[int]` + +
+
+ +
+
+ +**sampling_temperature:** `typing.Optional[float]` + +
+
+ +
+
+ +**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]` + +
+
+ +
+
+ +**settings:** `typing.Optional[RunSettings]` + +
+
+ +
+
+ +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. + +
+
+
+
+ +
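A slightly fuller `client.eval(...)` call may help illustrate the parameters documented above. This is a minimal sketch, not part of the patch: the CSV URL and the numeric settings are hypothetical placeholders, and only parameters shown in this section (`documents`, `num_outputs`, `max_tokens`, `sampling_temperature`) are used.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Hypothetical hosted CSV of sample inputs; remember to include header names.
result = client.eval(
    documents=["https://example.com/copilot_sample_questions.csv"],
    num_outputs=1,
    max_tokens=512,
    sampling_temperature=0.2,
)

# The return type is not shown in this excerpt; printing is only illustrative.
print(result)
```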
@@ -2067,7 +2242,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q -
client.doc_summary(...) +
client.smart_gpt(...)
@@ -2085,7 +2260,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.doc_summary() +client.smart_gpt( + input_prompt="input_prompt", +) ```
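As a hedged illustration of the SmartGPT parameters documented below (`input_prompt`, `cot_prompt`, `reflexion_prompt`, `dera_prompt`), the sketch passes a custom prompt for each stage. Only the parameter names come from this diff; the prompt wordings are assumptions.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Stage prompts are illustrative; the template variables the workflow expects
# are not documented in this excerpt.
result = client.smart_gpt(
    input_prompt="What is the boiling point of water at sea level?",
    cot_prompt="Let's think step by step.",
    reflexion_prompt="Review the draft answers above and point out any errors.",
    dera_prompt="Resolve the critiques into a single final answer.",
)
print(result)
```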
@@ -2101,9 +2278,7 @@ client.doc_summary()
-**documents:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**input_prompt:** `str`
@@ -2119,7 +2294,7 @@ typing.List[core.File]` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[DocSummaryRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]]`
@@ -2135,23 +2310,7 @@ typing.List[core.File]` — See core.File for more documentation
-**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**merge_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[DocSummaryRequestSelectedModel]` +**cot_prompt:** `typing.Optional[str]`
@@ -2159,7 +2318,7 @@ typing.List[core.File]` — See core.File for more documentation
-**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]` +**reflexion_prompt:** `typing.Optional[str]`
@@ -2167,7 +2326,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]` +**dera_prompt:** `typing.Optional[str]`
@@ -2175,7 +2334,7 @@ typing.List[core.File]` — See core.File for more documentation
-**google_translate_target:** `typing.Optional[str]` +**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]`
@@ -2223,7 +2382,7 @@ typing.List[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]` +**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
@@ -2251,7 +2410,7 @@ typing.List[core.File]` — See core.File for more documentation
-
client.lipsync_tts(...) +
client.doc_summary(...)
@@ -2269,9 +2428,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.lipsync_tts( - text_prompt="text_prompt", -) +client.doc_summary() ```
@@ -2287,7 +2444,9 @@ client.lipsync_tts(
-**text_prompt:** `str` +**documents:** `from __future__ import annotations + +typing.List[core.File]` — See core.File for more documentation
@@ -2303,7 +2462,7 @@ client.lipsync_tts(
-**functions:** `typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.List[DocSummaryRequestFunctionsItem]]`
@@ -2319,7 +2478,7 @@ client.lipsync_tts(
-**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]` +**task_instructions:** `typing.Optional[str]`
@@ -2327,7 +2486,7 @@ client.lipsync_tts(
-**uberduck_voice_name:** `typing.Optional[str]` +**merge_instructions:** `typing.Optional[str]`
@@ -2335,7 +2494,7 @@ client.lipsync_tts(
-**uberduck_speaking_rate:** `typing.Optional[float]` +**selected_model:** `typing.Optional[DocSummaryRequestSelectedModel]`
@@ -2343,7 +2502,7 @@ client.lipsync_tts(
-**google_voice_name:** `typing.Optional[str]` +**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
@@ -2351,7 +2510,7 @@ client.lipsync_tts(
-**google_speaking_rate:** `typing.Optional[float]` +**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
@@ -2359,7 +2518,7 @@ client.lipsync_tts(
-**google_pitch:** `typing.Optional[float]` +**google_translate_target:** `typing.Optional[str]`
@@ -2367,7 +2526,7 @@ client.lipsync_tts(
-**bark_history_prompt:** `typing.Optional[str]` +**avoid_repetition:** `typing.Optional[bool]`
@@ -2375,7 +2534,7 @@ client.lipsync_tts(
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**num_outputs:** `typing.Optional[int]`
@@ -2383,7 +2542,7 @@ client.lipsync_tts(
-**elevenlabs_api_key:** `typing.Optional[str]` +**quality:** `typing.Optional[float]`
@@ -2391,7 +2550,7 @@ client.lipsync_tts(
-**elevenlabs_voice_id:** `typing.Optional[str]` +**max_tokens:** `typing.Optional[int]`
@@ -2399,7 +2558,7 @@ client.lipsync_tts(
-**elevenlabs_model:** `typing.Optional[str]` +**sampling_temperature:** `typing.Optional[float]`
@@ -2407,7 +2566,7 @@ client.lipsync_tts(
-**elevenlabs_stability:** `typing.Optional[float]` +**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
@@ -2415,7 +2574,7 @@ client.lipsync_tts(
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**settings:** `typing.Optional[RunSettings]`
@@ -2423,81 +2582,53 @@ client.lipsync_tts(
-**elevenlabs_style:** `typing.Optional[float]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
-
-
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` -
+
+
client.functions(...)
-**azure_voice_name:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]` - -
-
-
-**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]` - -
-
- -
-
+```python +from gooey import Gooey -**input_face:** `from __future__ import annotations +client = Gooey( + api_key="YOUR_API_KEY", +) +client.functions() -typing.Optional[core.File]` — See core.File for more documentation - +```
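A hedged sketch of `client.functions(...)` using the `code` and `variables` parameters documented below. The JS snippet and variable names are hypothetical, and how `variables` is exposed to the snippet is not described in this excerpt.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Hypothetical JS snippet and variables; check the workflow docs for the
# exact execution environment.
result = client.functions(
    code="return 21 * 2;",
    variables={"threshold": 0.5},
)
print(result)
```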
- -
-
- -**face_padding_top:** `typing.Optional[int]` -
-
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
+#### ⚙️ Parameters
-**face_padding_left:** `typing.Optional[int]` - -
-
-
-**face_padding_right:** `typing.Optional[int]` +**example_id:** `typing.Optional[str]`
@@ -2505,7 +2636,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**sadtalker_settings:** `typing.Optional[LipsyncTtsRequestSadtalkerSettings]` +**code:** `typing.Optional[str]` — The JS code to be executed.
@@ -2513,7 +2644,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code
@@ -2541,7 +2672,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.text_to_speech(...) +
client.lipsync(...)
@@ -2559,9 +2690,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.text_to_speech( - text_prompt="text_prompt", -) +client.lipsync() ```
@@ -2577,7 +2706,7 @@ client.text_to_speech(
-**text_prompt:** `str` +**example_id:** `typing.Optional[str]`
@@ -2585,7 +2714,7 @@ client.text_to_speech(
-**example_id:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.List[LipsyncRequestFunctionsItem]]`
@@ -2593,7 +2722,7 @@ client.text_to_speech(
-**functions:** `typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2601,7 +2730,9 @@ client.text_to_speech(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -2609,7 +2740,7 @@ client.text_to_speech(
-**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]` +**face_padding_top:** `typing.Optional[int]`
@@ -2617,7 +2748,7 @@ client.text_to_speech(
-**uberduck_voice_name:** `typing.Optional[str]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -2625,7 +2756,7 @@ client.text_to_speech(
-**uberduck_speaking_rate:** `typing.Optional[float]` +**face_padding_left:** `typing.Optional[int]`
@@ -2633,7 +2764,7 @@ client.text_to_speech(
-**google_voice_name:** `typing.Optional[str]` +**face_padding_right:** `typing.Optional[int]`
@@ -2641,7 +2772,7 @@ client.text_to_speech(
-**google_speaking_rate:** `typing.Optional[float]` +**sadtalker_settings:** `typing.Optional[LipsyncRequestSadtalkerSettings]`
@@ -2649,7 +2780,7 @@ client.text_to_speech(
-**google_pitch:** `typing.Optional[float]` +**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
@@ -2657,7 +2788,9 @@ client.text_to_speech(
-**bark_history_prompt:** `typing.Optional[str]` +**input_audio:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -2665,7 +2798,7 @@ client.text_to_speech(
-**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead +**settings:** `typing.Optional[RunSettings]`
@@ -2673,23 +2806,55 @@ client.text_to_speech(
-**elevenlabs_api_key:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+ +
+ + + + +
+
client.lipsync_tts(...)
-**elevenlabs_voice_id:** `typing.Optional[str]` - +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.lipsync_tts( + text_prompt="text_prompt", +) + +``` +
+
+#### ⚙️ Parameters +
-**elevenlabs_model:** `typing.Optional[str]` +
+
+ +**text_prompt:** `str`
@@ -2697,7 +2862,7 @@ client.text_to_speech(
-**elevenlabs_stability:** `typing.Optional[float]` +**example_id:** `typing.Optional[str]`
@@ -2705,7 +2870,7 @@ client.text_to_speech(
-**elevenlabs_similarity_boost:** `typing.Optional[float]` +**functions:** `typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]]`
@@ -2713,7 +2878,7 @@ client.text_to_speech(
-**elevenlabs_style:** `typing.Optional[float]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2721,7 +2886,7 @@ client.text_to_speech(
-**elevenlabs_speaker_boost:** `typing.Optional[bool]` +**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
@@ -2729,7 +2894,7 @@ client.text_to_speech(
-**azure_voice_name:** `typing.Optional[str]` +**uberduck_voice_name:** `typing.Optional[str]`
@@ -2737,7 +2902,7 @@ client.text_to_speech(
-**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -2745,7 +2910,7 @@ client.text_to_speech(
-**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]` +**google_voice_name:** `typing.Optional[str]`
@@ -2753,7 +2918,7 @@ client.text_to_speech(
-**settings:** `typing.Optional[RunSettings]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -2761,55 +2926,71 @@ client.text_to_speech(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**google_pitch:** `typing.Optional[float]`
-
-
+
+
+**bark_history_prompt:** `typing.Optional[str]` +
-
-
client.speech_recognition(...)
-#### 🔌 Usage +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead + +
+
+**elevenlabs_api_key:** `typing.Optional[str]` + +
+
+
-```python -from gooey import Gooey +**elevenlabs_voice_id:** `typing.Optional[str]` + +
+
-client = Gooey( - api_key="YOUR_API_KEY", -) -client.speech_recognition() +
+
-``` +**elevenlabs_model:** `typing.Optional[str]` +
+ +
+
+ +**elevenlabs_stability:** `typing.Optional[float]` +
-#### ⚙️ Parameters -
+**elevenlabs_similarity_boost:** `typing.Optional[float]` + +
+
+
-**documents:** `from __future__ import annotations - -typing.List[core.File]` — See core.File for more documentation +**elevenlabs_style:** `typing.Optional[float]`
@@ -2817,7 +2998,7 @@ typing.List[core.File]` — See core.File for more documentation
-**example_id:** `typing.Optional[str]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -2825,7 +3006,7 @@ typing.List[core.File]` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]]` +**azure_voice_name:** `typing.Optional[str]`
@@ -2833,7 +3014,7 @@ typing.List[core.File]` — See core.File for more documentation
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]`
@@ -2841,7 +3022,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]` +**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]`
@@ -2849,7 +3030,9 @@ typing.List[core.File]` — See core.File for more documentation
-**language:** `typing.Optional[str]` +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -2857,7 +3040,7 @@ typing.List[core.File]` — See core.File for more documentation
-**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]` +**face_padding_top:** `typing.Optional[int]`
@@ -2865,7 +3048,7 @@ typing.List[core.File]` — See core.File for more documentation
-**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -2873,7 +3056,7 @@ typing.List[core.File]` — See core.File for more documentation
-**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead. +**face_padding_left:** `typing.Optional[int]`
@@ -2881,7 +3064,7 @@ typing.List[core.File]` — See core.File for more documentation
-**translation_source:** `typing.Optional[str]` +**face_padding_right:** `typing.Optional[int]`
@@ -2889,7 +3072,7 @@ typing.List[core.File]` — See core.File for more documentation
-**translation_target:** `typing.Optional[str]` +**sadtalker_settings:** `typing.Optional[LipsyncTtsRequestSadtalkerSettings]`
@@ -2897,9 +3080,7 @@ typing.List[core.File]` — See core.File for more documentation
-**glossary_document:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
@@ -2927,7 +3108,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.text_to_music(...) +
client.text_to_speech(...)
@@ -2945,7 +3126,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.text_to_music( +client.text_to_speech( text_prompt="text_prompt", ) @@ -2979,7 +3160,7 @@ client.text_to_music(
-**functions:** `typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]]`
@@ -2995,7 +3176,7 @@ client.text_to_music(
-**negative_prompt:** `typing.Optional[str]` +**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
@@ -3003,7 +3184,7 @@ client.text_to_music(
-**duration_sec:** `typing.Optional[float]` +**uberduck_voice_name:** `typing.Optional[str]`
@@ -3011,7 +3192,7 @@ client.text_to_music(
-**num_outputs:** `typing.Optional[int]` +**uberduck_speaking_rate:** `typing.Optional[float]`
@@ -3019,7 +3200,7 @@ client.text_to_music(
-**quality:** `typing.Optional[int]` +**google_voice_name:** `typing.Optional[str]`
@@ -3027,7 +3208,7 @@ client.text_to_music(
-**guidance_scale:** `typing.Optional[float]` +**google_speaking_rate:** `typing.Optional[float]`
@@ -3035,7 +3216,7 @@ client.text_to_music(
-**seed:** `typing.Optional[int]` +**google_pitch:** `typing.Optional[float]`
@@ -3043,7 +3224,7 @@ client.text_to_music(
-**sd2upscaling:** `typing.Optional[bool]` +**bark_history_prompt:** `typing.Optional[str]`
@@ -3051,7 +3232,7 @@ client.text_to_music(
-**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]` +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -3059,7 +3240,7 @@ client.text_to_music(
-**settings:** `typing.Optional[RunSettings]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -3067,69 +3248,31 @@ client.text_to_music(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**elevenlabs_voice_id:** `typing.Optional[str]`
-
-
+
+
+**elevenlabs_model:** `typing.Optional[str]` +
-
-
client.translate(...)
-#### 🔌 Usage - -
-
+**elevenlabs_stability:** `typing.Optional[float]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.translate() - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**example_id:** `typing.Optional[str]` - -
-
- -
-
- -**functions:** `typing.Optional[typing.List[TranslateRequestFunctionsItem]]` - -
-
- -
-
- -**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**elevenlabs_similarity_boost:** `typing.Optional[float]`
@@ -3137,7 +3280,7 @@ client.translate()
-**texts:** `typing.Optional[typing.List[str]]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -3145,7 +3288,7 @@ client.translate()
-**selected_model:** `typing.Optional[TranslateRequestSelectedModel]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -3153,7 +3296,7 @@ client.translate()
-**translation_source:** `typing.Optional[str]` +**azure_voice_name:** `typing.Optional[str]`
@@ -3161,7 +3304,7 @@ client.translate()
-**translation_target:** `typing.Optional[str]` +**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
@@ -3169,9 +3312,7 @@ client.translate()
-**glossary_document:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
@@ -3199,7 +3340,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.remix_image(...) +
client.speech_recognition(...)
@@ -3217,7 +3358,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.remix_image() +client.speech_recognition() ```
@@ -3233,9 +3374,9 @@ client.remix_image()
-**input_image:** `from __future__ import annotations +**documents:** `from __future__ import annotations -core.File` — See core.File for more documentation +typing.List[core.File]` — See core.File for more documentation
@@ -3251,7 +3392,7 @@ core.File` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[RemixImageRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]]`
@@ -3267,39 +3408,7 @@ core.File` — See core.File for more documentation
-**text_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]` - -
-
- -
-
- -**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]` - -
-
- -
-
- -**negative_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` +**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
@@ -3307,7 +3416,7 @@ core.File` — See core.File for more documentation
-**quality:** `typing.Optional[int]` +**language:** `typing.Optional[str]`
@@ -3315,7 +3424,7 @@ core.File` — See core.File for more documentation
-**output_width:** `typing.Optional[int]` +**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
@@ -3323,7 +3432,7 @@ core.File` — See core.File for more documentation
-**output_height:** `typing.Optional[int]` +**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
@@ -3331,7 +3440,7 @@ core.File` — See core.File for more documentation
-**guidance_scale:** `typing.Optional[float]` +**google_translate_target:** `typing.Optional[str]` — use `translation_model` & `translation_target` instead.
@@ -3339,7 +3448,7 @@ core.File` — See core.File for more documentation
-**prompt_strength:** `typing.Optional[float]` +**translation_source:** `typing.Optional[str]`
@@ -3347,7 +3456,7 @@ core.File` — See core.File for more documentation
-**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]` +**translation_target:** `typing.Optional[str]`
@@ -3355,15 +3464,9 @@ core.File` — See core.File for more documentation
-**seed:** `typing.Optional[int]` - -
-
- -
-
+**glossary_document:** `from __future__ import annotations -**image_guidance_scale:** `typing.Optional[float]` +typing.Optional[core.File]` — See core.File for more documentation
@@ -3391,7 +3494,7 @@ core.File` — See core.File for more documentation
-
client.text_to_image(...) +
client.text_to_music(...)
@@ -3409,7 +3512,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.text_to_image( +client.text_to_music( text_prompt="text_prompt", ) @@ -3443,7 +3546,7 @@ client.text_to_image(
-**functions:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]]`
@@ -3467,7 +3570,7 @@ client.text_to_image(
-**output_width:** `typing.Optional[int]` +**duration_sec:** `typing.Optional[float]`
@@ -3475,7 +3578,7 @@ client.text_to_image(
-**output_height:** `typing.Optional[int]` +**num_outputs:** `typing.Optional[int]`
@@ -3483,7 +3586,7 @@ client.text_to_image(
-**num_outputs:** `typing.Optional[int]` +**quality:** `typing.Optional[int]`
@@ -3491,7 +3594,7 @@ client.text_to_image(
-**quality:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3499,7 +3602,7 @@ client.text_to_image(
-**dall_e3quality:** `typing.Optional[str]` +**seed:** `typing.Optional[int]`
@@ -3507,7 +3610,7 @@ client.text_to_image(
-**dall_e3style:** `typing.Optional[str]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -3515,7 +3618,7 @@ client.text_to_image(
-**guidance_scale:** `typing.Optional[float]` +**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
@@ -3523,7 +3626,7 @@ client.text_to_image(
-**seed:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -3531,15 +3634,53 @@ client.text_to_image(
-**sd2upscaling:** `typing.Optional[bool]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
+ + + +
+ +
client.translate(...)
-**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]` +#### 🔌 Usage + +
+
+ +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.translate() + +``` +
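A hedged sketch of `client.translate(...)` with the `texts`, `translation_source`, and `translation_target` parameters documented below. The language-code format is an assumption; only the parameter names come from this diff.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Language codes are assumed; the accepted format is not shown in this excerpt.
result = client.translate(
    texts=["Bonjour tout le monde"],
    translation_source="fr",
    translation_target="en",
)
print(result)
```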
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**example_id:** `typing.Optional[str]`
@@ -3547,7 +3688,7 @@ client.text_to_image(
-**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]` +**functions:** `typing.Optional[typing.List[TranslateRequestFunctionsItem]]`
@@ -3555,7 +3696,7 @@ client.text_to_image(
-**edit_instruction:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3563,7 +3704,41 @@ client.text_to_image(
-**image_guidance_scale:** `typing.Optional[float]` +**texts:** `typing.Optional[typing.List[str]]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[TranslateRequestSelectedModel]` + +
+
+ +
+
+ +**translation_source:** `typing.Optional[str]` + +
+
+ +
+
+ +**translation_target:** `typing.Optional[str]` + +
+
+ +
+
+ +**glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -3591,7 +3766,7 @@ client.text_to_image(
-
client.product_image(...) +
client.remix_image(...)
@@ -3609,9 +3784,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.product_image( - text_prompt="text_prompt", -) +client.remix_image() ```
@@ -3637,7 +3810,7 @@ core.File` — See core.File for more documentation
-**text_prompt:** `str` +**example_id:** `typing.Optional[str]`
@@ -3645,7 +3818,7 @@ core.File` — See core.File for more documentation
-**example_id:** `typing.Optional[str]` +**functions:** `typing.Optional[typing.List[RemixImageRequestFunctionsItem]]`
@@ -3653,7 +3826,7 @@ core.File` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[ProductImageRequestFunctionsItem]]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3661,7 +3834,7 @@ core.File` — See core.File for more documentation
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**text_prompt:** `typing.Optional[str]`
@@ -3669,7 +3842,7 @@ core.File` — See core.File for more documentation
-**obj_scale:** `typing.Optional[float]` +**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
@@ -3677,7 +3850,7 @@ core.File` — See core.File for more documentation
-**obj_pos_x:** `typing.Optional[float]` +**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]`
@@ -3685,7 +3858,7 @@ core.File` — See core.File for more documentation
-**obj_pos_y:** `typing.Optional[float]` +**negative_prompt:** `typing.Optional[str]`
@@ -3693,7 +3866,7 @@ core.File` — See core.File for more documentation
-**mask_threshold:** `typing.Optional[float]` +**num_outputs:** `typing.Optional[int]`
@@ -3701,7 +3874,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]` +**quality:** `typing.Optional[int]`
@@ -3709,7 +3882,7 @@ core.File` — See core.File for more documentation
-**negative_prompt:** `typing.Optional[str]` +**output_width:** `typing.Optional[int]`
@@ -3717,7 +3890,7 @@ core.File` — See core.File for more documentation
-**num_outputs:** `typing.Optional[int]` +**output_height:** `typing.Optional[int]`
@@ -3725,7 +3898,7 @@ core.File` — See core.File for more documentation
-**quality:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3733,7 +3906,7 @@ core.File` — See core.File for more documentation
-**output_width:** `typing.Optional[int]` +**prompt_strength:** `typing.Optional[float]`
@@ -3741,7 +3914,7 @@ core.File` — See core.File for more documentation
-**output_height:** `typing.Optional[int]` +**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
@@ -3749,15 +3922,7 @@ core.File` — See core.File for more documentation
-**guidance_scale:** `typing.Optional[float]` - -
-
- -
-
- -**sd2upscaling:** `typing.Optional[bool]` +**seed:** `typing.Optional[int]`
@@ -3765,7 +3930,7 @@ core.File` — See core.File for more documentation
-**seed:** `typing.Optional[int]` +**image_guidance_scale:** `typing.Optional[float]`
@@ -3793,7 +3958,7 @@ core.File` — See core.File for more documentation
-
client.portrait(...) +
client.text_to_image(...)
@@ -3811,7 +3976,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.portrait( +client.text_to_image( text_prompt="text_prompt", ) @@ -3829,9 +3994,15 @@ client.portrait(
-**input_image:** `from __future__ import annotations +**text_prompt:** `str` + +
+
-core.File` — See core.File for more documentation +
+
+ +**example_id:** `typing.Optional[str]`
@@ -3839,7 +4010,7 @@ core.File` — See core.File for more documentation
-**text_prompt:** `str` +**functions:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]]`
@@ -3847,7 +4018,7 @@ core.File` — See core.File for more documentation
-**example_id:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3855,7 +4026,7 @@ core.File` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[PortraitRequestFunctionsItem]]` +**negative_prompt:** `typing.Optional[str]`
@@ -3863,7 +4034,7 @@ core.File` — See core.File for more documentation
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**output_width:** `typing.Optional[int]`
@@ -3871,7 +4042,7 @@ core.File` — See core.File for more documentation
-**face_scale:** `typing.Optional[float]` +**output_height:** `typing.Optional[int]`
@@ -3879,7 +4050,7 @@ core.File` — See core.File for more documentation
-**face_pos_x:** `typing.Optional[float]` +**num_outputs:** `typing.Optional[int]`
@@ -3887,7 +4058,7 @@ core.File` — See core.File for more documentation
-**face_pos_y:** `typing.Optional[float]` +**quality:** `typing.Optional[int]`
@@ -3895,7 +4066,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[PortraitRequestSelectedModel]` +**dall_e3quality:** `typing.Optional[str]`
@@ -3903,7 +4074,7 @@ core.File` — See core.File for more documentation
-**negative_prompt:** `typing.Optional[str]` +**dall_e3style:** `typing.Optional[str]`
@@ -3911,7 +4082,7 @@ core.File` — See core.File for more documentation
-**num_outputs:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -3919,7 +4090,7 @@ core.File` — See core.File for more documentation
-**quality:** `typing.Optional[int]` +**seed:** `typing.Optional[int]`
@@ -3927,7 +4098,7 @@ core.File` — See core.File for more documentation
-**upscale_factor:** `typing.Optional[float]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -3935,7 +4106,7 @@ core.File` — See core.File for more documentation
-**output_width:** `typing.Optional[int]` +**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
@@ -3943,7 +4114,7 @@ core.File` — See core.File for more documentation
-**output_height:** `typing.Optional[int]` +**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
@@ -3951,7 +4122,7 @@ core.File` — See core.File for more documentation
-**guidance_scale:** `typing.Optional[float]` +**edit_instruction:** `typing.Optional[str]`
@@ -3959,7 +4130,7 @@ core.File` — See core.File for more documentation
-**seed:** `typing.Optional[int]` +**image_guidance_scale:** `typing.Optional[float]`
@@ -3987,7 +4158,7 @@ core.File` — See core.File for more documentation
-
client.image_from_email(...) +
client.product_image(...)
@@ -4005,9 +4176,8 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", +client.product_image( + text_prompt="text_prompt", ) ``` @@ -4024,7 +4194,9 @@ client.image_from_email(
-**text_prompt:** `str` +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation
@@ -4032,7 +4204,7 @@ client.image_from_email(
-**example_id:** `typing.Optional[str]` +**text_prompt:** `str`
@@ -4040,7 +4212,7 @@ client.image_from_email(
-**functions:** `typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]]` +**example_id:** `typing.Optional[str]`
@@ -4048,7 +4220,7 @@ client.image_from_email(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.List[ProductImageRequestFunctionsItem]]`
@@ -4056,7 +4228,7 @@ client.image_from_email(
-**email_address:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4064,7 +4236,7 @@ client.image_from_email(
-**twitter_handle:** `typing.Optional[str]` +**obj_scale:** `typing.Optional[float]`
@@ -4072,7 +4244,7 @@ client.image_from_email(
-**face_scale:** `typing.Optional[float]` +**obj_pos_x:** `typing.Optional[float]`
@@ -4080,7 +4252,7 @@ client.image_from_email(
-**face_pos_x:** `typing.Optional[float]` +**obj_pos_y:** `typing.Optional[float]`
@@ -4088,7 +4260,7 @@ client.image_from_email(
-**face_pos_y:** `typing.Optional[float]` +**mask_threshold:** `typing.Optional[float]`
@@ -4096,7 +4268,7 @@ client.image_from_email(
-**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]` +**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
@@ -4128,14 +4300,6 @@ client.image_from_email(
-**upscale_factor:** `typing.Optional[float]` - -
-
- -
-
- **output_width:** `typing.Optional[int]`
@@ -4160,63 +4324,7 @@ client.image_from_email(
-**should_send_email:** `typing.Optional[bool]` - -
-
- -
-
- -**email_from:** `typing.Optional[str]` - -
-
- -
-
- -**email_cc:** `typing.Optional[str]` - -
-
- -
-
- -**email_bcc:** `typing.Optional[str]` - -
-
- -
-
- -**email_subject:** `typing.Optional[str]` - -
-
- -
-
- -**email_body:** `typing.Optional[str]` - -
-
- -
-
- -**email_body_enable_html:** `typing.Optional[bool]` - -
-
- -
-
- -**fallback_email_body:** `typing.Optional[str]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -4252,7 +4360,7 @@ client.image_from_email(
-
client.image_from_web_search(...) +
client.portrait(...)
@@ -4270,8 +4378,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.image_from_web_search( - search_query="search_query", +client.portrait( text_prompt="text_prompt", ) @@ -4289,7 +4396,9 @@ client.image_from_web_search(
-**search_query:** `str` +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation
@@ -4313,7 +4422,7 @@ client.image_from_web_search(
-**functions:** `typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.List[PortraitRequestFunctionsItem]]`
@@ -4329,7 +4438,7 @@ client.image_from_web_search(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**face_scale:** `typing.Optional[float]`
@@ -4337,7 +4446,7 @@ client.image_from_web_search(
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**face_pos_x:** `typing.Optional[float]`
@@ -4345,7 +4454,15 @@ client.image_from_web_search(
-**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` +**face_pos_y:** `typing.Optional[float]` + +
+
+ +
+
+ +**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
@@ -4377,7 +4494,7 @@ client.image_from_web_search(
-**guidance_scale:** `typing.Optional[float]` +**upscale_factor:** `typing.Optional[float]`
@@ -4385,7 +4502,7 @@ client.image_from_web_search(
-**prompt_strength:** `typing.Optional[float]` +**output_width:** `typing.Optional[int]`
@@ -4393,7 +4510,7 @@ client.image_from_web_search(
-**sd2upscaling:** `typing.Optional[bool]` +**output_height:** `typing.Optional[int]`
@@ -4401,7 +4518,7 @@ client.image_from_web_search(
-**seed:** `typing.Optional[int]` +**guidance_scale:** `typing.Optional[float]`
@@ -4409,7 +4526,7 @@ client.image_from_web_search(
-**image_guidance_scale:** `typing.Optional[float]` +**seed:** `typing.Optional[int]`
@@ -4437,7 +4554,7 @@ client.image_from_web_search(
-
client.remove_background(...) +
client.image_from_email(...)
@@ -4455,7 +4572,10 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.remove_background() +client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", +) ```
@@ -4471,9 +4591,7 @@ client.remove_background()
-**input_image:** `from __future__ import annotations - -core.File` — See core.File for more documentation +**text_prompt:** `str`
@@ -4489,7 +4607,7 @@ core.File` — See core.File for more documentation
-**functions:** `typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]]` +**functions:** `typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]]`
@@ -4505,7 +4623,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]` +**email_address:** `typing.Optional[str]`
@@ -4513,7 +4631,7 @@ core.File` — See core.File for more documentation
-**mask_threshold:** `typing.Optional[float]` +**twitter_handle:** `typing.Optional[str]`
@@ -4521,7 +4639,7 @@ core.File` — See core.File for more documentation
-**rect_persepective_transform:** `typing.Optional[bool]` +**face_scale:** `typing.Optional[float]`
@@ -4529,7 +4647,7 @@ core.File` — See core.File for more documentation
-**reflection_opacity:** `typing.Optional[float]` +**face_pos_x:** `typing.Optional[float]`
@@ -4537,7 +4655,7 @@ core.File` — See core.File for more documentation
-**obj_scale:** `typing.Optional[float]` +**face_pos_y:** `typing.Optional[float]`
@@ -4545,7 +4663,7 @@ core.File` — See core.File for more documentation
-**obj_pos_x:** `typing.Optional[float]` +**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
@@ -4553,7 +4671,7 @@ core.File` — See core.File for more documentation
-**obj_pos_y:** `typing.Optional[float]` +**negative_prompt:** `typing.Optional[str]`
@@ -4561,7 +4679,7 @@ core.File` — See core.File for more documentation
-**settings:** `typing.Optional[RunSettings]` +**num_outputs:** `typing.Optional[int]`
@@ -4569,55 +4687,47 @@ core.File` — See core.File for more documentation
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**quality:** `typing.Optional[int]`
- -
+
+
+**upscale_factor:** `typing.Optional[float]` +
-
-
client.upscale(...)
-#### 🔌 Usage +**output_width:** `typing.Optional[int]` + +
+
+**output_height:** `typing.Optional[int]` + +
+
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.upscale( - scale=1, -) - -``` -
-
+**guidance_scale:** `typing.Optional[float]` + -#### ⚙️ Parameters -
-
-
- -**scale:** `int` — The final upsampling scale of the image +**should_send_email:** `typing.Optional[bool]`
@@ -4625,7 +4735,7 @@ client.upscale(
-**example_id:** `typing.Optional[str]` +**email_from:** `typing.Optional[str]`
@@ -4633,7 +4743,7 @@ client.upscale(
-**functions:** `typing.Optional[typing.List[UpscaleRequestFunctionsItem]]` +**email_cc:** `typing.Optional[str]`
@@ -4641,7 +4751,7 @@ client.upscale(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**email_bcc:** `typing.Optional[str]`
@@ -4649,9 +4759,7 @@ client.upscale(
-**input_image:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**email_subject:** `typing.Optional[str]`
@@ -4659,9 +4767,15 @@ typing.Optional[core.File]` — See core.File for more documentation
-**input_video:** `from __future__ import annotations +**email_body:** `typing.Optional[str]` + +
+
-typing.Optional[core.File]` — See core.File for more documentation +
+
+ +**email_body_enable_html:** `typing.Optional[bool]`
@@ -4669,7 +4783,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]` +**fallback_email_body:** `typing.Optional[str]`
@@ -4677,7 +4791,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]` +**seed:** `typing.Optional[int]`
@@ -4705,7 +4819,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
client.embed(...) +
client.image_from_web_search(...)
@@ -4723,8 +4837,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.embed( - texts=["texts"], +client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", ) ``` @@ -4741,7 +4856,7 @@ client.embed(
-**texts:** `typing.Sequence[str]` +**search_query:** `str`
@@ -4749,7 +4864,7 @@ client.embed(
-**example_id:** `typing.Optional[str]` +**text_prompt:** `str`
@@ -4757,7 +4872,7 @@ client.embed(
-**functions:** `typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]]` +**example_id:** `typing.Optional[str]`
@@ -4765,7 +4880,7 @@ client.embed(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]]`
@@ -4773,7 +4888,7 @@ client.embed(
-**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4781,7 +4896,7 @@ client.embed(
-**settings:** `typing.Optional[RunSettings]` +**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -4789,55 +4904,23 @@ client.embed(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
-
-
- - - - -
- -
client.seo_people_also_ask_doc(...) -
-
- -#### 🔌 Usage - -
-
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.seo_people_also_ask_doc( - search_query="search_query", -) - -``` -
-
+**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]` +
-#### ⚙️ Parameters - -
-
-
-**search_query:** `str` +**negative_prompt:** `typing.Optional[str]`
@@ -4845,7 +4928,7 @@ client.seo_people_also_ask_doc(
-**example_id:** `typing.Optional[str]` +**num_outputs:** `typing.Optional[int]`
@@ -4853,7 +4936,7 @@ client.seo_people_also_ask_doc(
-**functions:** `typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]]` +**quality:** `typing.Optional[int]`
@@ -4861,7 +4944,7 @@ client.seo_people_also_ask_doc(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**guidance_scale:** `typing.Optional[float]`
@@ -4869,7 +4952,7 @@ client.seo_people_also_ask_doc(
-**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]` +**prompt_strength:** `typing.Optional[float]`
@@ -4877,7 +4960,7 @@ client.seo_people_also_ask_doc(
-**documents:** `typing.Optional[typing.Sequence[str]]` +**sd2upscaling:** `typing.Optional[bool]`
@@ -4885,7 +4968,7 @@ client.seo_people_also_ask_doc(
-**max_references:** `typing.Optional[int]` +**seed:** `typing.Optional[int]`
@@ -4893,7 +4976,7 @@ client.seo_people_also_ask_doc(
-**max_context_words:** `typing.Optional[int]` +**image_guidance_scale:** `typing.Optional[float]`
@@ -4901,7 +4984,7 @@ client.seo_people_also_ask_doc(
-**scroll_jump:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -4909,60 +4992,55 @@ client.seo_people_also_ask_doc(
-**doc_extract_url:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
+
+
-
-
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]` -
+
+
client.remove_background(...)
-**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
+#### 🔌 Usage
-**task_instructions:** `typing.Optional[str]` - -
-
-
-**query_instructions:** `typing.Optional[str]` - +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.remove_background() + +``` +
+
+#### ⚙️ Parameters +
-**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]` - -
-
-
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]` +**input_image:** `from __future__ import annotations + +core.File` — See core.File for more documentation
@@ -4970,7 +5048,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**avoid_repetition:** `typing.Optional[bool]` +**example_id:** `typing.Optional[str]`
@@ -4978,7 +5056,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**num_outputs:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]]`
@@ -4986,7 +5064,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**quality:** `typing.Optional[float]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -4994,7 +5072,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**max_tokens:** `typing.Optional[int]` +**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
@@ -5002,7 +5080,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**sampling_temperature:** `typing.Optional[float]` +**mask_threshold:** `typing.Optional[float]`
@@ -5010,7 +5088,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]` +**rect_persepective_transform:** `typing.Optional[bool]`
@@ -5018,7 +5096,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]` +**reflection_opacity:** `typing.Optional[float]`
@@ -5026,7 +5104,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead +**obj_scale:** `typing.Optional[float]`
@@ -5034,7 +5112,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_type:** `typing.Optional[SerpSearchType]` +**obj_pos_x:** `typing.Optional[float]`
@@ -5042,7 +5120,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead +**obj_pos_y:** `typing.Optional[float]`
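
`input_image` is typed as `core.File` above. Assuming, as is typical for Fern-generated clients, that an open binary file handle is accepted, a call might look like the following sketch; the filename and the `mask_threshold` value are placeholders.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
# Assumption: core.File accepts a file-like object opened in binary mode.
with open("product_photo.png", "rb") as image_file:
    result = client.remove_background(
        input_image=image_file,
        mask_threshold=0.5,  # illustrative value for the optional float parameter
    )
```
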
@@ -5070,7 +5148,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
client.health_status_get() +
client.upscale(...)
@@ -5088,7 +5166,9 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.health_status_get() +client.upscale( + scale=1, +) ```
@@ -5104,56 +5184,23 @@ client.health_status_get()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**scale:** `int` — The final upsampling scale of the image
- -
- - - - -
- -## CopilotIntegrations -
client.copilot_integrations.video_bots_stream_create(...) -
-
- -#### 🔌 Usage - -
-
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", -) - -``` -
-
+**example_id:** `typing.Optional[str]` +
-#### ⚙️ Parameters - -
-
-
-**integration_id:** `str` — Your Integration ID as shown in the Copilot Integrations tab +**functions:** `typing.Optional[typing.List[UpscaleRequestFunctionsItem]]`
@@ -5161,13 +5208,7 @@ client.copilot_integrations.video_bots_stream_create(
-**conversation_id:** `typing.Optional[str]` - -The gooey conversation ID. - -If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - -Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -5175,11 +5216,9 @@ Note that you may not provide a custom ID here, and must only use the `conversat
-**user_id:** `typing.Optional[str]` - -Your app's custom user ID. +**input_image:** `from __future__ import annotations -If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. +typing.Optional[core.File]` — See core.File for more documentation
@@ -5187,11 +5226,9 @@ If not provided, a random user will be created and a new ID will be returned in
-**user_message_id:** `typing.Optional[str]` - -Your app's custom message ID for the user message. +**input_video:** `from __future__ import annotations -If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. +typing.Optional[core.File]` — See core.File for more documentation
@@ -5199,7 +5236,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**button_pressed:** `typing.Optional[ButtonPressed]` — The button that was pressed by the user. +**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
@@ -5207,7 +5244,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]` +**selected_bg_model:** `typing.Optional[typing.Literal["real_esrgan_x2"]]`
@@ -5215,7 +5252,7 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**settings:** `typing.Optional[RunSettings]`
@@ -5223,579 +5260,39 @@ If not provided, a random ID will be generated and returned in the response. Thi
-**input_prompt:** `typing.Optional[str]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
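
Similarly, a hedged sketch of `upscale` with a local image, under the same assumption that `core.File` parameters accept open binary file handles; `scale=2` and the filename are illustrative.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
# Assumption: one of input_image / input_video is supplied as a binary file handle.
with open("low_res.png", "rb") as image_file:
    result = client.upscale(
        scale=2,                 # final upsampling scale of the image
        input_image=image_file,
    )
```
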
- -
-
- -**input_audio:** `typing.Optional[str]` -
-
-
-**input_images:** `typing.Optional[typing.Sequence[str]]` -
+
+
client.embed(...)
-**input_documents:** `typing.Optional[typing.Sequence[str]]` - -
-
+#### 🔌 Usage
-**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. - -
-
-
-**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]` - -
-
- -
-
- -**bot_script:** `typing.Optional[str]` - -
-
- -
-
- -**selected_model:** `typing.Optional[CreateStreamRequestSelectedModel]` - -
-
- -
-
- -**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - -
-
- -
-
- -**task_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**query_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**keyword_instructions:** `typing.Optional[str]` - -
-
- -
-
- -**documents:** `typing.Optional[typing.Sequence[str]]` - -
-
- -
-
- -**max_references:** `typing.Optional[int]` - -
-
- -
-
- -**max_context_words:** `typing.Optional[int]` - -
-
- -
-
- -**scroll_jump:** `typing.Optional[int]` - -
-
- -
-
- -**embedding_model:** `typing.Optional[CreateStreamRequestEmbeddingModel]` - -
-
- -
-
- -**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - -
-
- -
-
- -**citation_style:** `typing.Optional[CreateStreamRequestCitationStyle]` - -
-
- -
-
- -**use_url_shortener:** `typing.Optional[bool]` - -
-
- -
-
- -**asr_model:** `typing.Optional[CreateStreamRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. - -
-
- -
-
- -**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. - -
-
- -
-
- -**translation_model:** `typing.Optional[CreateStreamRequestTranslationModel]` - -
-
- -
-
- -**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - -
-
- -
-
- -**input_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for User Langauge -> LLM Language (English) - - -
-
- -
-
- -**output_glossary_document:** `typing.Optional[str]` - - -Translation Glossary for LLM Language (English) -> User Langauge - - -
-
- -
-
- -**lipsync_model:** `typing.Optional[CreateStreamRequestLipsyncModel]` - -
-
- -
-
- -**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - -
-
- -
-
- -**avoid_repetition:** `typing.Optional[bool]` - -
-
- -
-
- -**num_outputs:** `typing.Optional[int]` - -
-
- -
-
- -**quality:** `typing.Optional[float]` - -
-
- -
-
- -**max_tokens:** `typing.Optional[int]` - -
-
- -
-
- -**sampling_temperature:** `typing.Optional[float]` - -
-
- -
-
- -**response_format_type:** `typing.Optional[CreateStreamRequestResponseFormatType]` - -
-
- -
-
- -**tts_provider:** `typing.Optional[CreateStreamRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**openai_voice_name:** `typing.Optional[CreateStreamRequestOpenaiVoiceName]` - -
-
- -
-
- -**openai_tts_model:** `typing.Optional[CreateStreamRequestOpenaiTtsModel]` - -
-
- -
-
- -**input_face:** `typing.Optional[str]` - -
-
- -
-
- -**face_padding_top:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_bottom:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_left:** `typing.Optional[int]` - -
-
- -
-
- -**face_padding_right:** `typing.Optional[int]` - -
-
- -
-
- -**sadtalker_settings:** `typing.Optional[SadTalkerSettings]` - -
-
- -
-
- -**input_text:** `typing.Optional[str]` — Use `input_prompt` instead - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
- - - - - - -
- -
client.copilot_integrations.video_bots_stream(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.copilot_integrations.video_bots_stream( - request_id="request_id", -) - -``` -
-
-
-
- -#### ⚙️ Parameters - -
-
- -
-
- -**request_id:** `str` - -
-
- -
-
- -**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. - -
-
-
-
- - -
-
-
- -## CopilotForYourEnterprise -
client.copilot_for_your_enterprise.async_video_bots(...) -
-
- -#### 🔌 Usage - -
-
- -
-
- -```python -from gooey import Gooey +```python +from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.copilot_for_your_enterprise.async_video_bots() +client.embed( + texts=["texts"], +) ```
@@ -5811,7 +5308,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**example_id:** `typing.Optional[str]` +**texts:** `typing.Sequence[str]`
@@ -5819,7 +5316,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**functions:** `typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]]` +**example_id:** `typing.Optional[str]`
@@ -5827,7 +5324,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**functions:** `typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]]`
@@ -5835,7 +5332,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_prompt:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -5843,7 +5340,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_audio:** `typing.Optional[str]` +**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
@@ -5851,9 +5348,7 @@ client.copilot_for_your_enterprise.async_video_bots()
-**input_images:** `from __future__ import annotations - -typing.Optional[typing.List[core.File]]` — See core.File for more documentation +**settings:** `typing.Optional[RunSettings]`
@@ -5861,83 +5356,55 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**input_documents:** `from __future__ import annotations - -typing.Optional[typing.List[core.File]]` — See core.File for more documentation +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
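
A small editorial sketch of `embed` with more than one input string; `texts` is the only required argument per the reference above, and the response shape is not shown in this section, so only the call is illustrated.

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
# Illustrative inputs for the required texts parameter.
result = client.embed(
    texts=[
        "first passage to embed",
        "second passage to embed",
    ],
)
```
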
- -
-
- -**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images. -
-
-
-**messages:** `typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]]` -
+
+
client.seo_people_also_ask_doc(...)
-**bot_script:** `typing.Optional[str]` - -
-
+#### 🔌 Usage
-**selected_model:** `typing.Optional[AsyncVideoBotsRequestSelectedModel]` - -
-
-
-**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - -
-
+```python +from gooey import Gooey -
-
+client = Gooey( + api_key="YOUR_API_KEY", +) +client.seo_people_also_ask_doc( + search_query="search_query", +) -**task_instructions:** `typing.Optional[str]` - +```
- -
-
- -**query_instructions:** `typing.Optional[str]` -
+#### ⚙️ Parameters +
-**keyword_instructions:** `typing.Optional[str]` - -
-
-
-**documents:** `from __future__ import annotations - -typing.Optional[typing.List[core.File]]` — See core.File for more documentation +**search_query:** `str`
@@ -5945,7 +5412,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**max_references:** `typing.Optional[int]` +**example_id:** `typing.Optional[str]`
@@ -5953,7 +5420,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**max_context_words:** `typing.Optional[int]` +**functions:** `typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]]`
@@ -5961,7 +5428,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**scroll_jump:** `typing.Optional[int]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -5969,7 +5436,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**embedding_model:** `typing.Optional[AsyncVideoBotsRequestEmbeddingModel]` +**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
@@ -5977,12 +5444,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**dense_weight:** `typing.Optional[float]` - - -Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. -Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - +**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5990,7 +5452,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[AsyncVideoBotsRequestCitationStyle]` +**max_references:** `typing.Optional[int]`
@@ -5998,7 +5460,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**use_url_shortener:** `typing.Optional[bool]` +**max_context_words:** `typing.Optional[int]`
@@ -6006,7 +5468,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[AsyncVideoBotsRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text. +**scroll_jump:** `typing.Optional[int]`
@@ -6014,7 +5476,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text. +**doc_extract_url:** `typing.Optional[str]`
@@ -6022,7 +5484,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[AsyncVideoBotsRequestTranslationModel]` +**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
@@ -6030,7 +5492,12 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. +**dense_weight:** `typing.Optional[float]` + + +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. +
@@ -6038,9 +5505,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**input_glossary_document:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**task_instructions:** `typing.Optional[str]`
@@ -6048,9 +5513,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**output_glossary_document:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**query_instructions:** `typing.Optional[str]`
@@ -6058,7 +5521,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**lipsync_model:** `typing.Optional[AsyncVideoBotsRequestLipsyncModel]` +**selected_model:** `typing.Optional[RelatedQnADocPageRequestSelectedModel]`
@@ -6066,7 +5529,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**tools:** `typing.Optional[typing.List[typing.Literal["json_to_pdf"]]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). +**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
@@ -6114,135 +5577,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[AsyncVideoBotsRequestResponseFormatType]` - -
-
- -
-
- -**tts_provider:** `typing.Optional[AsyncVideoBotsRequestTtsProvider]` - -
-
- -
-
- -**uberduck_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**uberduck_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_voice_name:** `typing.Optional[str]` - -
-
- -
-
- -**google_speaking_rate:** `typing.Optional[float]` - -
-
- -
-
- -**google_pitch:** `typing.Optional[float]` - -
-
- -
-
- -**bark_history_prompt:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead - -
-
- -
-
- -**elevenlabs_api_key:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_voice_id:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_model:** `typing.Optional[str]` - -
-
- -
-
- -**elevenlabs_stability:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_similarity_boost:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_style:** `typing.Optional[float]` - -
-
- -
-
- -**elevenlabs_speaker_boost:** `typing.Optional[bool]` - -
-
- -
-
- -**azure_voice_name:** `typing.Optional[str]` +**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
@@ -6250,7 +5585,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**openai_voice_name:** `typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName]` +**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -6258,7 +5593,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**openai_tts_model:** `typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel]` +**scaleserp_locations:** `typing.Optional[typing.Sequence[str]]` — DEPRECATED: use `serp_search_location` instead
@@ -6266,9 +5601,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**input_face:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**serp_search_type:** `typing.Optional[SerpSearchType]`
@@ -6276,7 +5609,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_top:** `typing.Optional[int]` +**scaleserp_search_field:** `typing.Optional[str]` — DEPRECATED: use `serp_search_type` instead
@@ -6284,7 +5617,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_bottom:** `typing.Optional[int]` +**settings:** `typing.Optional[RunSettings]`
@@ -6292,35 +5625,49 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_left:** `typing.Optional[int]` +**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
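
The `dense_weight` note above (`0` for sparse only, `1` for dense only, `0.5` for equal weight) is the parameter most worth a worked example. A hedged sketch, with a placeholder document URL and illustrative values:

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)
# dense_weight=0.5 weights dense and sparse embeddings equally, per the description above.
result = client.seo_people_also_ask_doc(
    search_query="how to glaze ceramics",
    documents=["https://example.com/my-knowledge-base.pdf"],  # placeholder URL
    dense_weight=0.5,
    max_references=3,  # illustrative
)
```
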
+ + -
-
-**face_padding_right:** `typing.Optional[int]` -
+
+
client.health_status_get()
-**sadtalker_settings:** `typing.Optional[AsyncVideoBotsRequestSadtalkerSettings]` - -
-
+#### 🔌 Usage
-**settings:** `typing.Optional[RunSettings]` - +
+
+ +```python +from gooey import Gooey + +client = Gooey( + api_key="YOUR_API_KEY", +) +client.health_status_get() + +``` +
+
+#### ⚙️ Parameters + +
+
+
@@ -6336,8 +5683,8 @@ typing.Optional[core.File]` — See core.File for more documentation
-## Evaluator -
client.evaluator.async_bulk_eval(...) +## Copilot +
client.copilot.completion(...)
@@ -6355,9 +5702,7 @@ from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) -client.evaluator.async_bulk_eval( - documents=["documents"], -) +client.copilot.completion() ```
@@ -6373,13 +5718,15 @@ client.evaluator.async_bulk_eval(
-**documents:** `typing.Sequence[str]` +**example_id:** `typing.Optional[str]` + +
+
+
+
-Upload or link to a CSV or google sheet that contains your sample input data. -For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. -Remember to includes header names in your CSV too. - +**functions:** `typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]`
@@ -6387,7 +5734,7 @@ Remember to includes header names in your CSV too.
-**example_id:** `typing.Optional[str]` +**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6395,7 +5742,7 @@ Remember to includes header names in your CSV too.
-**functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]]` +**input_prompt:** `typing.Optional[str]`
@@ -6403,7 +5750,7 @@ Remember to includes header names in your CSV too.
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**input_audio:** `typing.Optional[str]`
@@ -6411,12 +5758,9 @@ Remember to includes header names in your CSV too.
-**eval_prompts:** `typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]]` - +**input_images:** `from __future__ import annotations -Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. -_The `columns` dictionary can be used to reference the spreadsheet columns._ - +typing.Optional[typing.List[core.File]]` — See core.File for more documentation
@@ -6424,11 +5768,9 @@ _The `columns` dictionary can be used to reference the spreadsheet columns._
-**agg_functions:** `typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]]` - +**input_documents:** `from __future__ import annotations -Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - +typing.Optional[typing.List[core.File]]` — See core.File for more documentation
@@ -6436,7 +5778,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**selected_model:** `typing.Optional[BulkEvalPageRequestSelectedModel]` +**doc_extract_url:** `typing.Optional[str]` — Select a workflow to extract text from documents and images.
@@ -6444,7 +5786,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**avoid_repetition:** `typing.Optional[bool]` +**messages:** `typing.Optional[typing.List[CopilotCompletionRequestMessagesItem]]`
@@ -6452,7 +5794,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**num_outputs:** `typing.Optional[int]` +**bot_script:** `typing.Optional[str]`
@@ -6460,7 +5802,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**quality:** `typing.Optional[float]` +**selected_model:** `typing.Optional[CopilotCompletionRequestSelectedModel]`
@@ -6468,7 +5810,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**max_tokens:** `typing.Optional[int]`
+**document_model:** `typing.Optional[str]` — When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
@@ -6476,7 +5818,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**sampling_temperature:** `typing.Optional[float]` +**task_instructions:** `typing.Optional[str]`
@@ -6484,7 +5826,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]` +**query_instructions:** `typing.Optional[str]`
@@ -6492,7 +5834,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**settings:** `typing.Optional[RunSettings]` +**keyword_instructions:** `typing.Optional[str]`
@@ -6500,56 +5842,78 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**documents:** `from __future__ import annotations + +typing.Optional[typing.List[core.File]]` — See core.File for more documentation
+ +
+
+ +**max_references:** `typing.Optional[int]` +
+
+
+**max_context_words:** `typing.Optional[int]` +
-
-## SmartGpt -
client.smart_gpt.async_smart_gpt(...)
-#### 🔌 Usage +**scroll_jump:** `typing.Optional[int]` + +
+
+**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]` + +
+
+
-```python -from gooey import Gooey +**dense_weight:** `typing.Optional[float]` -client = Gooey( - api_key="YOUR_API_KEY", -) -client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", -) -``` +Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. +Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + +
+ +
+
+ +**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]` +
-#### ⚙️ Parameters -
+**use_url_shortener:** `typing.Optional[bool]` + +
+
+
-**input_prompt:** `str` +**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -6557,7 +5921,7 @@ client.smart_gpt.async_smart_gpt(
-**example_id:** `typing.Optional[str]` +**asr_language:** `typing.Optional[str]` — Choose a language to transcribe incoming audio messages to text.
@@ -6565,7 +5929,7 @@ client.smart_gpt.async_smart_gpt(
-**functions:** `typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]]` +**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
@@ -6573,7 +5937,7 @@ client.smart_gpt.async_smart_gpt(
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**user_language:** `typing.Optional[str]` — Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -6581,7 +5945,9 @@ client.smart_gpt.async_smart_gpt(
-**cot_prompt:** `typing.Optional[str]` +**input_glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -6589,7 +5955,9 @@ client.smart_gpt.async_smart_gpt(
-**reflexion_prompt:** `typing.Optional[str]` +**output_glossary_document:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -6597,7 +5965,7 @@ client.smart_gpt.async_smart_gpt(
-**dera_prompt:** `typing.Optional[str]` +**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
@@ -6605,7 +5973,7 @@ client.smart_gpt.async_smart_gpt(
-**selected_model:** `typing.Optional[SmartGptPageRequestSelectedModel]` +**tools:** `typing.Optional[typing.List[typing.Literal["json_to_pdf"]]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -6653,7 +6021,7 @@ client.smart_gpt.async_smart_gpt(
-**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]` +**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
@@ -6661,7 +6029,7 @@ client.smart_gpt.async_smart_gpt(
-**settings:** `typing.Optional[RunSettings]` +**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
@@ -6669,54 +6037,39 @@ client.smart_gpt.async_smart_gpt(
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**uberduck_voice_name:** `typing.Optional[str]`
- - +
+
+**uberduck_speaking_rate:** `typing.Optional[float]` +
-
-## Functions -
client.functions.async_functions(...)
-#### 🔌 Usage - -
-
+**google_voice_name:** `typing.Optional[str]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.functions.async_functions() - -``` -
-
+**google_speaking_rate:** `typing.Optional[float]` +
-#### ⚙️ Parameters - -
-
-
-**example_id:** `typing.Optional[str]` +**google_pitch:** `typing.Optional[float]`
@@ -6724,7 +6077,7 @@ client.functions.async_functions()
-**code:** `typing.Optional[str]` — The JS code to be executed. +**bark_history_prompt:** `typing.Optional[str]`
@@ -6732,7 +6085,7 @@ client.functions.async_functions()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used in the code +**elevenlabs_voice_name:** `typing.Optional[str]` — Use `elevenlabs_voice_id` instead
@@ -6740,7 +6093,7 @@ client.functions.async_functions()
-**settings:** `typing.Optional[RunSettings]` +**elevenlabs_api_key:** `typing.Optional[str]`
@@ -6748,54 +6101,39 @@ client.functions.async_functions()
-**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration. +**elevenlabs_voice_id:** `typing.Optional[str]`
-
-
+
+
+**elevenlabs_model:** `typing.Optional[str]` +
-
-## LipSyncing -
client.lip_syncing.async_lipsync(...)
-#### 🔌 Usage - -
-
+**elevenlabs_stability:** `typing.Optional[float]` + +
+
-```python -from gooey import Gooey - -client = Gooey( - api_key="YOUR_API_KEY", -) -client.lip_syncing.async_lipsync() - -``` -
-
+**elevenlabs_similarity_boost:** `typing.Optional[float]` +
-#### ⚙️ Parameters - -
-
-
-**example_id:** `typing.Optional[str]` +**elevenlabs_style:** `typing.Optional[float]`
@@ -6803,7 +6141,7 @@ client.lip_syncing.async_lipsync()
-**functions:** `typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]]` +**elevenlabs_speaker_boost:** `typing.Optional[bool]`
@@ -6811,7 +6149,7 @@ client.lip_syncing.async_lipsync()
-**variables:** `typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]` — Variables to be used as Jinja prompt templates and in functions as arguments +**azure_voice_name:** `typing.Optional[str]`
@@ -6819,9 +6157,7 @@ client.lip_syncing.async_lipsync()
-**input_face:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**openai_voice_name:** `typing.Optional[CopilotCompletionRequestOpenaiVoiceName]`
@@ -6829,7 +6165,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_top:** `typing.Optional[int]` +**openai_tts_model:** `typing.Optional[CopilotCompletionRequestOpenaiTtsModel]`
@@ -6837,7 +6173,9 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_bottom:** `typing.Optional[int]` +**input_face:** `from __future__ import annotations + +typing.Optional[core.File]` — See core.File for more documentation
@@ -6845,7 +6183,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_left:** `typing.Optional[int]` +**face_padding_top:** `typing.Optional[int]`
@@ -6853,7 +6191,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**face_padding_right:** `typing.Optional[int]` +**face_padding_bottom:** `typing.Optional[int]`
@@ -6861,7 +6199,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**sadtalker_settings:** `typing.Optional[AsyncLipsyncRequestSadtalkerSettings]` +**face_padding_left:** `typing.Optional[int]`
@@ -6869,7 +6207,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[AsyncLipsyncRequestSelectedModel]` +**face_padding_right:** `typing.Optional[int]`
@@ -6877,9 +6215,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**input_audio:** `from __future__ import annotations - -typing.Optional[core.File]` — See core.File for more documentation +**sadtalker_settings:** `typing.Optional[CopilotCompletionRequestSadtalkerSettings]`
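
Taken together, the reference changes above fold the previous `CopilotForYourEnterprise` and `CopilotIntegrations` sections into a single `Copilot` namespace: based on this diff, a call that used to go through `client.copilot_for_your_enterprise.async_video_bots(...)` now goes through `client.copilot.completion(...)`, with the request types renamed from `AsyncVideoBotsRequest*` to `CopilotCompletionRequest*`. A minimal before/after sketch, assuming the keyword parameters shown above carry over unchanged:

```python
from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",
)

# Before this release (per the removed CopilotForYourEnterprise section):
# client.copilot_for_your_enterprise.async_video_bots(input_prompt="Hello!")

# After this release (per the new Copilot section above):
result = client.copilot.completion(
    input_prompt="Hello!",  # illustrative prompt; all parameters here are optional
)
```
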
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index 96c44d8..33cebf3 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -21,6 +21,13 @@ BalanceResponse, BotBroadcastFilters, BulkEvalPageOutput, + BulkEvalPageRequestAggFunctionsItem, + BulkEvalPageRequestAggFunctionsItemFunction, + BulkEvalPageRequestEvalPromptsItem, + BulkEvalPageRequestFunctionsItem, + BulkEvalPageRequestFunctionsItemTrigger, + BulkEvalPageRequestResponseFormatType, + BulkEvalPageRequestSelectedModel, BulkEvalPageStatusResponse, BulkRunRequestFunctionsItem, BulkRunRequestFunctionsItemTrigger, @@ -66,6 +73,17 @@ ConversationEntryContentItem_Text, ConversationEntryRole, ConversationStart, + CreateStreamRequest, + CreateStreamRequestAsrModel, + CreateStreamRequestCitationStyle, + CreateStreamRequestEmbeddingModel, + CreateStreamRequestLipsyncModel, + CreateStreamRequestOpenaiTtsModel, + CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, + CreateStreamRequestSelectedModel, + CreateStreamRequestTranslationModel, + CreateStreamRequestTtsProvider, CreateStreamResponse, DeforumSdPageOutput, DeforumSdPageRequestAnimationPromptsItem, @@ -168,6 +186,11 @@ LipsyncPageRequestSadtalkerSettingsPreprocess, LipsyncPageRequestSelectedModel, LipsyncPageStatusResponse, + LipsyncRequestFunctionsItem, + LipsyncRequestFunctionsItemTrigger, + LipsyncRequestSadtalkerSettings, + LipsyncRequestSadtalkerSettingsPreprocess, + LipsyncRequestSelectedModel, LipsyncTtsPageOutput, LipsyncTtsPageRequest, LipsyncTtsPageRequestFunctionsItem, @@ -266,6 +289,10 @@ SerpSearchLocation, SerpSearchType, SmartGptPageOutput, + SmartGptPageRequestFunctionsItem, + SmartGptPageRequestFunctionsItemTrigger, + SmartGptPageRequestResponseFormatType, + SmartGptPageRequestSelectedModel, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, SocialLookupEmailPageRequestFunctionsItem, @@ -338,66 +365,31 @@ VideoBotsPageStatusResponse, ) from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError -from . import copilot_for_your_enterprise, copilot_integrations, evaluator, functions, lip_syncing, misc, smart_gpt +from . 
import copilot, misc from .client import AsyncGooey, Gooey -from .copilot_for_your_enterprise import ( - AsyncVideoBotsRequestAsrModel, - AsyncVideoBotsRequestCitationStyle, - AsyncVideoBotsRequestEmbeddingModel, - AsyncVideoBotsRequestFunctionsItem, - AsyncVideoBotsRequestFunctionsItemTrigger, - AsyncVideoBotsRequestLipsyncModel, - AsyncVideoBotsRequestMessagesItem, - AsyncVideoBotsRequestMessagesItemContent, - AsyncVideoBotsRequestMessagesItemContentItem, - AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, - AsyncVideoBotsRequestMessagesItemContentItem_Text, - AsyncVideoBotsRequestMessagesItemRole, - AsyncVideoBotsRequestOpenaiTtsModel, - AsyncVideoBotsRequestOpenaiVoiceName, - AsyncVideoBotsRequestResponseFormatType, - AsyncVideoBotsRequestSadtalkerSettings, - AsyncVideoBotsRequestSadtalkerSettingsPreprocess, - AsyncVideoBotsRequestSelectedModel, - AsyncVideoBotsRequestTranslationModel, - AsyncVideoBotsRequestTtsProvider, -) -from .copilot_integrations import ( - CreateStreamRequestAsrModel, - CreateStreamRequestCitationStyle, - CreateStreamRequestEmbeddingModel, - CreateStreamRequestLipsyncModel, - CreateStreamRequestOpenaiTtsModel, - CreateStreamRequestOpenaiVoiceName, - CreateStreamRequestResponseFormatType, - CreateStreamRequestSelectedModel, - CreateStreamRequestTranslationModel, - CreateStreamRequestTtsProvider, - VideoBotsStreamResponse, +from .copilot import ( + CopilotCompletionRequestAsrModel, + CopilotCompletionRequestCitationStyle, + CopilotCompletionRequestEmbeddingModel, + CopilotCompletionRequestFunctionsItem, + CopilotCompletionRequestFunctionsItemTrigger, + CopilotCompletionRequestLipsyncModel, + CopilotCompletionRequestMessagesItem, + CopilotCompletionRequestMessagesItemContent, + CopilotCompletionRequestMessagesItemContentItem, + CopilotCompletionRequestMessagesItemContentItem_ImageUrl, + CopilotCompletionRequestMessagesItemContentItem_Text, + CopilotCompletionRequestMessagesItemRole, + CopilotCompletionRequestOpenaiTtsModel, + CopilotCompletionRequestOpenaiVoiceName, + CopilotCompletionRequestResponseFormatType, + CopilotCompletionRequestSadtalkerSettings, + CopilotCompletionRequestSadtalkerSettingsPreprocess, + CopilotCompletionRequestSelectedModel, + CopilotCompletionRequestTranslationModel, + CopilotCompletionRequestTtsProvider, ) from .environment import GooeyEnvironment -from .evaluator import ( - BulkEvalPageRequestAggFunctionsItem, - BulkEvalPageRequestAggFunctionsItemFunction, - BulkEvalPageRequestEvalPromptsItem, - BulkEvalPageRequestFunctionsItem, - BulkEvalPageRequestFunctionsItemTrigger, - BulkEvalPageRequestResponseFormatType, - BulkEvalPageRequestSelectedModel, -) -from .lip_syncing import ( - AsyncLipsyncRequestFunctionsItem, - AsyncLipsyncRequestFunctionsItemTrigger, - AsyncLipsyncRequestSadtalkerSettings, - AsyncLipsyncRequestSadtalkerSettingsPreprocess, - AsyncLipsyncRequestSelectedModel, -) -from .smart_gpt import ( - SmartGptPageRequestFunctionsItem, - SmartGptPageRequestFunctionsItemTrigger, - SmartGptPageRequestResponseFormatType, - SmartGptPageRequestSelectedModel, -) from .version import __version__ __all__ = [ @@ -419,31 +411,6 @@ "AsrPageStatusResponse", "AsyncApiResponseModelV3", "AsyncGooey", - "AsyncLipsyncRequestFunctionsItem", - "AsyncLipsyncRequestFunctionsItemTrigger", - "AsyncLipsyncRequestSadtalkerSettings", - "AsyncLipsyncRequestSadtalkerSettingsPreprocess", - "AsyncLipsyncRequestSelectedModel", - "AsyncVideoBotsRequestAsrModel", - "AsyncVideoBotsRequestCitationStyle", - "AsyncVideoBotsRequestEmbeddingModel", - 
"AsyncVideoBotsRequestFunctionsItem", - "AsyncVideoBotsRequestFunctionsItemTrigger", - "AsyncVideoBotsRequestLipsyncModel", - "AsyncVideoBotsRequestMessagesItem", - "AsyncVideoBotsRequestMessagesItemContent", - "AsyncVideoBotsRequestMessagesItemContentItem", - "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", - "AsyncVideoBotsRequestMessagesItemContentItem_Text", - "AsyncVideoBotsRequestMessagesItemRole", - "AsyncVideoBotsRequestOpenaiTtsModel", - "AsyncVideoBotsRequestOpenaiVoiceName", - "AsyncVideoBotsRequestResponseFormatType", - "AsyncVideoBotsRequestSadtalkerSettings", - "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", - "AsyncVideoBotsRequestSelectedModel", - "AsyncVideoBotsRequestTranslationModel", - "AsyncVideoBotsRequestTtsProvider", "BalanceResponse", "BotBroadcastFilters", "BulkEvalPageOutput", @@ -499,6 +466,27 @@ "ConversationEntryContentItem_Text", "ConversationEntryRole", "ConversationStart", + "CopilotCompletionRequestAsrModel", + "CopilotCompletionRequestCitationStyle", + "CopilotCompletionRequestEmbeddingModel", + "CopilotCompletionRequestFunctionsItem", + "CopilotCompletionRequestFunctionsItemTrigger", + "CopilotCompletionRequestLipsyncModel", + "CopilotCompletionRequestMessagesItem", + "CopilotCompletionRequestMessagesItemContent", + "CopilotCompletionRequestMessagesItemContentItem", + "CopilotCompletionRequestMessagesItemContentItem_ImageUrl", + "CopilotCompletionRequestMessagesItemContentItem_Text", + "CopilotCompletionRequestMessagesItemRole", + "CopilotCompletionRequestOpenaiTtsModel", + "CopilotCompletionRequestOpenaiVoiceName", + "CopilotCompletionRequestResponseFormatType", + "CopilotCompletionRequestSadtalkerSettings", + "CopilotCompletionRequestSadtalkerSettingsPreprocess", + "CopilotCompletionRequestSelectedModel", + "CopilotCompletionRequestTranslationModel", + "CopilotCompletionRequestTtsProvider", + "CreateStreamRequest", "CreateStreamRequestAsrModel", "CreateStreamRequestCitationStyle", "CreateStreamRequestEmbeddingModel", @@ -613,6 +601,11 @@ "LipsyncPageRequestSadtalkerSettingsPreprocess", "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", + "LipsyncRequestFunctionsItem", + "LipsyncRequestFunctionsItemTrigger", + "LipsyncRequestSadtalkerSettings", + "LipsyncRequestSadtalkerSettingsPreprocess", + "LipsyncRequestSelectedModel", "LipsyncTtsPageOutput", "LipsyncTtsPageRequest", "LipsyncTtsPageRequestFunctionsItem", @@ -788,13 +781,7 @@ "VideoBotsPageRequestTranslationModel", "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", - "VideoBotsStreamResponse", "__version__", - "copilot_for_your_enterprise", - "copilot_integrations", - "evaluator", - "functions", - "lip_syncing", + "copilot", "misc", - "smart_gpt", ] diff --git a/src/gooey/client.py b/src/gooey/client.py index d9c90dc..580b1c2 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -6,12 +6,7 @@ import httpx from .core.api_error import ApiError from .core.client_wrapper import SyncClientWrapper -from .copilot_integrations.client import CopilotIntegrationsClient -from .copilot_for_your_enterprise.client import CopilotForYourEnterpriseClient -from .evaluator.client import EvaluatorClient -from .smart_gpt.client import SmartGptClient -from .functions.client import FunctionsClient -from .lip_syncing.client import LipSyncingClient +from .copilot.client import CopilotClient from .misc.client import MiscClient from .types.deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem from 
.types.deforum_sd_page_request_functions_item import DeforumSdPageRequestFunctionsItem @@ -21,10 +16,10 @@ from .types.deforum_sd_page_output import DeforumSdPageOutput from .core.pydantic_utilities import parse_obj_as from .errors.payment_required_error import PaymentRequiredError +from .types.generic_error_response import GenericErrorResponse from .errors.unprocessable_entity_error import UnprocessableEntityError from .types.http_validation_error import HttpValidationError from .errors.too_many_requests_error import TooManyRequestsError -from .types.generic_error_response import GenericErrorResponse from json.decoder import JSONDecodeError from .types.qr_code_request_functions_item import QrCodeRequestFunctionsItem from . import core @@ -55,6 +50,12 @@ from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput from .types.bulk_run_request_functions_item import BulkRunRequestFunctionsItem from .types.bulk_runner_page_output import BulkRunnerPageOutput +from .types.bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem +from .types.bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem +from .types.bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem +from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel +from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType +from .types.bulk_eval_page_output import BulkEvalPageOutput from .types.synthesize_data_request_functions_item import SynthesizeDataRequestFunctionsItem from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel from .types.synthesize_data_request_selected_model import SynthesizeDataRequestSelectedModel @@ -71,11 +72,20 @@ from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType from .types.doc_search_page_output import DocSearchPageOutput +from .types.smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem +from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel +from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType +from .types.smart_gpt_page_output import SmartGptPageOutput from .types.doc_summary_request_functions_item import DocSummaryRequestFunctionsItem from .types.doc_summary_request_selected_model import DocSummaryRequestSelectedModel from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType from .types.doc_summary_page_output import DocSummaryPageOutput +from .types.functions_page_output import FunctionsPageOutput +from .types.lipsync_request_functions_item import LipsyncRequestFunctionsItem +from .types.lipsync_request_sadtalker_settings import LipsyncRequestSadtalkerSettings +from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel +from .types.lipsync_page_output import LipsyncPageOutput from .types.lipsync_tts_request_functions_item import LipsyncTtsRequestFunctionsItem from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName @@ -135,12 +145,7 @@ from 
.types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .core.client_wrapper import AsyncClientWrapper -from .copilot_integrations.client import AsyncCopilotIntegrationsClient -from .copilot_for_your_enterprise.client import AsyncCopilotForYourEnterpriseClient -from .evaluator.client import AsyncEvaluatorClient -from .smart_gpt.client import AsyncSmartGptClient -from .functions.client import AsyncFunctionsClient -from .lip_syncing.client import AsyncLipSyncingClient +from .copilot.client import AsyncCopilotClient from .misc.client import AsyncMiscClient # this is used as the default value for optional parameters @@ -207,12 +212,7 @@ def __init__( else httpx.Client(timeout=_defaulted_timeout), timeout=_defaulted_timeout, ) - self.copilot_integrations = CopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = CopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.evaluator = EvaluatorClient(client_wrapper=self._client_wrapper) - self.smart_gpt = SmartGptClient(client_wrapper=self._client_wrapper) - self.functions = FunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = LipSyncingClient(client_wrapper=self._client_wrapper) + self.copilot = CopilotClient(client_wrapper=self._client_wrapper) self.misc = MiscClient(client_wrapper=self._client_wrapper) def animate( @@ -334,9 +334,9 @@ def animate( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -371,36 +371,36 @@ def qr_code( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[core.File] = OMIT, - qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = OMIT, - qr_code_file: typing.Optional[core.File] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, + functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[core.File] = None, + qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = None, + qr_code_file: typing.Optional[core.File] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, image_prompt_controlnet_models: typing.Optional[ typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - 
guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> QrCodeGeneratorPageOutput: """ @@ -542,9 +542,9 @@ def qr_code( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -731,9 +731,9 @@ def seo_people_also_ask( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -905,9 +905,9 @@ def seo_content( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1094,9 +1094,9 @@ def web_search_llm( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1228,9 +1228,9 @@ def personalize_email( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1268,10 +1268,10 @@ def bulk_run( input_columns: typing.Dict[str, str], output_columns: typing.Dict[str, str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_urls: typing.Optional[typing.List[str]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + 
functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> BulkRunnerPageOutput: """ @@ -1364,9 +1364,9 @@ def bulk_run( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1396,27 +1396,177 @@ def bulk_run( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def synthesize_data( + def eval( self, *, - documents: typing.List[core.File], + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - sheet_url: typing.Optional[core.File] = OMIT, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[core.File] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, + ) -> BulkEvalPageOutput: + """ + Parameters + ---------- + documents : typing.Sequence[str] + + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. + + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] + + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. + _The `columns` dictionary can be used to reference the spreadsheet columns._ + + + agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] + + Aggregate using one or more operations. 
Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + BulkEvalPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.eval( + documents=["documents"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/bulk-eval/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, + "selected_model": selected_model, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + BulkEvalPageOutput, + parse_obj_as( + type_=BulkEvalPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def synthesize_data( + self, + *, + documents: typing.List[core.File], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[core.File] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, + request_options: 
typing.Optional[RequestOptions] = None, ) -> DocExtractPageOutput: """ Parameters @@ -1517,9 +1667,9 @@ def synthesize_data( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1645,9 +1795,9 @@ def llm( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1823,9 +1973,9 @@ def rag( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -1855,52 +2005,45 @@ def rag( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def doc_summary( + def smart_gpt( self, *, - documents: typing.List[core.File], + input_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryRequestSelectedModel] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocSummaryPageOutput: + ) -> SmartGptPageOutput: """ Parameters ---------- - documents : typing.List[core.File] - See core.File for more documentation + input_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryRequestSelectedModel] + cot_prompt : typing.Optional[str] - chain_type : typing.Optional[typing.Literal["map_reduce"]] + reflexion_prompt : typing.Optional[str] - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] 
+ dera_prompt : typing.Optional[str] - google_translate_target : typing.Optional[str] + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -1912,7 +2055,7 @@ def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1921,7 +2064,7 @@ def doc_summary( Returns ------- - DocSummaryPageOutput + SmartGptPageOutput Successful Response Examples @@ -1931,23 +2074,24 @@ def doc_summary( client = Gooey( api_key="YOUR_API_KEY", ) - client.doc_summary() + client.smart_gpt( + input_prompt="input_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/SmartGPT/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + "reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, @@ -1956,27 +2100,24 @@ def doc_summary( "response_format_type": response_format_type, "settings": settings, }, - files={ - "documents": documents, - }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocSummaryPageOutput, + SmartGptPageOutput, parse_obj_as( - type_=DocSummaryPageOutput, # type: ignore + type_=SmartGptPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2006,128 +2147,86 @@ def doc_summary( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def lipsync_tts( + def doc_summary( self, *, - text_prompt: str, + documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: 
typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[core.File] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncTtsPageOutput: + ) -> DocSummaryPageOutput: """ Parameters ---------- - text_prompt : str + documents : typing.List[core.File] + See core.File for more documentation example_id : typing.Optional[str] - functions : typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] + functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + task_instructions : typing.Optional[str] - uberduck_voice_name : typing.Optional[str] + merge_instructions : typing.Optional[str] - uberduck_speaking_rate : typing.Optional[float] + selected_model : typing.Optional[DocSummaryRequestSelectedModel] - google_voice_name : typing.Optional[str] + chain_type : typing.Optional[typing.Literal["map_reduce"]] - google_speaking_rate : typing.Optional[float] + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] - google_pitch : typing.Optional[float] + google_translate_target : typing.Optional[str] - bark_history_prompt : typing.Optional[str] + avoid_repetition : typing.Optional[bool] - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead + num_outputs : typing.Optional[int] - elevenlabs_api_key : typing.Optional[str] + quality : typing.Optional[float] - elevenlabs_voice_id : typing.Optional[str] + max_tokens : typing.Optional[int] - elevenlabs_model : typing.Optional[str] + sampling_temperature : typing.Optional[float] - elevenlabs_stability : typing.Optional[float] + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] - elevenlabs_similarity_boost : typing.Optional[float] + settings : typing.Optional[RunSettings] - elevenlabs_style : typing.Optional[float] + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
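As an aside on this and the surrounding hunks: the 402 handler's payload is retyped from typing.Optional[typing.Any] to GenericErrorResponse throughout this file. A minimal caller-side sketch of what that change enables, assuming PaymentRequiredError is importable from the package root and exposes the parsed body as .body (neither detail is confirmed by this diff):

from gooey import Gooey, PaymentRequiredError

client = Gooey(api_key="YOUR_API_KEY")
try:
    client.doc_summary()
except PaymentRequiredError as e:
    # e.body should now be a typed GenericErrorResponse rather than an untyped Any
    # (.body is an assumed attribute name, shown here only for illustration)
    print(e.body)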
- elevenlabs_speaker_boost : typing.Optional[bool] + Returns + ------- + DocSummaryPageOutput + Successful Response - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] - - input_face : typing.Optional[core.File] - See core.File for more documentation - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[LipsyncTtsRequestSadtalkerSettings] - - selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncTtsPageOutput - Successful Response - - Examples - -------- - from gooey import Gooey + Examples + -------- + from gooey import Gooey client = Gooey( api_key="YOUR_API_KEY", ) - client.lipsync_tts( - text_prompt="text_prompt", - ) + client.doc_summary() """ _response = self._client_wrapper.httpx_client.request( - "v3/LipsyncTTS/async", + "v3/doc-summary/async", method="POST", params={ "example_id": example_id, @@ -2135,35 +2234,22 @@ def lipsync_tts( data={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, files={ - "input_face": input_face, + "documents": documents, }, request_options=request_options, omit=OMIT, @@ -2171,18 +2257,18 @@ def lipsync_tts( try: if 200 <= _response.status_code < 300: return typing.cast( - LipsyncTtsPageOutput, + DocSummaryPageOutput, parse_obj_as( - type_=LipsyncTtsPageOutput, # type: ignore + type_=DocSummaryPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # 
type: ignore object_=_response.json(), ), ) @@ -2212,82 +2298,25 @@ def lipsync_tts( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_speech( + def functions( self, *, - text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] = OMIT, + code: typing.Optional[str] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> TextToSpeechPageOutput: + ) -> FunctionsPageOutput: """ Parameters ---------- - text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] + code : typing.Optional[str] + The JS code to be executed. 
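A minimal sketch of calling the new functions endpoint introduced in this hunk, using only the parameters shown in its signature; the JS snippet and variable names are illustrative, not taken from this diff:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
result = client.functions(
    code="return variables.a + variables.b;",  # illustrative JS, executed server-side
    variables={"a": 1, "b": 2},
)
print(result)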
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + Variables to be used in the code settings : typing.Optional[RunSettings] @@ -2296,7 +2325,7 @@ def text_to_speech( Returns ------- - TextToSpeechPageOutput + FunctionsPageOutput Successful Response Examples @@ -2306,38 +2335,17 @@ def text_to_speech( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_speech( - text_prompt="text_prompt", - ) + client.functions() """ _response = self._client_wrapper.httpx_client.request( - "v3/TextToSpeech/async", + "v3/functions/async", method="POST", params={ "example_id": example_id, }, json={ - "functions": functions, + "code": code, "variables": variables, - "text_prompt": text_prompt, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, "settings": settings, }, request_options=request_options, @@ -2346,18 +2354,18 @@ def text_to_speech( try: if 200 <= _response.status_code < 300: return typing.cast( - TextToSpeechPageOutput, + FunctionsPageOutput, parse_obj_as( - type_=TextToSpeechPageOutput, # type: ignore + type_=FunctionsPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2387,53 +2395,49 @@ def text_to_speech( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def speech_recognition( + def lipsync( self, *, - documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: 
typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = OMIT, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[core.File] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[LipsyncRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, + input_audio: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> AsrPageOutput: + ) -> LipsyncPageOutput: """ Parameters ---------- - documents : typing.List[core.File] - See core.File for more documentation - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] + functions : typing.Optional[typing.List[LipsyncRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] + input_face : typing.Optional[core.File] + See core.File for more documentation - language : typing.Optional[str] + face_padding_top : typing.Optional[int] - translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + face_padding_bottom : typing.Optional[int] - output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + face_padding_left : typing.Optional[int] - google_translate_target : typing.Optional[str] - use `translation_model` & `translation_target` instead. 
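A hedged usage sketch for the reworked lipsync method whose signature appears above; file handles stand in for core.File values and the paths are placeholders:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
with open("face.png", "rb") as face, open("speech.wav", "rb") as audio:
    result = client.lipsync(
        input_face=face,    # placeholder image of the face to animate
        input_audio=audio,  # placeholder audio track to lip-sync to
    )
print(result)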
+ face_padding_right : typing.Optional[int] - translation_source : typing.Optional[str] + sadtalker_settings : typing.Optional[LipsyncRequestSadtalkerSettings] - translation_target : typing.Optional[str] + selected_model : typing.Optional[LipsyncRequestSelectedModel] - glossary_document : typing.Optional[core.File] + input_audio : typing.Optional[core.File] See core.File for more documentation settings : typing.Optional[RunSettings] @@ -2443,7 +2447,7 @@ def speech_recognition( Returns ------- - AsrPageOutput + LipsyncPageOutput Successful Response Examples @@ -2453,10 +2457,10 @@ def speech_recognition( client = Gooey( api_key="YOUR_API_KEY", ) - client.speech_recognition() + client.lipsync() """ _response = self._client_wrapper.httpx_client.request( - "v3/asr/async", + "v3/Lipsync/async", method="POST", params={ "example_id": example_id, @@ -2464,18 +2468,17 @@ def speech_recognition( data={ "functions": functions, "variables": variables, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, "selected_model": selected_model, - "language": language, - "translation_model": translation_model, - "output_format": output_format, - "google_translate_target": google_translate_target, - "translation_source": translation_source, - "translation_target": translation_target, "settings": settings, }, files={ - "documents": documents, - "glossary_document": glossary_document, + "input_face": input_face, + "input_audio": input_audio, }, request_options=request_options, omit=OMIT, @@ -2483,18 +2486,18 @@ def speech_recognition( try: if 200 <= _response.status_code < 300: return typing.cast( - AsrPageOutput, + LipsyncPageOutput, parse_obj_as( - type_=AsrPageOutput, # type: ignore + type_=LipsyncPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2524,24 +2527,41 @@ def speech_recognition( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_music( + def lipsync_tts( self, *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - duration_sec: typing.Optional[float] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: 
typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> Text2AudioPageOutput: + ) -> LipsyncTtsPageOutput: """ Parameters ---------- @@ -2549,35 +2569,71 @@ def text_to_music( example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] + functions : typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - negative_prompt : typing.Optional[str] + tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] - duration_sec : typing.Optional[float] + uberduck_voice_name : typing.Optional[str] - num_outputs : typing.Optional[int] + uberduck_speaking_rate : typing.Optional[float] - quality : typing.Optional[int] + google_voice_name : typing.Optional[str] - guidance_scale : typing.Optional[float] + google_speaking_rate : typing.Optional[float] - seed : typing.Optional[int] + google_pitch : typing.Optional[float] - sd2upscaling : typing.Optional[bool] + bark_history_prompt : typing.Optional[str] - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead - settings : typing.Optional[RunSettings] + elevenlabs_api_key : typing.Optional[str] - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
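A short sketch of the rewritten lipsync_tts call shown above, preferring elevenlabs_voice_id over the deprecated elevenlabs_voice_name noted in this docstring; the voice id and file path are placeholders:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
with open("face.png", "rb") as face:
    result = client.lipsync_tts(
        text_prompt="text_prompt",
        elevenlabs_voice_id="YOUR_VOICE_ID",  # placeholder voice id
        input_face=face,
    )
print(result)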
+ elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + + input_face : typing.Optional[core.File] + See core.File for more documentation + + face_padding_top : typing.Optional[int] + + face_padding_bottom : typing.Optional[int] + + face_padding_left : typing.Optional[int] + + face_padding_right : typing.Optional[int] + + sadtalker_settings : typing.Optional[LipsyncTtsRequestSadtalkerSettings] + + selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. Returns ------- - Text2AudioPageOutput + LipsyncTtsPageOutput Successful Response Examples @@ -2587,48 +2643,67 @@ def text_to_music( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_music( + client.lipsync_tts( text_prompt="text_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/text2audio/async", + "v3/LipsyncTTS/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "duration_sec": duration_sec, - "num_outputs": num_outputs, - "quality": quality, - "guidance_scale": guidance_scale, - "seed": seed, - "sd_2_upscaling": sd2upscaling, - "selected_models": selected_models, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, + "selected_model": selected_model, "settings": settings, }, + files={ + "input_face": input_face, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - Text2AudioPageOutput, + LipsyncTtsPageOutput, parse_obj_as( - type_=Text2AudioPageOutput, # type: ignore + type_=LipsyncTtsPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2658,40 +2733,82 @@ def text_to_music( raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def translate( + def text_to_speech( self, *, + text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[TranslateRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - texts: typing.Optional[typing.List[str]] = OMIT, - selected_model: typing.Optional[TranslateRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[core.File] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + uberduck_voice_name: typing.Optional[str] = OMIT, + uberduck_speaking_rate: typing.Optional[float] = OMIT, + google_voice_name: typing.Optional[str] = OMIT, + google_speaking_rate: typing.Optional[float] = OMIT, + google_pitch: typing.Optional[float] = OMIT, + bark_history_prompt: typing.Optional[str] = OMIT, + elevenlabs_voice_name: typing.Optional[str] = OMIT, + elevenlabs_api_key: typing.Optional[str] = OMIT, + elevenlabs_voice_id: typing.Optional[str] = OMIT, + elevenlabs_model: typing.Optional[str] = OMIT, + elevenlabs_stability: typing.Optional[float] = OMIT, + elevenlabs_similarity_boost: typing.Optional[float] = OMIT, + elevenlabs_style: typing.Optional[float] = OMIT, + elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, + azure_voice_name: typing.Optional[str] = OMIT, + openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, + openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> TranslationPageOutput: + ) -> TextToSpeechPageOutput: """ Parameters ---------- + text_prompt : str + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[TranslateRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[TextToSpeechPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - texts : typing.Optional[typing.List[str]] + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] - selected_model : typing.Optional[TranslateRequestSelectedModel] + uberduck_voice_name : typing.Optional[str] - translation_source : typing.Optional[str] + uberduck_speaking_rate : typing.Optional[float] - translation_target : typing.Optional[str] + google_voice_name : typing.Optional[str] - glossary_document : typing.Optional[core.File] - See core.File for more documentation + google_speaking_rate : typing.Optional[float] + + google_pitch : typing.Optional[float] + + bark_history_prompt : typing.Optional[str] + + elevenlabs_voice_name : typing.Optional[str] + Use `elevenlabs_voice_id` instead + + elevenlabs_api_key : typing.Optional[str] + + elevenlabs_voice_id : typing.Optional[str] + + elevenlabs_model : typing.Optional[str] + + elevenlabs_stability : typing.Optional[float] + + elevenlabs_similarity_boost : typing.Optional[float] + + elevenlabs_style : typing.Optional[float] + + elevenlabs_speaker_boost : typing.Optional[bool] + + azure_voice_name : typing.Optional[str] + + openai_voice_name : 
typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + + openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] settings : typing.Optional[RunSettings] @@ -2700,7 +2817,7 @@ def translate( Returns ------- - TranslationPageOutput + TextToSpeechPageOutput Successful Response Examples @@ -2710,44 +2827,58 @@ def translate( client = Gooey( api_key="YOUR_API_KEY", ) - client.translate() + client.text_to_speech( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/translate/async", + "v3/TextToSpeech/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "texts": texts, - "selected_model": selected_model, - "translation_source": translation_source, - "translation_target": translation_target, + "text_prompt": text_prompt, + "tts_provider": tts_provider, + "uberduck_voice_name": uberduck_voice_name, + "uberduck_speaking_rate": uberduck_speaking_rate, + "google_voice_name": google_voice_name, + "google_speaking_rate": google_speaking_rate, + "google_pitch": google_pitch, + "bark_history_prompt": bark_history_prompt, + "elevenlabs_voice_name": elevenlabs_voice_name, + "elevenlabs_api_key": elevenlabs_api_key, + "elevenlabs_voice_id": elevenlabs_voice_id, + "elevenlabs_model": elevenlabs_model, + "elevenlabs_stability": elevenlabs_stability, + "elevenlabs_similarity_boost": elevenlabs_similarity_boost, + "elevenlabs_style": elevenlabs_style, + "elevenlabs_speaker_boost": elevenlabs_speaker_boost, + "azure_voice_name": azure_voice_name, + "openai_voice_name": openai_voice_name, + "openai_tts_model": openai_tts_model, "settings": settings, }, - files={ - "glossary_document": glossary_document, - }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - TranslationPageOutput, + TextToSpeechPageOutput, parse_obj_as( - type_=TranslationPageOutput, # type: ignore + type_=TextToSpeechPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2777,67 +2908,54 @@ def translate( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remix_image( + def speech_recognition( self, *, - input_image: core.File, + documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: 
typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> Img2ImgPageOutput: + ) -> AsrPageOutput: """ Parameters ---------- - input_image : core.File + documents : typing.List[core.File] See core.File for more documentation example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RemixImageRequestFunctionsItem]] + functions : typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - text_prompt : typing.Optional[str] - - selected_model : typing.Optional[RemixImageRequestSelectedModel] - - selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] + selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] - output_width : typing.Optional[int] + language : typing.Optional[str] - output_height : typing.Optional[int] + translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] - guidance_scale : typing.Optional[float] + output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] - prompt_strength : typing.Optional[float] + google_translate_target : typing.Optional[str] + use `translation_model` & `translation_target` instead. 
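Given the deprecation note directly above, a hedged sketch of the new speech_recognition signature that passes translation_source and translation_target instead of google_translate_target; the file path and language codes are illustrative assumptions:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")
with open("recording.wav", "rb") as audio:
    result = client.speech_recognition(
        documents=[audio],
        translation_source="en",  # assumed language-code format
        translation_target="hi",
    )
print(result)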
- controlnet_conditioning_scale : typing.Optional[typing.List[float]] + translation_source : typing.Optional[str] - seed : typing.Optional[int] + translation_target : typing.Optional[str] - image_guidance_scale : typing.Optional[float] + glossary_document : typing.Optional[core.File] + See core.File for more documentation settings : typing.Optional[RunSettings] @@ -2846,7 +2964,7 @@ def remix_image( Returns ------- - Img2ImgPageOutput + AsrPageOutput Successful Response Examples @@ -2856,10 +2974,10 @@ def remix_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.remix_image() + client.speech_recognition() """ _response = self._client_wrapper.httpx_client.request( - "v3/Img2Img/async", + "v3/asr/async", method="POST", params={ "example_id": example_id, @@ -2867,23 +2985,18 @@ def remix_image( data={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, - "controlnet_conditioning_scale": controlnet_conditioning_scale, - "seed": seed, - "image_guidance_scale": image_guidance_scale, + "language": language, + "translation_model": translation_model, + "output_format": output_format, + "google_translate_target": google_translate_target, + "translation_source": translation_source, + "translation_target": translation_target, "settings": settings, }, files={ - "input_image": input_image, + "documents": documents, + "glossary_document": glossary_document, }, request_options=request_options, omit=OMIT, @@ -2891,18 +3004,18 @@ def remix_image( try: if 200 <= _response.status_code < 300: return typing.cast( - Img2ImgPageOutput, + AsrPageOutput, parse_obj_as( - type_=Img2ImgPageOutput, # type: ignore + type_=AsrPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -2932,30 +3045,24 @@ def remix_image( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def text_to_image( + def text_to_music( self, *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, negative_prompt: typing.Optional[str] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, + duration_sec: typing.Optional[float] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, - dall_e3quality: typing.Optional[str] = OMIT, - dall_e3style: typing.Optional[str] = OMIT, guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, - edit_instruction: 
typing.Optional[str] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> CompareText2ImgPageOutput: + ) -> Text2AudioPageOutput: """ Parameters ---------- @@ -2963,38 +3070,26 @@ def text_to_image( example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[Text2AudioPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments negative_prompt : typing.Optional[str] - output_width : typing.Optional[int] - - output_height : typing.Optional[int] + duration_sec : typing.Optional[float] num_outputs : typing.Optional[int] quality : typing.Optional[int] - dall_e3quality : typing.Optional[str] - - dall_e3style : typing.Optional[str] - guidance_scale : typing.Optional[float] seed : typing.Optional[int] sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - - edit_instruction : typing.Optional[str] - - image_guidance_scale : typing.Optional[float] + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] settings : typing.Optional[RunSettings] @@ -3003,7 +3098,7 @@ def text_to_image( Returns ------- - CompareText2ImgPageOutput + Text2AudioPageOutput Successful Response Examples @@ -3013,12 +3108,12 @@ def text_to_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.text_to_image( + client.text_to_music( text_prompt="text_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/CompareText2Img/async", + "v3/text2audio/async", method="POST", params={ "example_id": example_id, @@ -3028,19 +3123,13 @@ def text_to_image( "variables": variables, "text_prompt": text_prompt, "negative_prompt": negative_prompt, - "output_width": output_width, - "output_height": output_height, + "duration_sec": duration_sec, "num_outputs": num_outputs, "quality": quality, - "dall_e_3_quality": dall_e3quality, - "dall_e_3_style": dall_e3style, "guidance_scale": guidance_scale, "seed": seed, "sd_2_upscaling": sd2upscaling, "selected_models": selected_models, - "scheduler": scheduler, - "edit_instruction": edit_instruction, - "image_guidance_scale": image_guidance_scale, "settings": settings, }, request_options=request_options, @@ -3049,18 +3138,18 @@ def text_to_image( try: if 200 <= _response.status_code < 300: return typing.cast( - CompareText2ImgPageOutput, + Text2AudioPageOutput, parse_obj_as( - type_=CompareText2ImgPageOutput, # type: ignore + type_=Text2AudioPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3090,70 +3179,40 @@ def text_to_image( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def product_image( + def translate( self, *, - input_image: core.File, - text_prompt: str, example_id: typing.Optional[str] = None, - 
functions: typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[TranslateRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> ObjectInpaintingPageOutput: + ) -> TranslationPageOutput: """ Parameters ---------- - input_image : core.File - See core.File for more documentation - - text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[ProductImageRequestFunctionsItem]] + functions : typing.Optional[typing.List[TranslateRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - obj_scale : typing.Optional[float] - - obj_pos_x : typing.Optional[float] - - obj_pos_y : typing.Optional[float] - - mask_threshold : typing.Optional[float] - - selected_model : typing.Optional[ProductImageRequestSelectedModel] - - negative_prompt : typing.Optional[str] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[int] - - output_width : typing.Optional[int] + texts : typing.Optional[typing.List[str]] - output_height : typing.Optional[int] + selected_model : typing.Optional[TranslateRequestSelectedModel] - guidance_scale : typing.Optional[float] + translation_source : typing.Optional[str] - sd2upscaling : typing.Optional[bool] + translation_target : typing.Optional[str] - seed : typing.Optional[int] + glossary_document : typing.Optional[core.File] + See core.File for more documentation settings : typing.Optional[RunSettings] @@ -3162,7 +3221,7 @@ def product_image( Returns ------- - ObjectInpaintingPageOutput + TranslationPageOutput Successful Response Examples @@ -3172,12 +3231,10 @@ def product_image( client = Gooey( api_key="YOUR_API_KEY", ) - client.product_image( - text_prompt="text_prompt", - ) + client.translate() """ _response = self._client_wrapper.httpx_client.request( - "v3/ObjectInpainting/async", + "v3/translate/async", method="POST", params={ "example_id": example_id, @@ -3185,24 +3242,14 @@ def product_image( data={ "functions": functions, "variables": variables, - "text_prompt": text_prompt, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, - "mask_threshold": mask_threshold, + "texts": texts, "selected_model": selected_model, - "negative_prompt": 
negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "sd_2_upscaling": sd2upscaling, - "seed": seed, + "translation_source": translation_source, + "translation_target": translation_target, "settings": settings, }, files={ - "input_image": input_image, + "glossary_document": glossary_document, }, request_options=request_options, omit=OMIT, @@ -3210,18 +3257,18 @@ def product_image( try: if 200 <= _response.status_code < 300: return typing.cast( - ObjectInpaintingPageOutput, + TranslationPageOutput, parse_obj_as( - type_=ObjectInpaintingPageOutput, # type: ignore + type_=TranslationPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3251,51 +3298,47 @@ def product_image( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def portrait( + def remix_image( self, *, input_image: core.File, - text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[PortraitRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + text_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> FaceInpaintingPageOutput: + ) -> Img2ImgPageOutput: """ Parameters ---------- input_image : core.File See core.File for more documentation - text_prompt : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[PortraitRequestFunctionsItem]] + functions : typing.Optional[typing.List[RemixImageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as 
Jinja prompt templates and in functions as arguments - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] + text_prompt : typing.Optional[str] - face_pos_y : typing.Optional[float] + selected_model : typing.Optional[RemixImageRequestSelectedModel] - selected_model : typing.Optional[PortraitRequestSelectedModel] + selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] negative_prompt : typing.Optional[str] @@ -3303,16 +3346,20 @@ def portrait( quality : typing.Optional[int] - upscale_factor : typing.Optional[float] - output_width : typing.Optional[int] output_height : typing.Optional[int] guidance_scale : typing.Optional[float] + prompt_strength : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + seed : typing.Optional[int] + image_guidance_scale : typing.Optional[float] + settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -3320,7 +3367,7 @@ def portrait( Returns ------- - FaceInpaintingPageOutput + Img2ImgPageOutput Successful Response Examples @@ -3330,12 +3377,10 @@ def portrait( client = Gooey( api_key="YOUR_API_KEY", ) - client.portrait( - text_prompt="text_prompt", - ) + client.remix_image() """ _response = self._client_wrapper.httpx_client.request( - "v3/FaceInpainting/async", + "v3/Img2Img/async", method="POST", params={ "example_id": example_id, @@ -3344,18 +3389,18 @@ def portrait( "functions": functions, "variables": variables, "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, - "upscale_factor": upscale_factor, "output_width": output_width, "output_height": output_height, "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "controlnet_conditioning_scale": controlnet_conditioning_scale, "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, files={ @@ -3367,18 +3412,18 @@ def portrait( try: if 200 <= _response.status_code < 300: return typing.cast( - FaceInpaintingPageOutput, + Img2ImgPageOutput, parse_obj_as( - type_=FaceInpaintingPageOutput, # type: ignore + type_=Img2ImgPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3408,38 +3453,30 @@ def portrait( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def image_from_email( + def text_to_image( self, *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - email_address: typing.Optional[str] = OMIT, - twitter_handle: typing.Optional[str] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: 
typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, output_width: typing.Optional[int] = OMIT, output_height: typing.Optional[int] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + dall_e3quality: typing.Optional[str] = OMIT, + dall_e3style: typing.Optional[str] = OMIT, guidance_scale: typing.Optional[float] = OMIT, - should_send_email: typing.Optional[bool] = OMIT, - email_from: typing.Optional[str] = OMIT, - email_cc: typing.Optional[str] = OMIT, - email_bcc: typing.Optional[str] = OMIT, - email_subject: typing.Optional[str] = OMIT, - email_body: typing.Optional[str] = OMIT, - email_body_enable_html: typing.Optional[bool] = OMIT, - fallback_email_body: typing.Optional[str] = OMIT, seed: typing.Optional[int] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + edit_instruction: typing.Optional[str] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> EmailFaceInpaintingPageOutput: + ) -> CompareText2ImgPageOutput: """ Parameters ---------- @@ -3447,54 +3484,38 @@ def image_from_email( example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[CompareText2ImgPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - email_address : typing.Optional[str] + negative_prompt : typing.Optional[str] - twitter_handle : typing.Optional[str] + output_width : typing.Optional[int] - face_scale : typing.Optional[float] - - face_pos_x : typing.Optional[float] - - face_pos_y : typing.Optional[float] - - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] - - negative_prompt : typing.Optional[str] + output_height : typing.Optional[int] num_outputs : typing.Optional[int] quality : typing.Optional[int] - upscale_factor : typing.Optional[float] - - output_width : typing.Optional[int] + dall_e3quality : typing.Optional[str] - output_height : typing.Optional[int] + dall_e3style : typing.Optional[str] guidance_scale : typing.Optional[float] - should_send_email : typing.Optional[bool] - - email_from : typing.Optional[str] - - email_cc : typing.Optional[str] - - email_bcc : typing.Optional[str] + seed : typing.Optional[int] - email_subject : typing.Optional[str] + sd2upscaling : typing.Optional[bool] - email_body : typing.Optional[str] + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - email_body_enable_html : typing.Optional[bool] + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] - fallback_email_body : typing.Optional[str] + edit_instruction : typing.Optional[str] - seed : typing.Optional[int] + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -3503,7 +3524,7 @@ def image_from_email( Returns ------- - EmailFaceInpaintingPageOutput + CompareText2ImgPageOutput Successful Response Examples @@ -3513,13 +3534,12 @@ def 
image_from_email( client = Gooey( api_key="YOUR_API_KEY", ) - client.image_from_email( - email_address="sean@dara.network", - text_prompt="winter's day in paris", + client.text_to_image( + text_prompt="text_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/EmailFaceInpainting/async", + "v3/CompareText2Img/async", method="POST", params={ "example_id": example_id, @@ -3527,29 +3547,21 @@ def image_from_email( json={ "functions": functions, "variables": variables, - "email_address": email_address, - "twitter_handle": twitter_handle, "text_prompt": text_prompt, - "face_scale": face_scale, - "face_pos_x": face_pos_x, - "face_pos_y": face_pos_y, - "selected_model": selected_model, "negative_prompt": negative_prompt, - "num_outputs": num_outputs, - "quality": quality, - "upscale_factor": upscale_factor, "output_width": output_width, "output_height": output_height, + "num_outputs": num_outputs, + "quality": quality, + "dall_e_3_quality": dall_e3quality, + "dall_e_3_style": dall_e3style, "guidance_scale": guidance_scale, - "should_send_email": should_send_email, - "email_from": email_from, - "email_cc": email_cc, - "email_bcc": email_bcc, - "email_subject": email_subject, - "email_body": email_body, - "email_body_enable_html": email_body_enable_html, - "fallback_email_body": fallback_email_body, "seed": seed, + "sd_2_upscaling": sd2upscaling, + "selected_models": selected_models, + "scheduler": scheduler, + "edit_instruction": edit_instruction, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, request_options=request_options, @@ -3558,18 +3570,18 @@ def image_from_email( try: if 200 <= _response.status_code < 300: return typing.cast( - EmailFaceInpaintingPageOutput, + CompareText2ImgPageOutput, parse_obj_as( - type_=EmailFaceInpaintingPageOutput, # type: ignore + type_=CompareText2ImgPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3599,48 +3611,54 @@ def image_from_email( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def image_from_web_search( + def product_image( self, *, - search_query: str, + input_image: core.File, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: 
typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleImageGenPageOutput: + ) -> ObjectInpaintingPageOutput: """ Parameters ---------- - search_query : str + input_image : core.File + See core.File for more documentation text_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] + functions : typing.Optional[typing.List[ProductImageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocation] + obj_scale : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead + obj_pos_x : typing.Optional[float] - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + obj_pos_y : typing.Optional[float] + + mask_threshold : typing.Optional[float] + + selected_model : typing.Optional[ProductImageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3648,16 +3666,16 @@ def image_from_web_search( quality : typing.Optional[int] - guidance_scale : typing.Optional[float] + output_width : typing.Optional[int] - prompt_strength : typing.Optional[float] + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] sd2upscaling : typing.Optional[bool] seed : typing.Optional[int] - image_guidance_scale : typing.Optional[float] - settings : typing.Optional[RunSettings] request_options : typing.Optional[RequestOptions] @@ -3665,7 +3683,7 @@ def image_from_web_search( Returns ------- - GoogleImageGenPageOutput + ObjectInpaintingPageOutput Successful Response Examples @@ -3675,53 +3693,56 @@ def image_from_web_search( client = Gooey( api_key="YOUR_API_KEY", ) - client.image_from_web_search( - search_query="search_query", + client.product_image( text_prompt="text_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/GoogleImageGen/async", + "v3/ObjectInpainting/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "search_query": search_query, "text_prompt": text_prompt, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "mask_threshold": mask_threshold, "selected_model": selected_model, "negative_prompt": negative_prompt, "num_outputs": num_outputs, "quality": quality, + "output_width": output_width, + "output_height": output_height, "guidance_scale": guidance_scale, - "prompt_strength": prompt_strength, "sd_2_upscaling": sd2upscaling, "seed": seed, - "image_guidance_scale": image_guidance_scale, "settings": settings, }, + files={ + "input_image": input_image, + }, 
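With the hunks above, the slot that used to hold `product_image` becomes a new `translate` method posting to `v3/translate/async`, and `remix_image`, `text_to_image` and `product_image` are re-emitted in the slots that follow. On the endpoints that upload files (`translate`'s `glossary_document`, `product_image`'s `input_image`) the optional keywords now default to `None` and the body is sent as multipart `data=` plus `files=`, while the JSON-only methods keep the `OMIT` sentinel. A minimal usage sketch against those signatures; the file path, prompt text and language code are placeholders rather than values taken from the diff, and an open binary file handle is assumed to be an acceptable `core.File` value:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

# New translate endpoint (v3/translate/async); every keyword below is optional and defaults to None.
translation = client.translate(
    texts=["Hello, world!"],
    translation_target="hi",  # placeholder language code, not a value from the diff
)

# Relocated product_image endpoint (v3/ObjectInpainting/async); the image goes out via files={"input_image": ...}.
with open("product.png", "rb") as image:  # placeholder path
    result = client.product_image(
        input_image=image,
        text_prompt="studio photo on a marble table",  # placeholder prompt
    )

The same pattern, a required upload plus keyword arguments defaulting to None, recurs in `remix_image`, `portrait` and `remove_background` below.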
request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GoogleImageGenPageOutput, + ObjectInpaintingPageOutput, parse_obj_as( - type_=GoogleImageGenPageOutput, # type: ignore + type_=ObjectInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3751,49 +3772,67 @@ def image_from_web_search( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def remove_background( + def portrait( self, *, input_image: core.File, + text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> ImageSegmentationPageOutput: + ) -> FaceInpaintingPageOutput: """ Parameters ---------- input_image : core.File See core.File for more documentation + text_prompt : str + example_id : typing.Optional[str] - functions : typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] + functions : typing.Optional[typing.List[PortraitRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + face_scale : typing.Optional[float] - mask_threshold : typing.Optional[float] + face_pos_x : typing.Optional[float] - rect_persepective_transform : typing.Optional[bool] + face_pos_y : typing.Optional[float] - reflection_opacity : typing.Optional[float] + selected_model : typing.Optional[PortraitRequestSelectedModel] - obj_scale : typing.Optional[float] + negative_prompt : typing.Optional[str] - obj_pos_x : typing.Optional[float] + num_outputs : typing.Optional[int] - obj_pos_y : typing.Optional[float] + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : 
typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -3802,7 +3841,7 @@ def remove_background( Returns ------- - ImageSegmentationPageOutput + FaceInpaintingPageOutput Successful Response Examples @@ -3812,10 +3851,12 @@ def remove_background( client = Gooey( api_key="YOUR_API_KEY", ) - client.remove_background() + client.portrait( + text_prompt="text_prompt", + ) """ _response = self._client_wrapper.httpx_client.request( - "v3/ImageSegmentation/async", + "v3/FaceInpainting/async", method="POST", params={ "example_id": example_id, @@ -3823,13 +3864,19 @@ def remove_background( data={ "functions": functions, "variables": variables, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, "selected_model": selected_model, - "mask_threshold": mask_threshold, - "rect_persepective_transform": rect_persepective_transform, - "reflection_opacity": reflection_opacity, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "seed": seed, "settings": settings, }, files={ @@ -3841,18 +3888,18 @@ def remove_background( try: if 200 <= _response.status_code < 300: return typing.cast( - ImageSegmentationPageOutput, + FaceInpaintingPageOutput, parse_obj_as( - type_=ImageSegmentationPageOutput, # type: ignore + type_=FaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -3882,42 +3929,93 @@ def remove_background( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def upscale( + def image_from_email( self, *, - scale: int, + text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_image: typing.Optional[core.File] = OMIT, - input_video: typing.Optional[core.File] = OMIT, - selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, + email_address: typing.Optional[str] = OMIT, + twitter_handle: typing.Optional[str] = OMIT, + face_scale: typing.Optional[float] = OMIT, + face_pos_x: typing.Optional[float] = OMIT, + face_pos_y: typing.Optional[float] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + upscale_factor: typing.Optional[float] = OMIT, + output_width: typing.Optional[int] = OMIT, + output_height: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + should_send_email: typing.Optional[bool] = OMIT, + email_from: 
typing.Optional[str] = OMIT, + email_cc: typing.Optional[str] = OMIT, + email_bcc: typing.Optional[str] = OMIT, + email_subject: typing.Optional[str] = OMIT, + email_body: typing.Optional[str] = OMIT, + email_body_enable_html: typing.Optional[bool] = OMIT, + fallback_email_body: typing.Optional[str] = OMIT, + seed: typing.Optional[int] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> CompareUpscalerPageOutput: + ) -> EmailFaceInpaintingPageOutput: """ Parameters ---------- - scale : int - The final upsampling scale of the image + text_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[UpscaleRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[EmailFaceInpaintingPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_image : typing.Optional[core.File] - See core.File for more documentation + email_address : typing.Optional[str] - input_video : typing.Optional[core.File] - See core.File for more documentation + twitter_handle : typing.Optional[str] - selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + face_scale : typing.Optional[float] - selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + face_pos_x : typing.Optional[float] + + face_pos_y : typing.Optional[float] + + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + upscale_factor : typing.Optional[float] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + should_send_email : typing.Optional[bool] + + email_from : typing.Optional[str] + + email_cc : typing.Optional[str] + + email_bcc : typing.Optional[str] + + email_subject : typing.Optional[str] + + email_body : typing.Optional[str] + + email_body_enable_html : typing.Optional[bool] + + fallback_email_body : typing.Optional[str] + + seed : typing.Optional[int] settings : typing.Optional[RunSettings] @@ -3926,7 +4024,7 @@ def upscale( Returns ------- - CompareUpscalerPageOutput + EmailFaceInpaintingPageOutput Successful Response Examples @@ -3936,46 +4034,63 @@ def upscale( client = Gooey( api_key="YOUR_API_KEY", ) - client.upscale( - scale=1, + client.image_from_email( + email_address="sean@dara.network", + text_prompt="winter's day in paris", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/compare-ai-upscalers/async", + "v3/EmailFaceInpainting/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "scale": scale, - "selected_models": selected_models, - "selected_bg_model": selected_bg_model, + "email_address": email_address, + "twitter_handle": twitter_handle, + "text_prompt": text_prompt, + "face_scale": face_scale, + "face_pos_x": face_pos_x, + "face_pos_y": face_pos_y, + "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "upscale_factor": upscale_factor, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "should_send_email": should_send_email, + "email_from": email_from, + "email_cc": email_cc, + "email_bcc": email_bcc, + "email_subject": 
email_subject, + "email_body": email_body, + "email_body_enable_html": email_body_enable_html, + "fallback_email_body": fallback_email_body, + "seed": seed, "settings": settings, }, - files={ - "input_image": input_image, - "input_video": input_video, - }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareUpscalerPageOutput, + EmailFaceInpaintingPageOutput, parse_obj_as( - type_=CompareUpscalerPageOutput, # type: ignore + type_=EmailFaceInpaintingPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4005,30 +4120,64 @@ def upscale( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def embed( + def image_from_web_search( self, *, - texts: typing.Sequence[str], + search_query: str, + text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + negative_prompt: typing.Optional[str] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[int] = OMIT, + guidance_scale: typing.Optional[float] = OMIT, + prompt_strength: typing.Optional[float] = OMIT, + sd2upscaling: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> EmbeddingsPageOutput: + ) -> GoogleImageGenPageOutput: """ Parameters ---------- - texts : typing.Sequence[str] + search_query : str + + text_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[GoogleImageGenPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + + negative_prompt : typing.Optional[str] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + prompt_strength : typing.Optional[float] + + sd2upscaling : typing.Optional[bool] + + seed : typing.Optional[int] + + image_guidance_scale : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -4037,7 +4186,7 @@ def embed( Returns ------- - EmbeddingsPageOutput + GoogleImageGenPageOutput Successful 
Response Examples @@ -4047,12 +4196,13 @@ def embed( client = Gooey( api_key="YOUR_API_KEY", ) - client.embed( - texts=["texts"], + client.image_from_web_search( + search_query="search_query", + text_prompt="text_prompt", ) """ _response = self._client_wrapper.httpx_client.request( - "v3/embeddings/async", + "v3/GoogleImageGen/async", method="POST", params={ "example_id": example_id, @@ -4060,8 +4210,19 @@ def embed( json={ "functions": functions, "variables": variables, - "texts": texts, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "search_query": search_query, + "text_prompt": text_prompt, "selected_model": selected_model, + "negative_prompt": negative_prompt, + "num_outputs": num_outputs, + "quality": quality, + "guidance_scale": guidance_scale, + "prompt_strength": prompt_strength, + "sd_2_upscaling": sd2upscaling, + "seed": seed, + "image_guidance_scale": image_guidance_scale, "settings": settings, }, request_options=request_options, @@ -4070,18 +4231,18 @@ def embed( try: if 200 <= _response.status_code < 300: return typing.cast( - EmbeddingsPageOutput, + GoogleImageGenPageOutput, parse_obj_as( - type_=EmbeddingsPageOutput, # type: ignore + type_=GoogleImageGenPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4111,99 +4272,49 @@ def embed( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def seo_people_also_ask_doc( + def remove_background( self, *, - search_query: str, + input_image: core.File, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnADocPageOutput: + ) -> ImageSegmentationPageOutput: """ Parameters ---------- - search_query : str + input_image : core.File + See core.File for more documentation example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] + functions : typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] - - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] + selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] - sampling_temperature : typing.Optional[float] + mask_threshold : typing.Optional[float] - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + rect_persepective_transform : typing.Optional[bool] - serp_search_location : typing.Optional[SerpSearchLocation] + reflection_opacity : typing.Optional[float] - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead + obj_scale : typing.Optional[float] - serp_search_type : typing.Optional[SerpSearchType] + obj_pos_x : typing.Optional[float] - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + obj_pos_y : typing.Optional[float] settings : typing.Optional[RunSettings] @@ -4212,7 +4323,7 @@ def seo_people_also_ask_doc( Returns ------- - RelatedQnADocPageOutput + ImageSegmentationPageOutput Successful Response Examples @@ -4222,62 +4333,47 @@ def seo_people_also_ask_doc( client = Gooey( api_key="YOUR_API_KEY", ) - client.seo_people_also_ask_doc( - search_query="search_query", - ) + client.remove_background() """ _response = self._client_wrapper.httpx_client.request( - "v3/related-qna-maker-doc/async", + "v3/ImageSegmentation/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, - "search_query": search_query, - 
"keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "mask_threshold": mask_threshold, + "rect_persepective_transform": rect_persepective_transform, + "reflection_opacity": reflection_opacity, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, "settings": settings, }, + files={ + "input_image": input_image, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - RelatedQnADocPageOutput, + ImageSegmentationPageOutput, parse_obj_as( - type_=RelatedQnADocPageOutput, # type: ignore + type_=ImageSegmentationPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4307,18 +4403,51 @@ def seo_people_also_ask_doc( raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - def health_status_get( - self, *, request_options: typing.Optional[RequestOptions] = None - ) -> typing.Optional[typing.Any]: + def upscale( + self, + *, + scale: int, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[core.File] = None, + input_video: typing.Optional[core.File] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> CompareUpscalerPageOutput: """ Parameters ---------- + scale : int + The final upsampling scale of the image + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[UpscaleRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + input_image : typing.Optional[core.File] + See core.File for more documentation + + input_video : typing.Optional[core.File] + See core.File for more documentation + + selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + + selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] + + settings : typing.Optional[RunSettings] + request_options : typing.Optional[RequestOptions] Request-specific configuration. 
Returns ------- - typing.Optional[typing.Any] + CompareUpscalerPageOutput Successful Response Examples @@ -4328,150 +4457,1324 @@ def health_status_get( client = Gooey( api_key="YOUR_API_KEY", ) - client.health_status_get() + client.upscale( + scale=1, + ) """ _response = self._client_wrapper.httpx_client.request( - "status", - method="GET", + "v3/compare-ai-upscalers/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "scale": scale, + "selected_models": selected_models, + "selected_bg_model": selected_bg_model, + "settings": settings, + }, + files={ + "input_image": input_image, + "input_video": input_video, + }, request_options=request_options, + omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - typing.Optional[typing.Any], + CompareUpscalerPageOutput, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=CompareUpscalerPageOutput, # type: ignore object_=_response.json(), ), ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) _response_json = _response.json() except JSONDecodeError: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - -class AsyncGooey: - """ - Use this class to access the different functions within the SDK. You can instantiate any number of clients with different configuration that will propagate to these functions. - - Parameters - ---------- - base_url : typing.Optional[str] - The base url to use for requests from the client. - - environment : GooeyEnvironment - The environment to use for requests from the client. from .environment import GooeyEnvironment - - - - Defaults to GooeyEnvironment.DEFAULT - - - - api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] - timeout : typing.Optional[float] - The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. - - follow_redirects : typing.Optional[bool] - Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. - - httpx_client : typing.Optional[httpx.AsyncClient] - The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. 
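The hunk above replaces the old `health_status_get` slot with the relocated `upscale` method (`v3/compare-ai-upscalers/async`): `scale` stays required, `input_image`/`input_video` become optional multipart uploads defaulting to `None`, and the 402 response is now surfaced as a `PaymentRequiredError` carrying a parsed `GenericErrorResponse`. A sketch under those assumptions; the file path and scale value are placeholders, and the error import path is an assumption (adjust it if the SDK re-exports the class elsewhere):

from gooey import Gooey
from gooey.errors import PaymentRequiredError  # import path assumed, not shown in this diff

client = Gooey(api_key="YOUR_API_KEY")

try:
    with open("photo.png", "rb") as image:  # placeholder path
        upscaled = client.upscale(
            scale=2,            # required: the final upsampling scale of the image
            input_image=image,  # optional upload, sent through files={"input_image": ...}
        )
except PaymentRequiredError as err:
    # 402 responses are now parsed into a GenericErrorResponse before being raised.
    print(err.body)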
- - Examples - -------- - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - """ - - def __init__( - self, - *, - base_url: typing.Optional[str] = None, - environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, - api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), - timeout: typing.Optional[float] = None, - follow_redirects: typing.Optional[bool] = True, - httpx_client: typing.Optional[httpx.AsyncClient] = None, - ): - _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None - if api_key is None: - raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") - self._client_wrapper = AsyncClientWrapper( - base_url=_get_base_url(base_url=base_url, environment=environment), - api_key=api_key, - httpx_client=httpx_client - if httpx_client is not None - else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) - if follow_redirects is not None - else httpx.AsyncClient(timeout=_defaulted_timeout), - timeout=_defaulted_timeout, - ) - self.copilot_integrations = AsyncCopilotIntegrationsClient(client_wrapper=self._client_wrapper) - self.copilot_for_your_enterprise = AsyncCopilotForYourEnterpriseClient(client_wrapper=self._client_wrapper) - self.evaluator = AsyncEvaluatorClient(client_wrapper=self._client_wrapper) - self.smart_gpt = AsyncSmartGptClient(client_wrapper=self._client_wrapper) - self.functions = AsyncFunctionsClient(client_wrapper=self._client_wrapper) - self.lip_syncing = AsyncLipSyncingClient(client_wrapper=self._client_wrapper) - self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) - - async def animate( + def embed( self, *, - animation_prompts: typing.Sequence[DeforumSdPageRequestAnimationPromptsItem], + texts: typing.Sequence[str], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, - animation_mode: typing.Optional[str] = OMIT, - zoom: typing.Optional[str] = OMIT, - translation_x: typing.Optional[str] = OMIT, - translation_y: typing.Optional[str] = OMIT, - rotation3d_x: typing.Optional[str] = OMIT, - rotation3d_y: typing.Optional[str] = OMIT, - rotation3d_z: typing.Optional[str] = OMIT, - fps: typing.Optional[int] = OMIT, - seed: typing.Optional[int] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DeforumSdPageOutput: + ) -> EmbeddingsPageOutput: """ Parameters ---------- - animation_prompts : typing.Sequence[DeforumSdPageRequestAnimationPromptsItem] + texts : typing.Sequence[str] example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[EmbeddingsPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - max_frames : typing.Optional[int] + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + + settings : 
typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + EmbeddingsPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.embed( + texts=["texts"], + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/embeddings/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "texts": texts, + "selected_model": selected_model, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + EmbeddingsPageOutput, + parse_obj_as( + type_=EmbeddingsPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def seo_people_also_ask_doc( + self, + *, + search_query: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnADocPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnADocPageOutput: + """ + Parameters + ---------- + search_query : str + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[RelatedQnADocPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + + documents : typing.Optional[typing.Sequence[str]] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnADocPageRequestSelectedModel] + + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
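The relocated `embed` method above, and the `seo_people_also_ask_doc` method that follows it, keep their JSON request bodies and `OMIT` defaults; only the 402 and 429 branches change to the typed `GenericErrorResponse` parsing. A short sketch reusing the shape of the docstring examples; the text and the search query are placeholders:

from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

# v3/embeddings/async: texts is the only required argument.
embeddings = client.embed(texts=["hello world"])  # placeholder text

# v3/related-qna-maker-doc/async: search_query is required; the remaining keywords keep their OMIT defaults.
qna = client.seo_people_also_ask_doc(search_query="what is gooey.ai")  # placeholder query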
+ + Returns + ------- + RelatedQnADocPageOutput + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.seo_people_also_ask_doc( + search_query="search_query", + ) + """ + _response = self._client_wrapper.httpx_client.request( + "v3/related-qna-maker-doc/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "keyword_query": keyword_query, + "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RelatedQnADocPageOutput, + parse_obj_as( + type_=RelatedQnADocPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + def health_status_get( + self, *, request_options: typing.Optional[RequestOptions] = None + ) -> typing.Optional[typing.Any]: + """ + Parameters + ---------- + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + typing.Optional[typing.Any] + Successful Response + + Examples + -------- + from gooey import Gooey + + client = Gooey( + api_key="YOUR_API_KEY", + ) + client.health_status_get() + """ + _response = self._client_wrapper.httpx_client.request( + "status", + method="GET", + request_options=request_options, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + typing.Optional[typing.Any], + parse_obj_as( + type_=typing.Optional[typing.Any], # type: ignore + object_=_response.json(), + ), + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + +class AsyncGooey: + """ + Use this class to access the different functions within the SDK. 
You can instantiate any number of clients with different configuration that will propagate to these functions. + + Parameters + ---------- + base_url : typing.Optional[str] + The base url to use for requests from the client. + + environment : GooeyEnvironment + The environment to use for requests from the client. from .environment import GooeyEnvironment + + + + Defaults to GooeyEnvironment.DEFAULT + + + + api_key : typing.Optional[typing.Union[str, typing.Callable[[], str]]] + timeout : typing.Optional[float] + The timeout to be used, in seconds, for requests. By default the timeout is 60 seconds, unless a custom httpx client is used, in which case this default is not enforced. + + follow_redirects : typing.Optional[bool] + Whether the default httpx client follows redirects or not, this is irrelevant if a custom httpx client is passed in. + + httpx_client : typing.Optional[httpx.AsyncClient] + The httpx client to use for making requests, a preconfigured client is used by default, however this is useful should you want to pass in any custom httpx configuration. + + Examples + -------- + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + """ + + def __init__( + self, + *, + base_url: typing.Optional[str] = None, + environment: GooeyEnvironment = GooeyEnvironment.DEFAULT, + api_key: typing.Optional[typing.Union[str, typing.Callable[[], str]]] = os.getenv("GOOEY_API_KEY"), + timeout: typing.Optional[float] = None, + follow_redirects: typing.Optional[bool] = True, + httpx_client: typing.Optional[httpx.AsyncClient] = None, + ): + _defaulted_timeout = timeout if timeout is not None else 60 if httpx_client is None else None + if api_key is None: + raise ApiError(body="The client must be instantiated be either passing in api_key or setting GOOEY_API_KEY") + self._client_wrapper = AsyncClientWrapper( + base_url=_get_base_url(base_url=base_url, environment=environment), + api_key=api_key, + httpx_client=httpx_client + if httpx_client is not None + else httpx.AsyncClient(timeout=_defaulted_timeout, follow_redirects=follow_redirects) + if follow_redirects is not None + else httpx.AsyncClient(timeout=_defaulted_timeout), + timeout=_defaulted_timeout, + ) + self.copilot = AsyncCopilotClient(client_wrapper=self._client_wrapper) + self.misc = AsyncMiscClient(client_wrapper=self._client_wrapper) + + async def animate( + self, + *, + animation_prompts: typing.Sequence[DeforumSdPageRequestAnimationPromptsItem], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + max_frames: typing.Optional[int] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + animation_mode: typing.Optional[str] = OMIT, + zoom: typing.Optional[str] = OMIT, + translation_x: typing.Optional[str] = OMIT, + translation_y: typing.Optional[str] = OMIT, + rotation3d_x: typing.Optional[str] = OMIT, + rotation3d_y: typing.Optional[str] = OMIT, + rotation3d_z: typing.Optional[str] = OMIT, + fps: typing.Optional[int] = OMIT, + seed: typing.Optional[int] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> DeforumSdPageOutput: + """ + Parameters + ---------- + animation_prompts : typing.Sequence[DeforumSdPageRequestAnimationPromptsItem] + + example_id : typing.Optional[str] + + functions : 
typing.Optional[typing.Sequence[DeforumSdPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + max_frames : typing.Optional[int] + + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + + animation_mode : typing.Optional[str] + + zoom : typing.Optional[str] + + translation_x : typing.Optional[str] + + translation_y : typing.Optional[str] + + rotation3d_x : typing.Optional[str] + + rotation3d_y : typing.Optional[str] + + rotation3d_z : typing.Optional[str] + + fps : typing.Optional[int] + + seed : typing.Optional[int] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + DeforumSdPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey, DeforumSdPageRequestAnimationPromptsItem + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.animate( + animation_prompts=[ + DeforumSdPageRequestAnimationPromptsItem( + frame="frame", + prompt="prompt", + ) + ], + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/DeforumSD/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "animation_prompts": animation_prompts, + "max_frames": max_frames, + "selected_model": selected_model, + "animation_mode": animation_mode, + "zoom": zoom, + "translation_x": translation_x, + "translation_y": translation_y, + "rotation_3d_x": rotation3d_x, + "rotation_3d_y": rotation3d_y, + "rotation_3d_z": rotation3d_z, + "fps": fps, + "seed": seed, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + DeforumSdPageOutput, + parse_obj_as( + type_=DeforumSdPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def qr_code( + self, + *, + text_prompt: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + qr_code_data: typing.Optional[str] = None, + qr_code_input_image: typing.Optional[core.File] = None, + qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = None, + qr_code_file: typing.Optional[core.File] = None, + use_url_shortener: typing.Optional[bool] = None, + negative_prompt: typing.Optional[str] = None, + image_prompt: typing.Optional[str] = None, + 
image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeRequestImagePromptControlnetModelsItem] + ] = None, + image_prompt_strength: typing.Optional[float] = None, + image_prompt_scale: typing.Optional[float] = None, + image_prompt_pos_x: typing.Optional[float] = None, + image_prompt_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, + seed: typing.Optional[int] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> QrCodeGeneratorPageOutput: + """ + Parameters + ---------- + text_prompt : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.List[QrCodeRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + qr_code_data : typing.Optional[str] + + qr_code_input_image : typing.Optional[core.File] + See core.File for more documentation + + qr_code_vcard : typing.Optional[QrCodeRequestQrCodeVcard] + + qr_code_file : typing.Optional[core.File] + See core.File for more documentation + + use_url_shortener : typing.Optional[bool] + + negative_prompt : typing.Optional[str] + + image_prompt : typing.Optional[str] + + image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + + image_prompt_strength : typing.Optional[float] + + image_prompt_scale : typing.Optional[float] + + image_prompt_pos_x : typing.Optional[float] + + image_prompt_pos_y : typing.Optional[float] + + selected_model : typing.Optional[QrCodeRequestSelectedModel] + + selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + + output_width : typing.Optional[int] + + output_height : typing.Optional[int] + + guidance_scale : typing.Optional[float] + + controlnet_conditioning_scale : typing.Optional[typing.List[float]] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[int] + + scheduler : typing.Optional[QrCodeRequestScheduler] + + seed : typing.Optional[int] + + obj_scale : typing.Optional[float] + + obj_pos_x : typing.Optional[float] + + obj_pos_y : typing.Optional[float] + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
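For orientation, a minimal usage sketch of the async `qr_code` call above. The file name, prompt, and encoded URL are placeholders, and it assumes `core.File` accepts an open binary file handle:

import asyncio

from gooey import AsyncGooey

client = AsyncGooey(
    api_key="YOUR_API_KEY",
)


async def main() -> None:
    # qr_code_input_image is sent as multipart form data; an open binary
    # file handle is assumed to satisfy core.File here.
    with open("logo.png", "rb") as qr_image:  # placeholder file
        result = await client.qr_code(
            text_prompt="a watercolor landscape",  # placeholder prompt
            qr_code_data="https://gooey.ai",  # placeholder URL to encode
            qr_code_input_image=qr_image,
        )
    print(result)


asyncio.run(main())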
+ + Returns + ------- + QrCodeGeneratorPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.qr_code( + text_prompt="text_prompt", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/art-qr-code/async", + method="POST", + params={ + "example_id": example_id, + }, + data={ + "functions": functions, + "variables": variables, + "qr_code_data": qr_code_data, + "qr_code_vcard": qr_code_vcard, + "use_url_shortener": use_url_shortener, + "text_prompt": text_prompt, + "negative_prompt": negative_prompt, + "image_prompt": image_prompt, + "image_prompt_controlnet_models": image_prompt_controlnet_models, + "image_prompt_strength": image_prompt_strength, + "image_prompt_scale": image_prompt_scale, + "image_prompt_pos_x": image_prompt_pos_x, + "image_prompt_pos_y": image_prompt_pos_y, + "selected_model": selected_model, + "selected_controlnet_model": selected_controlnet_model, + "output_width": output_width, + "output_height": output_height, + "guidance_scale": guidance_scale, + "controlnet_conditioning_scale": controlnet_conditioning_scale, + "num_outputs": num_outputs, + "quality": quality, + "scheduler": scheduler, + "seed": seed, + "obj_scale": obj_scale, + "obj_pos_x": obj_pos_x, + "obj_pos_y": obj_pos_y, + "settings": settings, + }, + files={ + "qr_code_input_image": qr_code_input_image, + "qr_code_file": qr_code_file, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + QrCodeGeneratorPageOutput, + parse_obj_as( + type_=QrCodeGeneratorPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def seo_people_also_ask( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: 
typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> RelatedQnAPageOutput: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. 
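A rough sketch of how the `dense_weight` knob documented above might be used with `seo_people_also_ask` (the query and site filter are placeholders):

import asyncio

from gooey import AsyncGooey

client = AsyncGooey(
    api_key="YOUR_API_KEY",
)


async def main() -> None:
    # 0.5 weights dense (semantic) and sparse (keyword) retrieval equally;
    # 1.0 would rely purely on dense embeddings.
    result = await client.seo_people_also_ask(
        search_query="best running shoes",  # placeholder query
        site_filter="example.com",  # placeholder site filter
        dense_weight=0.5,
        max_references=3,
    )
    print(result)


asyncio.run(main())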
+ + Returns + ------- + RelatedQnAPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.seo_people_also_ask( + search_query="search_query", + site_filter="site_filter", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/related-qna-maker/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "functions": functions, + "variables": variables, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + RelatedQnAPageOutput, + parse_obj_as( + type_=RelatedQnAPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def seo_content( + self, + *, + search_query: str, + keywords: str, + title: str, + company_url: str, + example_id: typing.Optional[str] = None, + task_instructions: typing.Optional[str] = OMIT, + enable_html: typing.Optional[bool] = OMIT, + selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + enable_crosslinks: typing.Optional[bool] = OMIT, + seed: typing.Optional[int] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: 
typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> SeoSummaryPageOutput: + """ + Parameters + ---------- + search_query : str + + keywords : str + + title : str + + company_url : str + + example_id : typing.Optional[str] + + task_instructions : typing.Optional[str] + + enable_html : typing.Optional[bool] + + selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + enable_crosslinks : typing.Optional[bool] + + seed : typing.Optional[int] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + + serp_search_location : typing.Optional[SerpSearchLocation] + + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead + + serp_search_type : typing.Optional[SerpSearchType] + + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead + + settings : typing.Optional[RunSettings] + + request_options : typing.Optional[RequestOptions] + Request-specific configuration. + + Returns + ------- + SeoSummaryPageOutput + Successful Response + + Examples + -------- + import asyncio + + from gooey import AsyncGooey + + client = AsyncGooey( + api_key="YOUR_API_KEY", + ) + + + async def main() -> None: + await client.seo_content( + search_query="search_query", + keywords="keywords", + title="title", + company_url="company_url", + ) + + + asyncio.run(main()) + """ + _response = await self._client_wrapper.httpx_client.request( + "v3/SEOSummary/async", + method="POST", + params={ + "example_id": example_id, + }, + json={ + "search_query": search_query, + "keywords": keywords, + "title": title, + "company_url": company_url, + "task_instructions": task_instructions, + "enable_html": enable_html, + "selected_model": selected_model, + "max_search_urls": max_search_urls, + "enable_crosslinks": enable_crosslinks, + "seed": seed, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, + "settings": settings, + }, + request_options=request_options, + omit=OMIT, + ) + try: + if 200 <= _response.status_code < 300: + return typing.cast( + SeoSummaryPageOutput, + parse_obj_as( + type_=SeoSummaryPageOutput, # type: ignore + object_=_response.json(), + ), + ) + if _response.status_code == 402: + raise PaymentRequiredError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 422: + raise UnprocessableEntityError( + typing.cast( + HttpValidationError, + parse_obj_as( + type_=HttpValidationError, # type: ignore + object_=_response.json(), + ), + ) + ) + if _response.status_code == 429: + raise TooManyRequestsError( + typing.cast( + GenericErrorResponse, + parse_obj_as( + type_=GenericErrorResponse, # type: ignore + object_=_response.json(), + ), + ) + ) + _response_json = _response.json() + except JSONDecodeError: + raise 
ApiError(status_code=_response.status_code, body=_response.text) + raise ApiError(status_code=_response.status_code, body=_response_json) + + async def web_search_llm( + self, + *, + search_query: str, + site_filter: str, + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, + max_search_urls: typing.Optional[int] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, + serp_search_type: typing.Optional[SerpSearchType] = OMIT, + scaleserp_search_field: typing.Optional[str] = OMIT, + settings: typing.Optional[RunSettings] = OMIT, + request_options: typing.Optional[RequestOptions] = None, + ) -> GoogleGptPageOutput: + """ + Parameters + ---------- + search_query : str + + site_filter : str + + example_id : typing.Optional[str] + + functions : typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] + + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments + + task_instructions : typing.Optional[str] + + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] + + max_search_urls : typing.Optional[int] + + max_references : typing.Optional[int] + + max_context_words : typing.Optional[int] + + scroll_jump : typing.Optional[int] + + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + + dense_weight : typing.Optional[float] + + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
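Every call in this client maps 402, 422, and 429 responses to typed exceptions, so callers can branch on them. A hedged sketch around `web_search_llm` (the import path `gooey.errors` is assumed; the query and site filter are placeholders):

import asyncio

from gooey import AsyncGooey
from gooey.errors import PaymentRequiredError, TooManyRequestsError  # assumed import path

client = AsyncGooey(
    api_key="YOUR_API_KEY",
)


async def main() -> None:
    try:
        result = await client.web_search_llm(
            search_query="what is retrieval augmented generation",  # placeholder query
            site_filter="example.com",  # placeholder site filter
        )
        print(result)
    except PaymentRequiredError as err:
        print("Out of credits:", err.body)  # .body is assumed to carry the parsed error payload
    except TooManyRequestsError as err:
        print("Rate limited:", err.body)


asyncio.run(main())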
+ - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + avoid_repetition : typing.Optional[bool] - animation_mode : typing.Optional[str] + num_outputs : typing.Optional[int] - zoom : typing.Optional[str] + quality : typing.Optional[float] - translation_x : typing.Optional[str] + max_tokens : typing.Optional[int] - translation_y : typing.Optional[str] + sampling_temperature : typing.Optional[float] - rotation3d_x : typing.Optional[str] + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - rotation3d_y : typing.Optional[str] + serp_search_location : typing.Optional[SerpSearchLocation] - rotation3d_z : typing.Optional[str] + scaleserp_locations : typing.Optional[typing.Sequence[str]] + DEPRECATED: use `serp_search_location` instead - fps : typing.Optional[int] + serp_search_type : typing.Optional[SerpSearchType] - seed : typing.Optional[int] + scaleserp_search_field : typing.Optional[str] + DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -4480,14 +5783,14 @@ async def animate( Returns ------- - DeforumSdPageOutput + GoogleGptPageOutput Successful Response Examples -------- import asyncio - from gooey import AsyncGooey, DeforumSdPageRequestAnimationPromptsItem + from gooey import AsyncGooey client = AsyncGooey( api_key="YOUR_API_KEY", @@ -4495,20 +5798,16 @@ async def animate( async def main() -> None: - await client.animate( - animation_prompts=[ - DeforumSdPageRequestAnimationPromptsItem( - frame="frame", - prompt="prompt", - ) - ], + await client.web_search_llm( + search_query="search_query", + site_filter="site_filter", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/DeforumSD/async", + "v3/google-gpt/async", method="POST", params={ "example_id": example_id, @@ -4516,18 +5815,27 @@ async def main() -> None: json={ "functions": functions, "variables": variables, - "animation_prompts": animation_prompts, - "max_frames": max_frames, + "search_query": search_query, + "site_filter": site_filter, + "task_instructions": task_instructions, + "query_instructions": query_instructions, "selected_model": selected_model, - "animation_mode": animation_mode, - "zoom": zoom, - "translation_x": translation_x, - "translation_y": translation_y, - "rotation_3d_x": rotation3d_x, - "rotation_3d_y": rotation3d_y, - "rotation_3d_z": rotation3d_z, - "fps": fps, - "seed": seed, + "max_search_urls": max_search_urls, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "serp_search_location": serp_search_location, + "scaleserp_locations": scaleserp_locations, + "serp_search_type": serp_search_type, + "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, request_options=request_options, @@ -4536,18 +5844,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - DeforumSdPageOutput, + GoogleGptPageOutput, parse_obj_as( - type_=DeforumSdPageOutput, # type: ignore + type_=GoogleGptPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - 
type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4577,106 +5885,51 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def qr_code( + async def personalize_email( self, *, - text_prompt: str, + email_address: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[QrCodeRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - qr_code_data: typing.Optional[str] = OMIT, - qr_code_input_image: typing.Optional[core.File] = OMIT, - qr_code_vcard: typing.Optional[QrCodeRequestQrCodeVcard] = OMIT, - qr_code_file: typing.Optional[core.File] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - image_prompt: typing.Optional[str] = OMIT, - image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = OMIT, - image_prompt_strength: typing.Optional[float] = OMIT, - image_prompt_scale: typing.Optional[float] = OMIT, - image_prompt_pos_x: typing.Optional[float] = OMIT, - image_prompt_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = OMIT, + input_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - scheduler: typing.Optional[QrCodeRequestScheduler] = OMIT, - seed: typing.Optional[int] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> QrCodeGeneratorPageOutput: + ) -> SocialLookupEmailPageOutput: """ Parameters ---------- - text_prompt : str + email_address : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[QrCodeRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - qr_code_data : typing.Optional[str] - - qr_code_input_image : typing.Optional[core.File] - See core.File for more documentation - - qr_code_vcard : typing.Optional[QrCodeRequestQrCodeVcard] - - qr_code_file : typing.Optional[core.File] - See core.File for more documentation - - use_url_shortener : typing.Optional[bool] - - negative_prompt : typing.Optional[str] - - image_prompt : typing.Optional[str] - - 
image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] - - image_prompt_strength : typing.Optional[float] - - image_prompt_scale : typing.Optional[float] - - image_prompt_pos_x : typing.Optional[float] - - image_prompt_pos_y : typing.Optional[float] - - selected_model : typing.Optional[QrCodeRequestSelectedModel] - - selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] - - output_width : typing.Optional[int] - - output_height : typing.Optional[int] + input_prompt : typing.Optional[str] - guidance_scale : typing.Optional[float] + selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] - controlnet_conditioning_scale : typing.Optional[typing.List[float]] + avoid_repetition : typing.Optional[bool] num_outputs : typing.Optional[int] - quality : typing.Optional[int] - - scheduler : typing.Optional[QrCodeRequestScheduler] - - seed : typing.Optional[int] + quality : typing.Optional[float] - obj_scale : typing.Optional[float] + max_tokens : typing.Optional[int] - obj_pos_x : typing.Optional[float] + sampling_temperature : typing.Optional[float] - obj_pos_y : typing.Optional[float] + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -4685,7 +5938,7 @@ async def qr_code( Returns ------- - QrCodeGeneratorPageOutput + SocialLookupEmailPageOutput Successful Response Examples @@ -4700,70 +5953,51 @@ async def qr_code( async def main() -> None: - await client.qr_code( - text_prompt="text_prompt", + await client.personalize_email( + email_address="email_address", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/art-qr-code/async", + "v3/SocialLookupEmail/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "qr_code_data": qr_code_data, - "qr_code_vcard": qr_code_vcard, - "use_url_shortener": use_url_shortener, - "text_prompt": text_prompt, - "negative_prompt": negative_prompt, - "image_prompt": image_prompt, - "image_prompt_controlnet_models": image_prompt_controlnet_models, - "image_prompt_strength": image_prompt_strength, - "image_prompt_scale": image_prompt_scale, - "image_prompt_pos_x": image_prompt_pos_x, - "image_prompt_pos_y": image_prompt_pos_y, + "email_address": email_address, + "input_prompt": input_prompt, "selected_model": selected_model, - "selected_controlnet_model": selected_controlnet_model, - "output_width": output_width, - "output_height": output_height, - "guidance_scale": guidance_scale, - "controlnet_conditioning_scale": controlnet_conditioning_scale, + "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, - "scheduler": scheduler, - "seed": seed, - "obj_scale": obj_scale, - "obj_pos_x": obj_pos_x, - "obj_pos_y": obj_pos_y, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, "settings": settings, }, - files={ - "qr_code_input_image": qr_code_input_image, - "qr_code_file": qr_code_file, - }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - QrCodeGeneratorPageOutput, + SocialLookupEmailPageOutput, parse_obj_as( - type_=QrCodeGeneratorPageOutput, # type: ignore + type_=SocialLookupEmailPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( 
typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4793,93 +6027,53 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_people_also_ask( + async def bulk_run( self, *, - search_query: str, - site_filter: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RelatedQnAPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> RelatedQnAPageOutput: - """ - Parameters - ---------- - search_query : str - - site_filter : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[RelatedQnAPageRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[RelatedQnAPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] + documents: typing.List[core.File], + run_urls: typing.List[str], + input_columns: typing.Dict[str, str], + output_columns: typing.Dict[str, str], + example_id: typing.Optional[str] = None, + functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + eval_urls: typing.Optional[typing.List[str]] = None, + settings: typing.Optional[RunSettings] = None, + request_options: typing.Optional[RequestOptions] = None, + ) -> BulkRunnerPageOutput: + """ + Parameters + ---------- + documents : typing.List[core.File] + See core.File for more documentation - scroll_jump : typing.Optional[int] + run_urls : typing.List[str] - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + Provide one or more Gooey.AI workflow runs. + You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. 
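Since this version of `bulk_run` uploads `documents` as files rather than passing URLs, a minimal sketch might look like the following (file name, run URL, and column mappings are placeholders; an open binary file handle is assumed to satisfy `core.File`):

import asyncio

from gooey import AsyncGooey

client = AsyncGooey(
    api_key="YOUR_API_KEY",
)


async def main() -> None:
    # Column mappings must match the header names in the uploaded CSV.
    with open("input.csv", "rb") as input_csv:  # placeholder file
        result = await client.bulk_run(
            documents=[input_csv],
            run_urls=["https://gooey.ai/copilot/?run_id=example"],  # placeholder run URL
            input_columns={"question": "input_prompt"},  # placeholder mapping
            output_columns={"output_text": "answer"},  # placeholder mapping
        )
    print(result)


asyncio.run(main())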
- dense_weight : typing.Optional[float] - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + input_columns : typing.Dict[str, str] + For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. - avoid_repetition : typing.Optional[bool] - num_outputs : typing.Optional[int] + output_columns : typing.Dict[str, str] - quality : typing.Optional[float] + For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. - max_tokens : typing.Optional[int] - sampling_temperature : typing.Optional[float] + example_id : typing.Optional[str] - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + functions : typing.Optional[typing.List[BulkRunRequestFunctionsItem]] - serp_search_location : typing.Optional[SerpSearchLocation] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead + eval_urls : typing.Optional[typing.List[str]] - serp_search_type : typing.Optional[SerpSearchType] + _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead settings : typing.Optional[RunSettings] @@ -4888,7 +6082,7 @@ async def seo_people_also_ask( Returns ------- - RelatedQnAPageOutput + BulkRunnerPageOutput Successful Response Examples @@ -4903,64 +6097,51 @@ async def seo_people_also_ask( async def main() -> None: - await client.seo_people_also_ask( - search_query="search_query", - site_filter="site_filter", + await client.bulk_run( + run_urls=["run_urls"], + input_columns={"key": "value"}, + output_columns={"key": "value"}, ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/related-qna-maker/async", + "v3/bulk-runner/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, - "search_query": search_query, - "site_filter": site_filter, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "run_urls": run_urls, + "input_columns": input_columns, + "output_columns": output_columns, + "eval_urls": eval_urls, "settings": settings, }, + files={ + "documents": documents, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - RelatedQnAPageOutput, + BulkRunnerPageOutput, parse_obj_as( - type_=RelatedQnAPageOutput, # type: ignore + 
type_=BulkRunnerPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -4990,57 +6171,54 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def seo_content( + async def eval( self, *, - search_query: str, - keywords: str, - title: str, - company_url: str, + documents: typing.Sequence[str], example_id: typing.Optional[str] = None, - task_instructions: typing.Optional[str] = OMIT, - enable_html: typing.Optional[bool] = OMIT, - selected_model: typing.Optional[SeoSummaryPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - enable_crosslinks: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, + functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, + eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, + agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, + selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> SeoSummaryPageOutput: + ) -> BulkEvalPageOutput: """ Parameters ---------- - search_query : str + documents : typing.Sequence[str] - keywords : str - - title : str + Upload or link to a CSV or google sheet that contains your sample input data. + For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs. + Remember to includes header names in your CSV too. - company_url : str example_id : typing.Optional[str] - task_instructions : typing.Optional[str] + functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] - enable_html : typing.Optional[bool] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[SeoSummaryPageRequestSelectedModel] + eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] - max_search_urls : typing.Optional[int] + Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. 
+ _The `columns` dictionary can be used to reference the spreadsheet columns._ - enable_crosslinks : typing.Optional[bool] - seed : typing.Optional[int] + agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] + + Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). + + + selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -5052,17 +6230,7 @@ async def seo_content( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5071,7 +6239,7 @@ async def seo_content( Returns ------- - SeoSummaryPageOutput + BulkEvalPageOutput Successful Response Examples @@ -5086,43 +6254,32 @@ async def seo_content( async def main() -> None: - await client.seo_content( - search_query="search_query", - keywords="keywords", - title="title", - company_url="company_url", + await client.eval( + documents=["documents"], ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SEOSummary/async", + "v3/bulk-eval/async", method="POST", params={ "example_id": example_id, }, json={ - "search_query": search_query, - "keywords": keywords, - "title": title, - "company_url": company_url, - "task_instructions": task_instructions, - "enable_html": enable_html, + "functions": functions, + "variables": variables, + "documents": documents, + "eval_prompts": eval_prompts, + "agg_functions": agg_functions, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "enable_crosslinks": enable_crosslinks, - "seed": seed, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, "settings": settings, }, request_options=request_options, @@ -5131,18 +6288,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - SeoSummaryPageOutput, + BulkEvalPageOutput, parse_obj_as( - type_=SeoSummaryPageOutput, # type: ignore + type_=BulkEvalPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5172,71 +6329,54 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def web_search_llm( + async def synthesize_data( self, *, - search_query: str, - site_filter: str, + documents: typing.List[core.File], example_id: 
typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[GoogleGptPageRequestSelectedModel] = OMIT, - max_search_urls: typing.Optional[int] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, - scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - serp_search_type: typing.Optional[SerpSearchType] = OMIT, - scaleserp_search_field: typing.Optional[str] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + sheet_url: typing.Optional[core.File] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + task_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> GoogleGptPageOutput: + ) -> DocExtractPageOutput: """ Parameters ---------- - search_query : str - - site_filter : str + documents : typing.List[core.File] + See core.File for more documentation example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[GoogleGptPageRequestFunctionsItem]] + functions : typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[GoogleGptPageRequestSelectedModel] - - max_search_urls : typing.Optional[int] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] + sheet_url : typing.Optional[core.File] + See core.File for more documentation - scroll_jump : typing.Optional[int] + selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + google_translate_target : typing.Optional[str] - dense_weight : typing.Optional[float] + glossary_document : typing.Optional[core.File] + See core.File for 
more documentation - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. + task_instructions : typing.Optional[str] + selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -5248,17 +6388,7 @@ async def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - - serp_search_location : typing.Optional[SerpSearchLocation] - - scaleserp_locations : typing.Optional[typing.Sequence[str]] - DEPRECATED: use `serp_search_location` instead - - serp_search_type : typing.Optional[SerpSearchType] - - scaleserp_search_field : typing.Optional[str] - DEPRECATED: use `serp_search_type` instead + response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5267,7 +6397,7 @@ async def web_search_llm( Returns ------- - GoogleGptPageOutput + DocExtractPageOutput Successful Response Examples @@ -5282,64 +6412,55 @@ async def web_search_llm( async def main() -> None: - await client.web_search_llm( - search_query="search_query", - site_filter="site_filter", - ) + await client.synthesize_data() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/google-gpt/async", + "v3/doc-extract/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, - "search_query": search_query, - "site_filter": site_filter, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, "task_instructions": task_instructions, - "query_instructions": query_instructions, "selected_model": selected_model, - "max_search_urls": max_search_urls, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, "max_tokens": max_tokens, "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "serp_search_location": serp_search_location, - "scaleserp_locations": scaleserp_locations, - "serp_search_type": serp_search_type, - "scaleserp_search_field": scaleserp_search_field, + "response_format_type": response_format_type, "settings": settings, }, + files={ + "documents": documents, + "sheet_url": sheet_url, + "glossary_document": glossary_document, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - GoogleGptPageOutput, + DocExtractPageOutput, parse_obj_as( - type_=GoogleGptPageOutput, # type: ignore + type_=DocExtractPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5369,39 +6490,36 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def personalize_email( + async def llm( self, *, - email_address: str, example_id: 
typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, input_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SocialLookupEmailPageRequestSelectedModel] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> SocialLookupEmailPageOutput: + ) -> CompareLlmPageOutput: """ Parameters ---------- - email_address : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[SocialLookupEmailPageRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments input_prompt : typing.Optional[str] - selected_model : typing.Optional[SocialLookupEmailPageRequestSelectedModel] + selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] avoid_repetition : typing.Optional[bool] @@ -5413,7 +6531,7 @@ async def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5422,7 +6540,7 @@ async def personalize_email( Returns ------- - SocialLookupEmailPageOutput + CompareLlmPageOutput Successful Response Examples @@ -5437,15 +6555,13 @@ async def personalize_email( async def main() -> None: - await client.personalize_email( - email_address="email_address", - ) + await client.llm() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/SocialLookupEmail/async", + "v3/CompareLLM/async", method="POST", params={ "example_id": example_id, @@ -5453,9 +6569,8 @@ async def main() -> None: json={ "functions": functions, "variables": variables, - "email_address": email_address, "input_prompt": input_prompt, - "selected_model": selected_model, + "selected_models": selected_models, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, @@ -5470,18 +6585,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - SocialLookupEmailPageOutput, + CompareLlmPageOutput, parse_obj_as( - type_=SocialLookupEmailPageOutput, # type: ignore + type_=CompareLlmPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5511,53 +6626,85 @@ async def main() -> None: raise 
ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def bulk_run( + async def rag( self, *, - documents: typing.List[core.File], - run_urls: typing.List[str], - input_columns: typing.Dict[str, str], - output_columns: typing.Dict[str, str], + search_query: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[BulkRunRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_urls: typing.Optional[typing.List[str]] = OMIT, + keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + documents: typing.Optional[typing.Sequence[str]] = OMIT, + max_references: typing.Optional[int] = OMIT, + max_context_words: typing.Optional[int] = OMIT, + scroll_jump: typing.Optional[int] = OMIT, + doc_extract_url: typing.Optional[str] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + dense_weight: typing.Optional[float] = OMIT, + task_instructions: typing.Optional[str] = OMIT, + query_instructions: typing.Optional[str] = OMIT, + selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + avoid_repetition: typing.Optional[bool] = OMIT, + num_outputs: typing.Optional[int] = OMIT, + quality: typing.Optional[float] = OMIT, + max_tokens: typing.Optional[int] = OMIT, + sampling_temperature: typing.Optional[float] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> BulkRunnerPageOutput: + ) -> DocSearchPageOutput: """ Parameters ---------- - documents : typing.List[core.File] - See core.File for more documentation + search_query : str - run_urls : typing.List[str] + example_id : typing.Optional[str] - Provide one or more Gooey.AI workflow runs. - You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them. + functions : typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] + variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] + Variables to be used as Jinja prompt templates and in functions as arguments - input_columns : typing.Dict[str, str] + keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it. + documents : typing.Optional[typing.Sequence[str]] + max_references : typing.Optional[int] - output_columns : typing.Dict[str, str] + max_context_words : typing.Optional[int] - For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data. + scroll_jump : typing.Optional[int] + + doc_extract_url : typing.Optional[str] + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - example_id : typing.Optional[str] + dense_weight : typing.Optional[float] - functions : typing.Optional[typing.List[BulkRunRequestFunctionsItem]] + Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. + Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. 
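For reference, the renamed doc-search method can be exercised with a minimal call like the sketch below: search_query is the only required argument, and retrieval controls such as documents, embedding_model, and dense_weight remain optional keyword arguments. This is a hedged sketch, not the SDK's documented snippet; it assumes the async client is exported as AsyncGooey (that import is not shown in this patch) and uses placeholder values for the API key, query, and document URL.

import asyncio

from gooey import AsyncGooey  # assumed export name for the async client; not shown in this patch

client = AsyncGooey(api_key="YOUR_API_KEY")  # placeholder key


async def main() -> None:
    # dense_weight blends sparse vs dense retrieval: 0 = keyword-only, 1 = semantic-only, 0.5 = equal weight.
    result = await client.rag(
        search_query="refund policy for damaged goods",  # placeholder query
        documents=["https://example.com/handbook.pdf"],  # placeholder document URL
        dense_weight=0.5,
    )
    print(result)


asyncio.run(main())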
- variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - eval_urls : typing.Optional[typing.List[str]] + task_instructions : typing.Optional[str] - _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs. + query_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSearchPageRequestSelectedModel] + + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + + avoid_repetition : typing.Optional[bool] + + num_outputs : typing.Optional[int] + + quality : typing.Optional[float] + + max_tokens : typing.Optional[int] + + sampling_temperature : typing.Optional[float] + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5566,7 +6713,7 @@ async def bulk_run( Returns ------- - BulkRunnerPageOutput + DocSearchPageOutput Successful Response Examples @@ -5581,32 +6728,42 @@ async def bulk_run( async def main() -> None: - await client.bulk_run( - run_urls=["run_urls"], - input_columns={"key": "value"}, - output_columns={"key": "value"}, + await client.rag( + search_query="search_query", ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-runner/async", + "v3/doc-search/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "run_urls": run_urls, - "input_columns": input_columns, - "output_columns": output_columns, - "eval_urls": eval_urls, - "settings": settings, - }, - files={ + "search_query": search_query, + "keyword_query": keyword_query, "documents": documents, + "max_references": max_references, + "max_context_words": max_context_words, + "scroll_jump": scroll_jump, + "doc_extract_url": doc_extract_url, + "embedding_model": embedding_model, + "dense_weight": dense_weight, + "task_instructions": task_instructions, + "query_instructions": query_instructions, + "selected_model": selected_model, + "citation_style": citation_style, + "avoid_repetition": avoid_repetition, + "num_outputs": num_outputs, + "quality": quality, + "max_tokens": max_tokens, + "sampling_temperature": sampling_temperature, + "response_format_type": response_format_type, + "settings": settings, }, request_options=request_options, omit=OMIT, @@ -5614,18 +6771,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - BulkRunnerPageOutput, + DocSearchPageOutput, parse_obj_as( - type_=BulkRunnerPageOutput, # type: ignore + type_=DocSearchPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5655,54 +6812,45 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def synthesize_data( + async def smart_gpt( self, *, - documents: typing.List[core.File], + input_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] = OMIT, + functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, variables: typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] = OMIT, - sheet_url: typing.Optional[core.File] = OMIT, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[core.File] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SynthesizeDataRequestSelectedModel] = OMIT, + cot_prompt: typing.Optional[str] = OMIT, + reflexion_prompt: typing.Optional[str] = OMIT, + dera_prompt: typing.Optional[str] = OMIT, + selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocExtractPageOutput: + ) -> SmartGptPageOutput: """ Parameters ---------- - documents : typing.List[core.File] - See core.File for more documentation + input_prompt : str example_id : typing.Optional[str] - functions : typing.Optional[typing.List[SynthesizeDataRequestFunctionsItem]] + functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - sheet_url : typing.Optional[core.File] - See core.File for more documentation - - selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] - - google_translate_target : typing.Optional[str] + cot_prompt : typing.Optional[str] - glossary_document : typing.Optional[core.File] - See core.File for more documentation + reflexion_prompt : typing.Optional[str] - task_instructions : typing.Optional[str] + dera_prompt : typing.Optional[str] - selected_model : typing.Optional[SynthesizeDataRequestSelectedModel] + selected_model : typing.Optional[SmartGptPageRequestSelectedModel] avoid_repetition : typing.Optional[bool] @@ -5714,7 +6862,7 @@ async def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5723,7 +6871,7 @@ async def synthesize_data( Returns ------- - DocExtractPageOutput + SmartGptPageOutput Successful Response Examples @@ -5738,23 +6886,26 @@ async def synthesize_data( async def main() -> None: - await client.synthesize_data() + await client.smart_gpt( + input_prompt="input_prompt", + ) asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-extract/async", + "v3/SmartGPT/async", method="POST", params={ "example_id": example_id, }, - data={ + json={ "functions": functions, "variables": variables, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "task_instructions": task_instructions, + "input_prompt": input_prompt, + "cot_prompt": cot_prompt, + "reflexion_prompt": reflexion_prompt, + "dera_prompt": dera_prompt, "selected_model": selected_model, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, @@ -5764,29 +6915,24 @@ async def 
main() -> None: "response_format_type": response_format_type, "settings": settings, }, - files={ - "documents": documents, - "sheet_url": sheet_url, - "glossary_document": glossary_document, - }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - DocExtractPageOutput, + SmartGptPageOutput, parse_obj_as( - type_=DocExtractPageOutput, # type: ignore + type_=SmartGptPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5816,36 +6962,52 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def llm( + async def doc_summary( self, *, + documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + task_instructions: typing.Optional[str] = None, + merge_instructions: typing.Optional[str] = None, + selected_model: typing.Optional[DocSummaryRequestSelectedModel] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + google_translate_target: typing.Optional[str] = None, + avoid_repetition: typing.Optional[bool] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[float] = None, + max_tokens: typing.Optional[int] = None, + sampling_temperature: typing.Optional[float] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> CompareLlmPageOutput: + ) -> DocSummaryPageOutput: """ Parameters ---------- + documents : typing.List[core.File] + See core.File for more documentation + example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[CompareLlmPageRequestFunctionsItem]] + functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - input_prompt : typing.Optional[str] + task_instructions : typing.Optional[str] - selected_models : typing.Optional[typing.Sequence[CompareLlmPageRequestSelectedModelsItem]] + merge_instructions : typing.Optional[str] + + selected_model : typing.Optional[DocSummaryRequestSelectedModel] + + chain_type : 
typing.Optional[typing.Literal["map_reduce"]] + + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + + google_translate_target : typing.Optional[str] avoid_repetition : typing.Optional[bool] @@ -5857,7 +7019,7 @@ async def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -5866,7 +7028,7 @@ async def llm( Returns ------- - CompareLlmPageOutput + DocSummaryPageOutput Successful Response Examples @@ -5881,22 +7043,26 @@ async def llm( async def main() -> None: - await client.llm() + await client.doc_summary() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/CompareLLM/async", + "v3/doc-summary/async", method="POST", params={ "example_id": example_id, }, - json={ + data={ "functions": functions, "variables": variables, - "input_prompt": input_prompt, - "selected_models": selected_models, + "task_instructions": task_instructions, + "merge_instructions": merge_instructions, + "selected_model": selected_model, + "chain_type": chain_type, + "selected_asr_model": selected_asr_model, + "google_translate_target": google_translate_target, "avoid_repetition": avoid_repetition, "num_outputs": num_outputs, "quality": quality, @@ -5905,24 +7071,27 @@ async def main() -> None: "response_format_type": response_format_type, "settings": settings, }, + files={ + "documents": documents, + }, request_options=request_options, omit=OMIT, ) try: if 200 <= _response.status_code < 300: return typing.cast( - CompareLlmPageOutput, + DocSummaryPageOutput, parse_obj_as( - type_=CompareLlmPageOutput, # type: ignore + type_=DocSummaryPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -5952,85 +7121,25 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def rag( + async def functions( self, *, - search_query: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] = OMIT, + code: typing.Optional[str] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSearchPageRequestSelectedModel] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: 
typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, - ) -> DocSearchPageOutput: + ) -> FunctionsPageOutput: """ Parameters ---------- - search_query : str - example_id : typing.Optional[str] - functions : typing.Optional[typing.Sequence[DocSearchPageRequestFunctionsItem]] + code : typing.Optional[str] + The JS code to be executed. variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - doc_extract_url : typing.Optional[str] - - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSearchPageRequestSelectedModel] - - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + Variables to be used in the code settings : typing.Optional[RunSettings] @@ -6039,7 +7148,7 @@ async def rag( Returns ------- - DocSearchPageOutput + FunctionsPageOutput Successful Response Examples @@ -6054,41 +7163,20 @@ async def rag( async def main() -> None: - await client.rag( - search_query="search_query", - ) + await client.functions() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-search/async", + "v3/functions/async", method="POST", params={ "example_id": example_id, }, json={ - "functions": functions, + "code": code, "variables": variables, - "search_query": search_query, - "keyword_query": keyword_query, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "doc_extract_url": doc_extract_url, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "selected_model": selected_model, - "citation_style": citation_style, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, "settings": settings, }, request_options=request_options, @@ -6097,18 +7185,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - DocSearchPageOutput, + FunctionsPageOutput, parse_obj_as( - type_=DocSearchPageOutput, # type: ignore + type_=FunctionsPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - 
typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6138,64 +7226,50 @@ async def main() -> None: raise ApiError(status_code=_response.status_code, body=_response.text) raise ApiError(status_code=_response.status_code, body=_response_json) - async def doc_summary( + async def lipsync( self, *, - documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - merge_instructions: typing.Optional[str] = OMIT, - selected_model: typing.Optional[DocSummaryRequestSelectedModel] = OMIT, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[LipsyncRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, + input_audio: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, - ) -> DocSummaryPageOutput: + ) -> LipsyncPageOutput: """ Parameters ---------- - documents : typing.List[core.File] - See core.File for more documentation - example_id : typing.Optional[str] - functions : typing.Optional[typing.List[DocSummaryRequestFunctionsItem]] + functions : typing.Optional[typing.List[LipsyncRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - task_instructions : typing.Optional[str] - - merge_instructions : typing.Optional[str] - - selected_model : typing.Optional[DocSummaryRequestSelectedModel] - - chain_type : typing.Optional[typing.Literal["map_reduce"]] - - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + input_face : typing.Optional[core.File] + See core.File for more documentation - google_translate_target : typing.Optional[str] + face_padding_top : typing.Optional[int] - avoid_repetition : typing.Optional[bool] + face_padding_bottom : typing.Optional[int] - num_outputs : typing.Optional[int] + face_padding_left : typing.Optional[int] - quality : typing.Optional[float] + face_padding_right : typing.Optional[int] - max_tokens : typing.Optional[int] + sadtalker_settings : typing.Optional[LipsyncRequestSadtalkerSettings] - sampling_temperature : typing.Optional[float] + selected_model : 
typing.Optional[LipsyncRequestSelectedModel] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + input_audio : typing.Optional[core.File] + See core.File for more documentation settings : typing.Optional[RunSettings] @@ -6204,7 +7278,7 @@ async def doc_summary( Returns ------- - DocSummaryPageOutput + LipsyncPageOutput Successful Response Examples @@ -6219,13 +7293,13 @@ async def doc_summary( async def main() -> None: - await client.doc_summary() + await client.lipsync() asyncio.run(main()) """ _response = await self._client_wrapper.httpx_client.request( - "v3/doc-summary/async", + "v3/Lipsync/async", method="POST", params={ "example_id": example_id, @@ -6233,22 +7307,17 @@ async def main() -> None: data={ "functions": functions, "variables": variables, - "task_instructions": task_instructions, - "merge_instructions": merge_instructions, + "face_padding_top": face_padding_top, + "face_padding_bottom": face_padding_bottom, + "face_padding_left": face_padding_left, + "face_padding_right": face_padding_right, + "sadtalker_settings": sadtalker_settings, "selected_model": selected_model, - "chain_type": chain_type, - "selected_asr_model": selected_asr_model, - "google_translate_target": google_translate_target, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, "settings": settings, }, files={ - "documents": documents, + "input_face": input_face, + "input_audio": input_audio, }, request_options=request_options, omit=OMIT, @@ -6256,18 +7325,18 @@ async def main() -> None: try: if 200 <= _response.status_code < 300: return typing.cast( - DocSummaryPageOutput, + LipsyncPageOutput, parse_obj_as( - type_=DocSummaryPageOutput, # type: ignore + type_=LipsyncPageOutput, # type: ignore object_=_response.json(), ), ) if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6302,34 +7371,34 @@ async def lipsync_tts( *, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[core.File] = OMIT, - 
face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = OMIT, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[LipsyncTtsRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + uberduck_voice_name: typing.Optional[str] = None, + uberduck_speaking_rate: typing.Optional[float] = None, + google_voice_name: typing.Optional[str] = None, + google_speaking_rate: typing.Optional[float] = None, + google_pitch: typing.Optional[float] = None, + bark_history_prompt: typing.Optional[str] = None, + elevenlabs_voice_name: typing.Optional[str] = None, + elevenlabs_api_key: typing.Optional[str] = None, + elevenlabs_voice_id: typing.Optional[str] = None, + elevenlabs_model: typing.Optional[str] = None, + elevenlabs_stability: typing.Optional[float] = None, + elevenlabs_similarity_boost: typing.Optional[float] = None, + elevenlabs_style: typing.Optional[float] = None, + elevenlabs_speaker_boost: typing.Optional[bool] = None, + azure_voice_name: typing.Optional[str] = None, + openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + input_face: typing.Optional[core.File] = None, + face_padding_top: typing.Optional[int] = None, + face_padding_bottom: typing.Optional[int] = None, + face_padding_left: typing.Optional[int] = None, + face_padding_right: typing.Optional[int] = None, + sadtalker_settings: typing.Optional[LipsyncTtsRequestSadtalkerSettings] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncTtsPageOutput: """ @@ -6479,9 +7548,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6662,9 +7731,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6699,17 +7768,17 @@ async def speech_recognition( *, documents: typing.List[core.File], example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = OMIT, - language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = OMIT, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = OMIT, - google_translate_target: typing.Optional[str] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: 
typing.Optional[core.File] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[SpeechRecognitionRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + language: typing.Optional[str] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + google_translate_target: typing.Optional[str] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> AsrPageOutput: """ @@ -6807,9 +7876,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6949,9 +8018,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -6985,14 +8054,14 @@ async def translate( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[TranslateRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - texts: typing.Optional[typing.List[str]] = OMIT, - selected_model: typing.Optional[TranslateRequestSelectedModel] = OMIT, - translation_source: typing.Optional[str] = OMIT, - translation_target: typing.Optional[str] = OMIT, - glossary_document: typing.Optional[core.File] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[TranslateRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + texts: typing.Optional[typing.List[str]] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + translation_source: typing.Optional[str] = None, + translation_target: typing.Optional[str] = None, + glossary_document: typing.Optional[core.File] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> TranslationPageOutput: """ @@ -7076,9 +8145,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -7113,22 +8182,22 @@ async def remix_image( *, input_image: core.File, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - text_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = OMIT, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = OMIT, - negative_prompt: 
typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - prompt_strength: typing.Optional[float] = OMIT, - controlnet_conditioning_scale: typing.Optional[typing.List[float]] = OMIT, - seed: typing.Optional[int] = OMIT, - image_guidance_scale: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[RemixImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + text_prompt: typing.Optional[str] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + prompt_strength: typing.Optional[float] = None, + controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, + seed: typing.Optional[int] = None, + image_guidance_scale: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> Img2ImgPageOutput: """ @@ -7239,9 +8308,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -7405,9 +8474,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -7443,22 +8512,22 @@ async def product_image( input_image: core.File, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - sd2upscaling: typing.Optional[bool] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[ProductImageRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + mask_threshold: typing.Optional[float] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + 
negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + sd2upscaling: typing.Optional[bool] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ObjectInpaintingPageOutput: """ @@ -7574,9 +8643,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -7612,21 +8681,21 @@ async def portrait( input_image: core.File, text_prompt: str, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - face_scale: typing.Optional[float] = OMIT, - face_pos_x: typing.Optional[float] = OMIT, - face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[PortraitRequestSelectedModel] = OMIT, - negative_prompt: typing.Optional[str] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[int] = OMIT, - upscale_factor: typing.Optional[float] = OMIT, - output_width: typing.Optional[int] = OMIT, - output_height: typing.Optional[int] = OMIT, - guidance_scale: typing.Optional[float] = OMIT, - seed: typing.Optional[int] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[PortraitRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + face_scale: typing.Optional[float] = None, + face_pos_x: typing.Optional[float] = None, + face_pos_y: typing.Optional[float] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + negative_prompt: typing.Optional[str] = None, + num_outputs: typing.Optional[int] = None, + quality: typing.Optional[int] = None, + upscale_factor: typing.Optional[float] = None, + output_width: typing.Optional[int] = None, + output_height: typing.Optional[int] = None, + guidance_scale: typing.Optional[float] = None, + seed: typing.Optional[int] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> FaceInpaintingPageOutput: """ @@ -7739,9 +8808,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -7938,9 +9007,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -8098,9 +9167,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ 
-8135,16 +9204,16 @@ async def remove_background( *, input_image: core.File, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = OMIT, - mask_threshold: typing.Optional[float] = OMIT, - rect_persepective_transform: typing.Optional[bool] = OMIT, - reflection_opacity: typing.Optional[float] = OMIT, - obj_scale: typing.Optional[float] = OMIT, - obj_pos_x: typing.Optional[float] = OMIT, - obj_pos_y: typing.Optional[float] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[RemoveBackgroundRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + mask_threshold: typing.Optional[float] = None, + rect_persepective_transform: typing.Optional[bool] = None, + reflection_opacity: typing.Optional[float] = None, + obj_scale: typing.Optional[float] = None, + obj_pos_x: typing.Optional[float] = None, + obj_pos_y: typing.Optional[float] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> ImageSegmentationPageOutput: """ @@ -8237,9 +9306,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -8274,13 +9343,13 @@ async def upscale( *, scale: int, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_image: typing.Optional[core.File] = OMIT, - input_video: typing.Optional[core.File] = OMIT, - selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = OMIT, - selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, + functions: typing.Optional[typing.List[UpscaleRequestFunctionsItem]] = None, + variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, + input_image: typing.Optional[core.File] = None, + input_video: typing.Optional[core.File] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, + settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> CompareUpscalerPageOutput: """ @@ -8368,9 +9437,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -8482,9 +9551,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -8686,9 +9755,9 @@ async def main() 
-> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py new file mode 100644 index 0000000..5e2b26d --- /dev/null +++ b/src/gooey/copilot/__init__.py @@ -0,0 +1,47 @@ +# This file was auto-generated by Fern from our API Definition. + +from .types import ( + CopilotCompletionRequestAsrModel, + CopilotCompletionRequestCitationStyle, + CopilotCompletionRequestEmbeddingModel, + CopilotCompletionRequestFunctionsItem, + CopilotCompletionRequestFunctionsItemTrigger, + CopilotCompletionRequestLipsyncModel, + CopilotCompletionRequestMessagesItem, + CopilotCompletionRequestMessagesItemContent, + CopilotCompletionRequestMessagesItemContentItem, + CopilotCompletionRequestMessagesItemContentItem_ImageUrl, + CopilotCompletionRequestMessagesItemContentItem_Text, + CopilotCompletionRequestMessagesItemRole, + CopilotCompletionRequestOpenaiTtsModel, + CopilotCompletionRequestOpenaiVoiceName, + CopilotCompletionRequestResponseFormatType, + CopilotCompletionRequestSadtalkerSettings, + CopilotCompletionRequestSadtalkerSettingsPreprocess, + CopilotCompletionRequestSelectedModel, + CopilotCompletionRequestTranslationModel, + CopilotCompletionRequestTtsProvider, +) + +__all__ = [ + "CopilotCompletionRequestAsrModel", + "CopilotCompletionRequestCitationStyle", + "CopilotCompletionRequestEmbeddingModel", + "CopilotCompletionRequestFunctionsItem", + "CopilotCompletionRequestFunctionsItemTrigger", + "CopilotCompletionRequestLipsyncModel", + "CopilotCompletionRequestMessagesItem", + "CopilotCompletionRequestMessagesItemContent", + "CopilotCompletionRequestMessagesItemContentItem", + "CopilotCompletionRequestMessagesItemContentItem_ImageUrl", + "CopilotCompletionRequestMessagesItemContentItem_Text", + "CopilotCompletionRequestMessagesItemRole", + "CopilotCompletionRequestOpenaiTtsModel", + "CopilotCompletionRequestOpenaiVoiceName", + "CopilotCompletionRequestResponseFormatType", + "CopilotCompletionRequestSadtalkerSettings", + "CopilotCompletionRequestSadtalkerSettingsPreprocess", + "CopilotCompletionRequestSelectedModel", + "CopilotCompletionRequestTranslationModel", + "CopilotCompletionRequestTtsProvider", +] diff --git a/src/gooey/copilot_for_your_enterprise/client.py b/src/gooey/copilot/client.py similarity index 81% rename from src/gooey/copilot_for_your_enterprise/client.py rename to src/gooey/copilot/client.py index f2b6d71..ef6336c 100644 --- a/src/gooey/copilot_for_your_enterprise/client.py +++ b/src/gooey/copilot/client.py @@ -2,29 +2,29 @@ import typing from ..core.client_wrapper import SyncClientWrapper -from .types.async_video_bots_request_functions_item import AsyncVideoBotsRequestFunctionsItem +from .types.copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem from .. 
import core -from .types.async_video_bots_request_messages_item import AsyncVideoBotsRequestMessagesItem -from .types.async_video_bots_request_selected_model import AsyncVideoBotsRequestSelectedModel -from .types.async_video_bots_request_embedding_model import AsyncVideoBotsRequestEmbeddingModel -from .types.async_video_bots_request_citation_style import AsyncVideoBotsRequestCitationStyle -from .types.async_video_bots_request_asr_model import AsyncVideoBotsRequestAsrModel -from .types.async_video_bots_request_translation_model import AsyncVideoBotsRequestTranslationModel -from .types.async_video_bots_request_lipsync_model import AsyncVideoBotsRequestLipsyncModel -from .types.async_video_bots_request_response_format_type import AsyncVideoBotsRequestResponseFormatType -from .types.async_video_bots_request_tts_provider import AsyncVideoBotsRequestTtsProvider -from .types.async_video_bots_request_openai_voice_name import AsyncVideoBotsRequestOpenaiVoiceName -from .types.async_video_bots_request_openai_tts_model import AsyncVideoBotsRequestOpenaiTtsModel -from .types.async_video_bots_request_sadtalker_settings import AsyncVideoBotsRequestSadtalkerSettings +from .types.copilot_completion_request_messages_item import CopilotCompletionRequestMessagesItem +from .types.copilot_completion_request_selected_model import CopilotCompletionRequestSelectedModel +from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel +from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle +from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel +from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel +from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel +from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType +from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider +from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName +from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel +from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions from ..types.video_bots_page_output import VideoBotsPageOutput from ..core.pydantic_utilities import parse_obj_as from ..errors.payment_required_error import PaymentRequiredError +from ..types.generic_error_response import GenericErrorResponse from ..errors.unprocessable_entity_error import UnprocessableEntityError from ..types.http_validation_error import HttpValidationError from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse from json.decoder import JSONDecodeError from ..core.api_error import ApiError from ..core.client_wrapper import AsyncClientWrapper @@ -33,24 +33,24 @@ OMIT = typing.cast(typing.Any, ...) 
-class CopilotForYourEnterpriseClient: +class CopilotClient: def __init__(self, *, client_wrapper: SyncClientWrapper): self._client_wrapper = client_wrapper - def async_video_bots( + def completion( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] = None, + functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_prompt: typing.Optional[str] = None, input_audio: typing.Optional[str] = None, input_images: typing.Optional[typing.List[core.File]] = None, input_documents: typing.Optional[typing.List[core.File]] = None, doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] = None, + messages: typing.Optional[typing.List[CopilotCompletionRequestMessagesItem]] = None, bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncVideoBotsRequestSelectedModel] = None, + selected_model: typing.Optional[CopilotCompletionRequestSelectedModel] = None, document_model: typing.Optional[str] = None, task_instructions: typing.Optional[str] = None, query_instructions: typing.Optional[str] = None, @@ -59,25 +59,25 @@ def async_video_bots( max_references: typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[AsyncVideoBotsRequestEmbeddingModel] = None, + embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[AsyncVideoBotsRequestCitationStyle] = None, + citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsyncVideoBotsRequestAsrModel] = None, + asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[AsyncVideoBotsRequestTranslationModel] = None, + translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[AsyncVideoBotsRequestLipsyncModel] = None, + lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, tools: typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncVideoBotsRequestResponseFormatType] = None, - tts_provider: typing.Optional[AsyncVideoBotsRequestTtsProvider] = None, + response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, + tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -93,14 +93,14 @@ def async_video_bots( elevenlabs_style: typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: 
typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] = None, + sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> VideoBotsPageOutput: @@ -109,7 +109,7 @@ def async_video_bots( ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] + functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments @@ -127,11 +127,11 @@ def async_video_bots( doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. - messages : typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] + messages : typing.Optional[typing.List[CopilotCompletionRequestMessagesItem]] bot_script : typing.Optional[str] - selected_model : typing.Optional[AsyncVideoBotsRequestSelectedModel] + selected_model : typing.Optional[CopilotCompletionRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -151,7 +151,7 @@ def async_video_bots( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[AsyncVideoBotsRequestEmbeddingModel] + embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -159,17 +159,17 @@ def async_video_bots( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[AsyncVideoBotsRequestCitationStyle] + citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsyncVideoBotsRequestAsrModel] + asr_model : typing.Optional[CopilotCompletionRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[AsyncVideoBotsRequestTranslationModel] + translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. 
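For reference, the renamed copilot.completion method accepts the ASR and translation options described above (asr_model, translation_model, user_language) as ordinary keyword arguments. The sketch below mirrors the generated synchronous example with two of those optional arguments filled in; the prompt text and language code are placeholders, and every keyword shown comes from the signature in this hunk.

from gooey import Gooey

client = Gooey(
    api_key="YOUR_API_KEY",  # placeholder key
)

# input_prompt and user_language are placeholder values; per the docstring above,
# user_language translates incoming messages to English and replies back to that language.
result = client.copilot.completion(
    input_prompt="What are your store hours?",
    user_language="hi",
)
print(result)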
@@ -180,7 +180,7 @@ def async_video_bots( output_glossary_document : typing.Optional[core.File] See core.File for more documentation - lipsync_model : typing.Optional[AsyncVideoBotsRequestLipsyncModel] + lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel] tools : typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). @@ -195,9 +195,9 @@ def async_video_bots( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncVideoBotsRequestResponseFormatType] + response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType] - tts_provider : typing.Optional[AsyncVideoBotsRequestTtsProvider] + tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -230,9 +230,9 @@ def async_video_bots( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel] input_face : typing.Optional[core.File] See core.File for more documentation @@ -245,7 +245,7 @@ def async_video_bots( face_padding_right : typing.Optional[int] - sadtalker_settings : typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] + sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings] settings : typing.Optional[RunSettings] @@ -264,7 +264,7 @@ def async_video_bots( client = Gooey( api_key="YOUR_API_KEY", ) - client.copilot_for_your_enterprise.async_video_bots() + client.copilot.completion() """ _response = self._client_wrapper.httpx_client.request( "v3/video-bots/async", @@ -352,9 +352,9 @@ def async_video_bots( if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) @@ -385,24 +385,24 @@ def async_video_bots( raise ApiError(status_code=_response.status_code, body=_response_json) -class AsyncCopilotForYourEnterpriseClient: +class AsyncCopilotClient: def __init__(self, *, client_wrapper: AsyncClientWrapper): self._client_wrapper = client_wrapper - async def async_video_bots( + async def completion( self, *, example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] = None, + functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_prompt: typing.Optional[str] = None, input_audio: typing.Optional[str] = None, input_images: typing.Optional[typing.List[core.File]] = None, input_documents: typing.Optional[typing.List[core.File]] = None, doc_extract_url: typing.Optional[str] = None, - messages: typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] = None, + messages: typing.Optional[typing.List[CopilotCompletionRequestMessagesItem]] = None, bot_script: typing.Optional[str] = None, - selected_model: typing.Optional[AsyncVideoBotsRequestSelectedModel] = None, + selected_model: typing.Optional[CopilotCompletionRequestSelectedModel] = None, document_model: typing.Optional[str] = 
None, task_instructions: typing.Optional[str] = None, query_instructions: typing.Optional[str] = None, @@ -411,25 +411,25 @@ async def async_video_bots( max_references: typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[AsyncVideoBotsRequestEmbeddingModel] = None, + embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[AsyncVideoBotsRequestCitationStyle] = None, + citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsyncVideoBotsRequestAsrModel] = None, + asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[AsyncVideoBotsRequestTranslationModel] = None, + translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[AsyncVideoBotsRequestLipsyncModel] = None, + lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, tools: typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[AsyncVideoBotsRequestResponseFormatType] = None, - tts_provider: typing.Optional[AsyncVideoBotsRequestTtsProvider] = None, + response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, + tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -445,14 +445,14 @@ async def async_video_bots( elevenlabs_style: typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None, + openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] = None, + sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> VideoBotsPageOutput: @@ -461,7 +461,7 @@ async def async_video_bots( ---------- example_id : typing.Optional[str] - functions : typing.Optional[typing.List[AsyncVideoBotsRequestFunctionsItem]] + functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] variables : typing.Optional[typing.Dict[str, 
typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments @@ -479,11 +479,11 @@ async def async_video_bots( doc_extract_url : typing.Optional[str] Select a workflow to extract text from documents and images. - messages : typing.Optional[typing.List[AsyncVideoBotsRequestMessagesItem]] + messages : typing.Optional[typing.List[CopilotCompletionRequestMessagesItem]] bot_script : typing.Optional[str] - selected_model : typing.Optional[AsyncVideoBotsRequestSelectedModel] + selected_model : typing.Optional[CopilotCompletionRequestSelectedModel] document_model : typing.Optional[str] When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) @@ -503,7 +503,7 @@ async def async_video_bots( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[AsyncVideoBotsRequestEmbeddingModel] + embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -511,17 +511,17 @@ async def async_video_bots( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[AsyncVideoBotsRequestCitationStyle] + citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsyncVideoBotsRequestAsrModel] + asr_model : typing.Optional[CopilotCompletionRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[AsyncVideoBotsRequestTranslationModel] + translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -532,7 +532,7 @@ async def async_video_bots( output_glossary_document : typing.Optional[core.File] See core.File for more documentation - lipsync_model : typing.Optional[AsyncVideoBotsRequestLipsyncModel] + lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel] tools : typing.Optional[typing.List[typing.Literal["json_to_pdf"]]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
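# --- Usage sketch (editorial note, not generated code) -----------------------
# Async variant of the same rename. The awaited call matches the docstring
# example further down in this diff; the AsyncGooey constructor name is an
# assumption, since client construction is elided from the visible hunk.
import asyncio

from gooey import AsyncGooey  # assumed export name for the async client

client = AsyncGooey(api_key="YOUR_API_KEY")


async def main() -> None:
    result = await client.copilot.completion(
        input_prompt="Summarize the key changes in this release.",
    )
    print(result)


asyncio.run(main())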
@@ -547,9 +547,9 @@ async def async_video_bots( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[AsyncVideoBotsRequestResponseFormatType] + response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType] - tts_provider : typing.Optional[AsyncVideoBotsRequestTtsProvider] + tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -582,9 +582,9 @@ async def async_video_bots( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[AsyncVideoBotsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName] - openai_tts_model : typing.Optional[AsyncVideoBotsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel] input_face : typing.Optional[core.File] See core.File for more documentation @@ -597,7 +597,7 @@ async def async_video_bots( face_padding_right : typing.Optional[int] - sadtalker_settings : typing.Optional[AsyncVideoBotsRequestSadtalkerSettings] + sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings] settings : typing.Optional[RunSettings] @@ -621,7 +621,7 @@ async def async_video_bots( async def main() -> None: - await client.copilot_for_your_enterprise.async_video_bots() + await client.copilot.completion() asyncio.run(main()) @@ -712,9 +712,9 @@ async def main() -> None: if _response.status_code == 402: raise PaymentRequiredError( typing.cast( - typing.Optional[typing.Any], + GenericErrorResponse, parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore + type_=GenericErrorResponse, # type: ignore object_=_response.json(), ), ) diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py new file mode 100644 index 0000000..211d87d --- /dev/null +++ b/src/gooey/copilot/types/__init__.py @@ -0,0 +1,49 @@ +# This file was auto-generated by Fern from our API Definition. 
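# --- Error-handling sketch (editorial note, not generated code) ---------------
# The hunk above changes the 402 handler: PaymentRequiredError is now raised
# with a parsed GenericErrorResponse instead of a bare Optional[Any] payload. A
# caller might inspect it roughly as below; the .body attribute is assumed from
# the SDK's ApiError convention and is not shown in this excerpt.
from gooey import Gooey
from gooey.errors.payment_required_error import PaymentRequiredError

client = Gooey(api_key="YOUR_API_KEY")

try:
    client.copilot.completion(input_prompt="Hello!")
except PaymentRequiredError as exc:
    # exc.body should now be a GenericErrorResponse model rather than a raw dict.
    print("Payment required:", exc.body)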
+ +from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel +from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle +from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel +from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem +from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger +from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel +from .copilot_completion_request_messages_item import CopilotCompletionRequestMessagesItem +from .copilot_completion_request_messages_item_content import CopilotCompletionRequestMessagesItemContent +from .copilot_completion_request_messages_item_content_item import ( + CopilotCompletionRequestMessagesItemContentItem, + CopilotCompletionRequestMessagesItemContentItem_ImageUrl, + CopilotCompletionRequestMessagesItemContentItem_Text, +) +from .copilot_completion_request_messages_item_role import CopilotCompletionRequestMessagesItemRole +from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel +from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName +from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType +from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings +from .copilot_completion_request_sadtalker_settings_preprocess import ( + CopilotCompletionRequestSadtalkerSettingsPreprocess, +) +from .copilot_completion_request_selected_model import CopilotCompletionRequestSelectedModel +from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel +from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider + +__all__ = [ + "CopilotCompletionRequestAsrModel", + "CopilotCompletionRequestCitationStyle", + "CopilotCompletionRequestEmbeddingModel", + "CopilotCompletionRequestFunctionsItem", + "CopilotCompletionRequestFunctionsItemTrigger", + "CopilotCompletionRequestLipsyncModel", + "CopilotCompletionRequestMessagesItem", + "CopilotCompletionRequestMessagesItemContent", + "CopilotCompletionRequestMessagesItemContentItem", + "CopilotCompletionRequestMessagesItemContentItem_ImageUrl", + "CopilotCompletionRequestMessagesItemContentItem_Text", + "CopilotCompletionRequestMessagesItemRole", + "CopilotCompletionRequestOpenaiTtsModel", + "CopilotCompletionRequestOpenaiVoiceName", + "CopilotCompletionRequestResponseFormatType", + "CopilotCompletionRequestSadtalkerSettings", + "CopilotCompletionRequestSadtalkerSettingsPreprocess", + "CopilotCompletionRequestSelectedModel", + "CopilotCompletionRequestTranslationModel", + "CopilotCompletionRequestTtsProvider", +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py similarity index 90% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py rename to src/gooey/copilot/types/copilot_completion_request_asr_model.py index 99ea71d..65ae0f5 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_asr_model.py +++ b/src/gooey/copilot/types/copilot_completion_request_asr_model.py @@ -2,7 +2,7 @@ import typing -AsyncVideoBotsRequestAsrModel = typing.Union[ +CopilotCompletionRequestAsrModel = typing.Union[ 
typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py similarity index 90% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py rename to src/gooey/copilot/types/copilot_completion_request_citation_style.py index 5d6c4b0..1bb273a 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_citation_style.py +++ b/src/gooey/copilot/types/copilot_completion_request_citation_style.py @@ -2,7 +2,7 @@ import typing -AsyncVideoBotsRequestCitationStyle = typing.Union[ +CopilotCompletionRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py similarity index 86% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py rename to src/gooey/copilot/types/copilot_completion_request_embedding_model.py index 6bc8751..4655801 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_embedding_model.py +++ b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -AsyncVideoBotsRequestEmbeddingModel = typing.Union[ +CopilotCompletionRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py b/src/gooey/copilot/types/copilot_completion_request_functions_item.py similarity index 71% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py rename to src/gooey/copilot/types/copilot_completion_request_functions_item.py index c6399d0..c9654f1 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item.py +++ b/src/gooey/copilot/types/copilot_completion_request_functions_item.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -from .async_video_bots_request_functions_item_trigger import AsyncVideoBotsRequestFunctionsItemTrigger +from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger import pydantic from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing -class AsyncVideoBotsRequestFunctionsItem(UniversalBaseModel): +class CopilotCompletionRequestFunctionsItem(UniversalBaseModel): url: str - trigger: AsyncVideoBotsRequestFunctionsItemTrigger = pydantic.Field() + trigger: CopilotCompletionRequestFunctionsItemTrigger = pydantic.Field() """ When to run this function. `pre` runs before the recipe, `post` runs after the recipe. """ diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py new file mode 100644 index 0000000..cf3e214 --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
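# --- Usage sketch (editorial note, not generated code) ------------------------
# Building the renamed request models by hand: a functions item pairs a URL with
# a trigger ("pre" runs before the recipe, "post" runs after), matching the
# model and the Literal union introduced above. The import path assumes the
# re-exports in the new gooey/copilot/types/__init__.py; the URL is a placeholder.
from gooey import Gooey
from gooey.copilot.types import CopilotCompletionRequestFunctionsItem

client = Gooey(api_key="YOUR_API_KEY")

result = client.copilot.completion(
    functions=[
        CopilotCompletionRequestFunctionsItem(
            url="https://example.com/my-function",  # placeholder endpoint
            trigger="post",
        ),
    ],
    variables={"customer_name": "Ada"},  # available to functions and Jinja templates
    input_prompt="Greet the customer by name.",
)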
+ +import typing + +CopilotCompletionRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py new file mode 100644 index 0000000..865bc4b --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py b/src/gooey/copilot/types/copilot_completion_request_messages_item.py similarity index 59% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py rename to src/gooey/copilot/types/copilot_completion_request_messages_item.py index e7692a0..2cd2637 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item.py +++ b/src/gooey/copilot/types/copilot_completion_request_messages_item.py @@ -1,16 +1,16 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -from .async_video_bots_request_messages_item_role import AsyncVideoBotsRequestMessagesItemRole -from .async_video_bots_request_messages_item_content import AsyncVideoBotsRequestMessagesItemContent +from .copilot_completion_request_messages_item_role import CopilotCompletionRequestMessagesItemRole +from .copilot_completion_request_messages_item_content import CopilotCompletionRequestMessagesItemContent import typing from ...core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic -class AsyncVideoBotsRequestMessagesItem(UniversalBaseModel): - role: AsyncVideoBotsRequestMessagesItemRole - content: AsyncVideoBotsRequestMessagesItemContent +class CopilotCompletionRequestMessagesItem(UniversalBaseModel): + role: CopilotCompletionRequestMessagesItemRole + content: CopilotCompletionRequestMessagesItemContent display_name: typing.Optional[str] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/copilot/types/copilot_completion_request_messages_item_content.py b/src/gooey/copilot/types/copilot_completion_request_messages_item_content.py new file mode 100644 index 0000000..960ab0b --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_messages_item_content.py @@ -0,0 +1,8 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing +from .copilot_completion_request_messages_item_content_item import CopilotCompletionRequestMessagesItemContentItem + +CopilotCompletionRequestMessagesItemContent = typing.Union[ + str, typing.List[CopilotCompletionRequestMessagesItemContentItem] +] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py b/src/gooey/copilot/types/copilot_completion_request_messages_item_content_item.py similarity index 75% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py rename to src/gooey/copilot/types/copilot_completion_request_messages_item_content_item.py index cf537ff..266cd54 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content_item.py +++ b/src/gooey/copilot/types/copilot_completion_request_messages_item_content_item.py @@ -8,7 +8,7 @@ from ...types.image_url import ImageUrl -class AsyncVideoBotsRequestMessagesItemContentItem_Text(UniversalBaseModel): +class CopilotCompletionRequestMessagesItemContentItem_Text(UniversalBaseModel): type: typing.Literal["text"] = "text" text: typing.Optional[str] = None @@ -22,7 +22,7 @@ class Config: extra = pydantic.Extra.allow -class AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl(UniversalBaseModel): +class CopilotCompletionRequestMessagesItemContentItem_ImageUrl(UniversalBaseModel): type: typing.Literal["image_url"] = "image_url" image_url: typing.Optional[ImageUrl] = None @@ -36,6 +36,6 @@ class Config: extra = pydantic.Extra.allow -AsyncVideoBotsRequestMessagesItemContentItem = typing.Union[ - AsyncVideoBotsRequestMessagesItemContentItem_Text, AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl +CopilotCompletionRequestMessagesItemContentItem = typing.Union[ + CopilotCompletionRequestMessagesItemContentItem_Text, CopilotCompletionRequestMessagesItemContentItem_ImageUrl ] diff --git a/src/gooey/copilot/types/copilot_completion_request_messages_item_role.py b/src/gooey/copilot/types/copilot_completion_request_messages_item_role.py new file mode 100644 index 0000000..e974d99 --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_messages_item_role.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CopilotCompletionRequestMessagesItemRole = typing.Union[typing.Literal["user", "system", "assistant"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py new file mode 100644 index 0000000..4f4a35b --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
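# --- Usage sketch (editorial note, not generated code) ------------------------
# Seeding conversation history with the renamed message models: content may be a
# plain string or a list of typed content items (text / image_url), per the
# unions defined above. Imports again assume the gooey.copilot.types re-exports.
from gooey import Gooey
from gooey.copilot.types import (
    CopilotCompletionRequestMessagesItem,
    CopilotCompletionRequestMessagesItemContentItem_Text,
)

client = Gooey(api_key="YOUR_API_KEY")

history = [
    CopilotCompletionRequestMessagesItem(role="system", content="You are a terse support bot."),
    CopilotCompletionRequestMessagesItem(
        role="user",
        content=[CopilotCompletionRequestMessagesItemContentItem_Text(text="Where is my order?")],
    ),
]

result = client.copilot.completion(
    messages=history,
    input_prompt="Answer the last user message.",
)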
+ +import typing + +CopilotCompletionRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py similarity index 74% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py rename to src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py index 5ebcb9c..f60a6b3 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_voice_name.py +++ b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py @@ -2,6 +2,6 @@ import typing -AsyncVideoBotsRequestOpenaiVoiceName = typing.Union[ +CopilotCompletionRequestOpenaiVoiceName = typing.Union[ typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any ] diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py new file mode 100644 index 0000000..3c9dbb0 --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py similarity index 83% rename from src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py rename to src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py index 4540fe5..12ae458 100644 --- a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings.py +++ b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py @@ -2,14 +2,16 @@ from ...core.pydantic_utilities import UniversalBaseModel import typing -from .async_lipsync_request_sadtalker_settings_preprocess import AsyncLipsyncRequestSadtalkerSettingsPreprocess +from .copilot_completion_request_sadtalker_settings_preprocess import ( + CopilotCompletionRequestSadtalkerSettingsPreprocess, +) import pydantic from ...core.pydantic_utilities import IS_PYDANTIC_V2 -class AsyncLipsyncRequestSadtalkerSettings(UniversalBaseModel): +class CopilotCompletionRequestSadtalkerSettings(UniversalBaseModel): still: typing.Optional[bool] = None - preprocess: typing.Optional[AsyncLipsyncRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) + preprocess: typing.Optional[CopilotCompletionRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None) """ SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping. 
""" diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py similarity index 70% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py rename to src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py index 587424a..88add2e 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings_preprocess.py +++ b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py @@ -2,6 +2,6 @@ import typing -AsyncVideoBotsRequestSadtalkerSettingsPreprocess = typing.Union[ +CopilotCompletionRequestSadtalkerSettingsPreprocess = typing.Union[ typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any ] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py b/src/gooey/copilot/types/copilot_completion_request_selected_model.py similarity index 95% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py rename to src/gooey/copilot/types/copilot_completion_request_selected_model.py index 8e05ce3..34cea21 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_selected_model.py +++ b/src/gooey/copilot/types/copilot_completion_request_selected_model.py @@ -2,7 +2,7 @@ import typing -AsyncVideoBotsRequestSelectedModel = typing.Union[ +CopilotCompletionRequestSelectedModel = typing.Union[ typing.Literal[ "gpt_4_o", "gpt_4_o_mini", diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py new file mode 100644 index 0000000..10b0b5a --- /dev/null +++ b/src/gooey/copilot/types/copilot_completion_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py similarity index 78% rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py rename to src/gooey/copilot/types/copilot_completion_request_tts_provider.py index f492ea1..4dec4b0 100644 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_tts_provider.py +++ b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -AsyncVideoBotsRequestTtsProvider = typing.Union[ +CopilotCompletionRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/copilot_for_your_enterprise/__init__.py b/src/gooey/copilot_for_your_enterprise/__init__.py deleted file mode 100644 index 70d45c7..0000000 --- a/src/gooey/copilot_for_your_enterprise/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from .types import ( - AsyncVideoBotsRequestAsrModel, - AsyncVideoBotsRequestCitationStyle, - AsyncVideoBotsRequestEmbeddingModel, - AsyncVideoBotsRequestFunctionsItem, - AsyncVideoBotsRequestFunctionsItemTrigger, - AsyncVideoBotsRequestLipsyncModel, - AsyncVideoBotsRequestMessagesItem, - AsyncVideoBotsRequestMessagesItemContent, - AsyncVideoBotsRequestMessagesItemContentItem, - AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, - AsyncVideoBotsRequestMessagesItemContentItem_Text, - AsyncVideoBotsRequestMessagesItemRole, - AsyncVideoBotsRequestOpenaiTtsModel, - AsyncVideoBotsRequestOpenaiVoiceName, - AsyncVideoBotsRequestResponseFormatType, - AsyncVideoBotsRequestSadtalkerSettings, - AsyncVideoBotsRequestSadtalkerSettingsPreprocess, - AsyncVideoBotsRequestSelectedModel, - AsyncVideoBotsRequestTranslationModel, - AsyncVideoBotsRequestTtsProvider, -) - -__all__ = [ - "AsyncVideoBotsRequestAsrModel", - "AsyncVideoBotsRequestCitationStyle", - "AsyncVideoBotsRequestEmbeddingModel", - "AsyncVideoBotsRequestFunctionsItem", - "AsyncVideoBotsRequestFunctionsItemTrigger", - "AsyncVideoBotsRequestLipsyncModel", - "AsyncVideoBotsRequestMessagesItem", - "AsyncVideoBotsRequestMessagesItemContent", - "AsyncVideoBotsRequestMessagesItemContentItem", - "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", - "AsyncVideoBotsRequestMessagesItemContentItem_Text", - "AsyncVideoBotsRequestMessagesItemRole", - "AsyncVideoBotsRequestOpenaiTtsModel", - "AsyncVideoBotsRequestOpenaiVoiceName", - "AsyncVideoBotsRequestResponseFormatType", - "AsyncVideoBotsRequestSadtalkerSettings", - "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", - "AsyncVideoBotsRequestSelectedModel", - "AsyncVideoBotsRequestTranslationModel", - "AsyncVideoBotsRequestTtsProvider", -] diff --git a/src/gooey/copilot_for_your_enterprise/types/__init__.py b/src/gooey/copilot_for_your_enterprise/types/__init__.py deleted file mode 100644 index 23fe00a..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -from .async_video_bots_request_asr_model import AsyncVideoBotsRequestAsrModel -from .async_video_bots_request_citation_style import AsyncVideoBotsRequestCitationStyle -from .async_video_bots_request_embedding_model import AsyncVideoBotsRequestEmbeddingModel -from .async_video_bots_request_functions_item import AsyncVideoBotsRequestFunctionsItem -from .async_video_bots_request_functions_item_trigger import AsyncVideoBotsRequestFunctionsItemTrigger -from .async_video_bots_request_lipsync_model import AsyncVideoBotsRequestLipsyncModel -from .async_video_bots_request_messages_item import AsyncVideoBotsRequestMessagesItem -from .async_video_bots_request_messages_item_content import AsyncVideoBotsRequestMessagesItemContent -from .async_video_bots_request_messages_item_content_item import ( - AsyncVideoBotsRequestMessagesItemContentItem, - AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl, - AsyncVideoBotsRequestMessagesItemContentItem_Text, -) -from .async_video_bots_request_messages_item_role import AsyncVideoBotsRequestMessagesItemRole -from .async_video_bots_request_openai_tts_model import AsyncVideoBotsRequestOpenaiTtsModel -from .async_video_bots_request_openai_voice_name import AsyncVideoBotsRequestOpenaiVoiceName -from .async_video_bots_request_response_format_type import AsyncVideoBotsRequestResponseFormatType -from .async_video_bots_request_sadtalker_settings import AsyncVideoBotsRequestSadtalkerSettings -from .async_video_bots_request_sadtalker_settings_preprocess import AsyncVideoBotsRequestSadtalkerSettingsPreprocess -from .async_video_bots_request_selected_model import AsyncVideoBotsRequestSelectedModel -from .async_video_bots_request_translation_model import AsyncVideoBotsRequestTranslationModel -from .async_video_bots_request_tts_provider import AsyncVideoBotsRequestTtsProvider - -__all__ = [ - "AsyncVideoBotsRequestAsrModel", - "AsyncVideoBotsRequestCitationStyle", - "AsyncVideoBotsRequestEmbeddingModel", - "AsyncVideoBotsRequestFunctionsItem", - "AsyncVideoBotsRequestFunctionsItemTrigger", - "AsyncVideoBotsRequestLipsyncModel", - "AsyncVideoBotsRequestMessagesItem", - "AsyncVideoBotsRequestMessagesItemContent", - "AsyncVideoBotsRequestMessagesItemContentItem", - "AsyncVideoBotsRequestMessagesItemContentItem_ImageUrl", - "AsyncVideoBotsRequestMessagesItemContentItem_Text", - "AsyncVideoBotsRequestMessagesItemRole", - "AsyncVideoBotsRequestOpenaiTtsModel", - "AsyncVideoBotsRequestOpenaiVoiceName", - "AsyncVideoBotsRequestResponseFormatType", - "AsyncVideoBotsRequestSadtalkerSettings", - "AsyncVideoBotsRequestSadtalkerSettingsPreprocess", - "AsyncVideoBotsRequestSelectedModel", - "AsyncVideoBotsRequestTranslationModel", - "AsyncVideoBotsRequestTtsProvider", -] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py deleted file mode 100644 index 807a5d7..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_functions_item_trigger.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsyncVideoBotsRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py deleted file mode 100644 index c8646cd..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncVideoBotsRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py deleted file mode 100644 index e346909..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_content.py +++ /dev/null @@ -1,6 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from .async_video_bots_request_messages_item_content_item import AsyncVideoBotsRequestMessagesItemContentItem - -AsyncVideoBotsRequestMessagesItemContent = typing.Union[str, typing.List[AsyncVideoBotsRequestMessagesItemContentItem]] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py deleted file mode 100644 index 3cede39..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_messages_item_role.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncVideoBotsRequestMessagesItemRole = typing.Union[typing.Literal["user", "system", "assistant"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py deleted file mode 100644 index 5b4c798..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncVideoBotsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py deleted file mode 100644 index 875ddbf..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncVideoBotsRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py b/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py deleted file mode 100644 index 35d4b1a..0000000 --- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsyncVideoBotsRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/copilot_integrations/__init__.py b/src/gooey/copilot_integrations/__init__.py deleted file mode 100644 index 8d66257..0000000 --- a/src/gooey/copilot_integrations/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import ( - CreateStreamRequestAsrModel, - CreateStreamRequestCitationStyle, - CreateStreamRequestEmbeddingModel, - CreateStreamRequestLipsyncModel, - CreateStreamRequestOpenaiTtsModel, - CreateStreamRequestOpenaiVoiceName, - CreateStreamRequestResponseFormatType, - CreateStreamRequestSelectedModel, - CreateStreamRequestTranslationModel, - CreateStreamRequestTtsProvider, - VideoBotsStreamResponse, -) - -__all__ = [ - "CreateStreamRequestAsrModel", - "CreateStreamRequestCitationStyle", - "CreateStreamRequestEmbeddingModel", - "CreateStreamRequestLipsyncModel", - "CreateStreamRequestOpenaiTtsModel", - "CreateStreamRequestOpenaiVoiceName", - "CreateStreamRequestResponseFormatType", - "CreateStreamRequestSelectedModel", - "CreateStreamRequestTranslationModel", - "CreateStreamRequestTtsProvider", - "VideoBotsStreamResponse", -] diff --git a/src/gooey/copilot_integrations/client.py b/src/gooey/copilot_integrations/client.py deleted file mode 100644 index 5606096..0000000 --- a/src/gooey/copilot_integrations/client.py +++ /dev/null @@ -1,921 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.button_pressed import ButtonPressed -from ..types.recipe_function import RecipeFunction -from ..types.conversation_entry import ConversationEntry -from .types.create_stream_request_selected_model import CreateStreamRequestSelectedModel -from .types.create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel -from .types.create_stream_request_citation_style import CreateStreamRequestCitationStyle -from .types.create_stream_request_asr_model import CreateStreamRequestAsrModel -from .types.create_stream_request_translation_model import CreateStreamRequestTranslationModel -from .types.create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel -from ..types.llm_tools import LlmTools -from .types.create_stream_request_response_format_type import CreateStreamRequestResponseFormatType -from .types.create_stream_request_tts_provider import CreateStreamRequestTtsProvider -from .types.create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName -from .types.create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel -from ..types.sad_talker_settings import SadTalkerSettings -from ..core.request_options import RequestOptions -from ..types.create_stream_response import CreateStreamResponse -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from .types.video_bots_stream_response import VideoBotsStreamResponse -from ..core.jsonable_encoder import jsonable_encoder -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
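# --- Pattern note (editorial, not generated code) ------------------------------
# The client removed above used an OMIT sentinel, typing.cast(typing.Any, ...),
# as the default for optional parameters so that "not passed" can be told apart
# from an explicit None when the JSON body is assembled. A stripped-down sketch
# of that pattern, with a hypothetical helper:
import typing

OMIT = typing.cast(typing.Any, ...)  # Ellipsis doubles as a "not provided" marker


def build_body(**params: typing.Any) -> dict:
    # Drop only omitted fields; explicit None values are kept and sent as null.
    return {key: value for key, value in params.items() if value is not OMIT}


print(build_body(conversation_id=OMIT, user_id=None, input_prompt="hi"))
# -> {'user_id': None, 'input_prompt': 'hi'}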
- - -class CopilotIntegrationsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def video_bots_stream_create( - self, - *, - integration_id: str, - conversation_id: typing.Optional[str] = OMIT, - user_id: typing.Optional[str] = OMIT, - user_message_id: typing.Optional[str] = OMIT, - button_pressed: typing.Optional[ButtonPressed] = OMIT, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT, - asr_language: typing.Optional[str] = OMIT, - translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, - tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, - tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - 
face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - input_text: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateStreamResponse: - """ - Parameters - ---------- - integration_id : str - Your Integration ID as shown in the Copilot Integrations tab - - conversation_id : typing.Optional[str] - The gooey conversation ID. - - If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - - Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. - - user_id : typing.Optional[str] - Your app's custom user ID. - - If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. - - user_message_id : typing.Optional[str] - Your app's custom message ID for the user message. - - If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. - - button_pressed : typing.Optional[ButtonPressed] - The button that was pressed by the user. - - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[CreateStreamRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - citation_style : typing.Optional[CreateStreamRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[CreateStreamRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. 
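# --- Usage sketch (editorial note; refers to the client removed by this patch) -
# How the deleted copilot_integrations streaming flow maintained state, per its
# docstring: omit conversation_id on the first call, then pass the ID returned
# in the response on later turns. The conversation_id attribute name on
# CreateStreamResponse is an assumption; only the behavioural description
# appears in this diff.
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

first = client.copilot_integrations.video_bots_stream_create(
    integration_id="integration_id",  # as shown in the Copilot Integrations tab
    input_prompt="Hi!",
)

followup = client.copilot_integrations.video_bots_stream_create(
    integration_id="integration_id",
    conversation_id=first.conversation_id,  # assumed field; reuse the returned ID
    input_prompt="And what about refunds?",
)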
- - translation_model : typing.Optional[CreateStreamRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Langauge -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Langauge - - - lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] - - tts_provider : typing.Optional[CreateStreamRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - input_text : typing.Optional[str] - Use `input_prompt` instead - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - CreateStreamResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/integrations/stream", - method="POST", - json={ - "integration_id": integration_id, - "conversation_id": conversation_id, - "user_id": user_id, - "user_message_id": user_message_id, - "button_pressed": button_pressed, - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "input_text": input_text, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateStreamResponse, - parse_obj_as( - type_=CreateStreamResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - 
object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - def video_bots_stream( - self, request_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsStreamResponse: - """ - Parameters - ---------- - request_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - VideoBotsStreamResponse - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.copilot_integrations.video_bots_stream( - request_id="request_id", - ) - """ - _response = self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsStreamResponse, - parse_obj_as( - type_=VideoBotsStreamResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncCopilotIntegrationsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def video_bots_stream_create( - self, - *, - integration_id: str, - conversation_id: typing.Optional[str] = OMIT, - user_id: typing.Optional[str] = OMIT, - user_message_id: typing.Optional[str] = OMIT, - button_pressed: typing.Optional[ButtonPressed] = OMIT, - functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - input_prompt: typing.Optional[str] = OMIT, - input_audio: typing.Optional[str] = OMIT, - input_images: typing.Optional[typing.Sequence[str]] = OMIT, - input_documents: typing.Optional[typing.Sequence[str]] = OMIT, - doc_extract_url: typing.Optional[str] = OMIT, - messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT, - bot_script: typing.Optional[str] = OMIT, - selected_model: typing.Optional[CreateStreamRequestSelectedModel] = OMIT, - document_model: typing.Optional[str] = OMIT, - task_instructions: typing.Optional[str] = OMIT, - query_instructions: typing.Optional[str] = OMIT, - keyword_instructions: typing.Optional[str] = OMIT, - documents: typing.Optional[typing.Sequence[str]] = OMIT, - max_references: typing.Optional[int] = OMIT, - max_context_words: typing.Optional[int] = OMIT, - scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = OMIT, - dense_weight: typing.Optional[float] = OMIT, - citation_style: typing.Optional[CreateStreamRequestCitationStyle] = OMIT, - use_url_shortener: typing.Optional[bool] = OMIT, - asr_model: typing.Optional[CreateStreamRequestAsrModel] = OMIT, - asr_language: 
typing.Optional[str] = OMIT, - translation_model: typing.Optional[CreateStreamRequestTranslationModel] = OMIT, - user_language: typing.Optional[str] = OMIT, - input_glossary_document: typing.Optional[str] = OMIT, - output_glossary_document: typing.Optional[str] = OMIT, - lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = OMIT, - tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = OMIT, - tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = OMIT, - uberduck_voice_name: typing.Optional[str] = OMIT, - uberduck_speaking_rate: typing.Optional[float] = OMIT, - google_voice_name: typing.Optional[str] = OMIT, - google_speaking_rate: typing.Optional[float] = OMIT, - google_pitch: typing.Optional[float] = OMIT, - bark_history_prompt: typing.Optional[str] = OMIT, - elevenlabs_voice_name: typing.Optional[str] = OMIT, - elevenlabs_api_key: typing.Optional[str] = OMIT, - elevenlabs_voice_id: typing.Optional[str] = OMIT, - elevenlabs_model: typing.Optional[str] = OMIT, - elevenlabs_stability: typing.Optional[float] = OMIT, - elevenlabs_similarity_boost: typing.Optional[float] = OMIT, - elevenlabs_style: typing.Optional[float] = OMIT, - elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, - azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = OMIT, - input_face: typing.Optional[str] = OMIT, - face_padding_top: typing.Optional[int] = OMIT, - face_padding_bottom: typing.Optional[int] = OMIT, - face_padding_left: typing.Optional[int] = OMIT, - face_padding_right: typing.Optional[int] = OMIT, - sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT, - input_text: typing.Optional[str] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> CreateStreamResponse: - """ - Parameters - ---------- - integration_id : str - Your Integration ID as shown in the Copilot Integrations tab - - conversation_id : typing.Optional[str] - The gooey conversation ID. - - If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests. - - Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response. - - user_id : typing.Optional[str] - Your app's custom user ID. - - If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation. - - user_message_id : typing.Optional[str] - Your app's custom message ID for the user message. - - If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation. - - button_pressed : typing.Optional[ButtonPressed] - The button that was pressed by the user. 
- - functions : typing.Optional[typing.Sequence[RecipeFunction]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_prompt : typing.Optional[str] - - input_audio : typing.Optional[str] - - input_images : typing.Optional[typing.Sequence[str]] - - input_documents : typing.Optional[typing.Sequence[str]] - - doc_extract_url : typing.Optional[str] - Select a workflow to extract text from documents and images. - - messages : typing.Optional[typing.Sequence[ConversationEntry]] - - bot_script : typing.Optional[str] - - selected_model : typing.Optional[CreateStreamRequestSelectedModel] - - document_model : typing.Optional[str] - When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api)) - - task_instructions : typing.Optional[str] - - query_instructions : typing.Optional[str] - - keyword_instructions : typing.Optional[str] - - documents : typing.Optional[typing.Sequence[str]] - - max_references : typing.Optional[int] - - max_context_words : typing.Optional[int] - - scroll_jump : typing.Optional[int] - - embedding_model : typing.Optional[CreateStreamRequestEmbeddingModel] - - dense_weight : typing.Optional[float] - - Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. - Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - - - citation_style : typing.Optional[CreateStreamRequestCitationStyle] - - use_url_shortener : typing.Optional[bool] - - asr_model : typing.Optional[CreateStreamRequestAsrModel] - Choose a model to transcribe incoming audio messages to text. - - asr_language : typing.Optional[str] - Choose a language to transcribe incoming audio messages to text. - - translation_model : typing.Optional[CreateStreamRequestTranslationModel] - - user_language : typing.Optional[str] - Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. - - input_glossary_document : typing.Optional[str] - - Translation Glossary for User Language -> LLM Language (English) - - - output_glossary_document : typing.Optional[str] - - Translation Glossary for LLM Language (English) -> User Language - - - lipsync_model : typing.Optional[CreateStreamRequestLipsyncModel] - - tools : typing.Optional[typing.Sequence[LlmTools]] - Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
- - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[CreateStreamRequestResponseFormatType] - - tts_provider : typing.Optional[CreateStreamRequestTtsProvider] - - uberduck_voice_name : typing.Optional[str] - - uberduck_speaking_rate : typing.Optional[float] - - google_voice_name : typing.Optional[str] - - google_speaking_rate : typing.Optional[float] - - google_pitch : typing.Optional[float] - - bark_history_prompt : typing.Optional[str] - - elevenlabs_voice_name : typing.Optional[str] - Use `elevenlabs_voice_id` instead - - elevenlabs_api_key : typing.Optional[str] - - elevenlabs_voice_id : typing.Optional[str] - - elevenlabs_model : typing.Optional[str] - - elevenlabs_stability : typing.Optional[float] - - elevenlabs_similarity_boost : typing.Optional[float] - - elevenlabs_style : typing.Optional[float] - - elevenlabs_speaker_boost : typing.Optional[bool] - - azure_voice_name : typing.Optional[str] - - openai_voice_name : typing.Optional[CreateStreamRequestOpenaiVoiceName] - - openai_tts_model : typing.Optional[CreateStreamRequestOpenaiTtsModel] - - input_face : typing.Optional[str] - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[SadTalkerSettings] - - input_text : typing.Optional[str] - Use `input_prompt` instead - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - CreateStreamResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.copilot_integrations.video_bots_stream_create( - integration_id="integration_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/integrations/stream", - method="POST", - json={ - "integration_id": integration_id, - "conversation_id": conversation_id, - "user_id": user_id, - "user_message_id": user_message_id, - "button_pressed": button_pressed, - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "input_audio": input_audio, - "input_images": input_images, - "input_documents": input_documents, - "doc_extract_url": doc_extract_url, - "messages": messages, - "bot_script": bot_script, - "selected_model": selected_model, - "document_model": document_model, - "task_instructions": task_instructions, - "query_instructions": query_instructions, - "keyword_instructions": keyword_instructions, - "documents": documents, - "max_references": max_references, - "max_context_words": max_context_words, - "scroll_jump": scroll_jump, - "embedding_model": embedding_model, - "dense_weight": dense_weight, - "citation_style": citation_style, - "use_url_shortener": use_url_shortener, - "asr_model": asr_model, - "asr_language": asr_language, - "translation_model": translation_model, - "user_language": user_language, - "input_glossary_document": input_glossary_document, - "output_glossary_document": output_glossary_document, - "lipsync_model": lipsync_model, - "tools": tools, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": 
sampling_temperature, - "response_format_type": response_format_type, - "tts_provider": tts_provider, - "uberduck_voice_name": uberduck_voice_name, - "uberduck_speaking_rate": uberduck_speaking_rate, - "google_voice_name": google_voice_name, - "google_speaking_rate": google_speaking_rate, - "google_pitch": google_pitch, - "bark_history_prompt": bark_history_prompt, - "elevenlabs_voice_name": elevenlabs_voice_name, - "elevenlabs_api_key": elevenlabs_api_key, - "elevenlabs_voice_id": elevenlabs_voice_id, - "elevenlabs_model": elevenlabs_model, - "elevenlabs_stability": elevenlabs_stability, - "elevenlabs_similarity_boost": elevenlabs_similarity_boost, - "elevenlabs_style": elevenlabs_style, - "elevenlabs_speaker_boost": elevenlabs_speaker_boost, - "azure_voice_name": azure_voice_name, - "openai_voice_name": openai_voice_name, - "openai_tts_model": openai_tts_model, - "input_face": input_face, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "input_text": input_text, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - CreateStreamResponse, - parse_obj_as( - type_=CreateStreamResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - async def video_bots_stream( - self, request_id: str, *, request_options: typing.Optional[RequestOptions] = None - ) -> VideoBotsStreamResponse: - """ - Parameters - ---------- - request_id : str - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - VideoBotsStreamResponse - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.copilot_integrations.video_bots_stream( - request_id="request_id", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - f"v3/integrations/stream/{jsonable_encoder(request_id)}", - method="GET", - request_options=request_options, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - VideoBotsStreamResponse, - parse_obj_as( - type_=VideoBotsStreamResponse, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/copilot_integrations/types/__init__.py b/src/gooey/copilot_integrations/types/__init__.py deleted file mode 100644 index 1224051..0000000 --- a/src/gooey/copilot_integrations/types/__init__.py +++ /dev/null @@ -1,27 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .create_stream_request_asr_model import CreateStreamRequestAsrModel -from .create_stream_request_citation_style import CreateStreamRequestCitationStyle -from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel -from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel -from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel -from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName -from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType -from .create_stream_request_selected_model import CreateStreamRequestSelectedModel -from .create_stream_request_translation_model import CreateStreamRequestTranslationModel -from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider -from .video_bots_stream_response import VideoBotsStreamResponse - -__all__ = [ - "CreateStreamRequestAsrModel", - "CreateStreamRequestCitationStyle", - "CreateStreamRequestEmbeddingModel", - "CreateStreamRequestLipsyncModel", - "CreateStreamRequestOpenaiTtsModel", - "CreateStreamRequestOpenaiVoiceName", - "CreateStreamRequestResponseFormatType", - "CreateStreamRequestSelectedModel", - "CreateStreamRequestTranslationModel", - "CreateStreamRequestTtsProvider", - "VideoBotsStreamResponse", -] diff --git a/src/gooey/copilot_integrations/types/video_bots_stream_response.py b/src/gooey/copilot_integrations/types/video_bots_stream_response.py deleted file mode 100644 index 22f8858..0000000 --- a/src/gooey/copilot_integrations/types/video_bots_stream_response.py +++ /dev/null @@ -1,10 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
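# A minimal sketch of the two-step streaming flow documented in the video_bots_stream_create /
# video_bots_stream docstrings above: POST v3/integrations/stream to start (or continue) a
# conversation, then GET v3/integrations/stream/{request_id} to read the stream. Only
# integration_id is required; input_prompt is just an illustrative optional field, and the
# request_id below is a placeholder -- in practice it would come from the create response, whose
# exact field name is not shown in this hunk.
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

create_response = client.copilot_integrations.video_bots_stream_create(
    integration_id="integration_id",
    input_prompt="Hello!",
)

stream = client.copilot_integrations.video_bots_stream(request_id="request_id")
# `stream` is a VideoBotsStreamResponse: one of ConversationStart, RunStart, MessagePart,
# FinalResponse or StreamError, per the union deleted just below.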
- -import typing -from ...types.conversation_start import ConversationStart -from ...types.run_start import RunStart -from ...types.message_part import MessagePart -from ...types.final_response import FinalResponse -from ...types.stream_error import StreamError - -VideoBotsStreamResponse = typing.Union[ConversationStart, RunStart, MessagePart, FinalResponse, StreamError] diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index 4bd75e2..65c8508 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1-beta12", + "X-Fern-SDK-Version": "0.0.1-beta13", } headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/errors/payment_required_error.py b/src/gooey/errors/payment_required_error.py index 81da343..03a7766 100644 --- a/src/gooey/errors/payment_required_error.py +++ b/src/gooey/errors/payment_required_error.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. from ..core.api_error import ApiError -import typing +from ..types.generic_error_response import GenericErrorResponse class PaymentRequiredError(ApiError): - def __init__(self, body: typing.Optional[typing.Any]): + def __init__(self, body: GenericErrorResponse): super().__init__(status_code=402, body=body) diff --git a/src/gooey/evaluator/__init__.py b/src/gooey/evaluator/__init__.py deleted file mode 100644 index d4ba20f..0000000 --- a/src/gooey/evaluator/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import ( - BulkEvalPageRequestAggFunctionsItem, - BulkEvalPageRequestAggFunctionsItemFunction, - BulkEvalPageRequestEvalPromptsItem, - BulkEvalPageRequestFunctionsItem, - BulkEvalPageRequestFunctionsItemTrigger, - BulkEvalPageRequestResponseFormatType, - BulkEvalPageRequestSelectedModel, -) - -__all__ = [ - "BulkEvalPageRequestAggFunctionsItem", - "BulkEvalPageRequestAggFunctionsItemFunction", - "BulkEvalPageRequestEvalPromptsItem", - "BulkEvalPageRequestFunctionsItem", - "BulkEvalPageRequestFunctionsItemTrigger", - "BulkEvalPageRequestResponseFormatType", - "BulkEvalPageRequestSelectedModel", -] diff --git a/src/gooey/evaluator/client.py b/src/gooey/evaluator/client.py deleted file mode 100644 index d771f1d..0000000 --- a/src/gooey/evaluator/client.py +++ /dev/null @@ -1,342 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
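# Every Gooey endpoint in this patch maps error statuses onto typed exceptions: 402 raises
# PaymentRequiredError (whose body, after the payment_required_error.py change above, is a
# GenericErrorResponse rather than an untyped payload), 422 raises UnprocessableEntityError with
# an HttpValidationError body, 429 raises TooManyRequestsError, and any other status -- or a
# response that fails to parse as JSON -- surfaces as a plain ApiError. A hedged sketch of
# catching these from application code; async_bulk_eval is only a representative call, and
# reading .status_code / .body assumes ApiError keeps the values it was constructed with.
from gooey import Gooey
from gooey.core.api_error import ApiError
from gooey.errors.payment_required_error import PaymentRequiredError
from gooey.errors.too_many_requests_error import TooManyRequestsError
from gooey.errors.unprocessable_entity_error import UnprocessableEntityError

client = Gooey(api_key="YOUR_API_KEY")

try:
    output = client.evaluator.async_bulk_eval(documents=["documents"])
    print(output)
except PaymentRequiredError as err:
    print("out of credits:", err.body)   # GenericErrorResponse
except UnprocessableEntityError as err:
    print("invalid request:", err.body)  # HttpValidationError
except TooManyRequestsError as err:
    print("rate limited:", err.body)     # GenericErrorResponse
except ApiError as err:
    print("unexpected status", err.status_code, err.body)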
- -import typing -from ..core.client_wrapper import SyncClientWrapper -from .types.bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem -from .types.bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem -from .types.bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem -from .types.bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel -from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.bulk_eval_page_output import BulkEvalPageOutput -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class EvaluatorClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageOutput: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would be sample questions or, for Art QR Code, pairs of image descriptions and URLs. - Remember to include header names in your CSV too. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] - - Aggregate using one or more operations.
Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - BulkEvalPageOutput - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.evaluator.async_bulk_eval( - documents=["documents"], - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageOutput, - parse_obj_as( - type_=BulkEvalPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncEvaluatorClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_bulk_eval( - self, - *, - documents: typing.Sequence[str], - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - eval_prompts: typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] = OMIT, - agg_functions: typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] = OMIT, - selected_model: typing.Optional[BulkEvalPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, - settings: 
typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> BulkEvalPageOutput: - """ - Parameters - ---------- - documents : typing.Sequence[str] - - Upload or link to a CSV or google sheet that contains your sample input data. - For example, for Copilot, this would be sample questions or, for Art QR Code, pairs of image descriptions and URLs. - Remember to include header names in your CSV too. - - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[BulkEvalPageRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - eval_prompts : typing.Optional[typing.Sequence[BulkEvalPageRequestEvalPromptsItem]] - - Specify custom LLM prompts to calculate metrics that evaluate each row of the input data. The output should be a JSON object mapping the metric names to values. - _The `columns` dictionary can be used to reference the spreadsheet columns._ - - - agg_functions : typing.Optional[typing.Sequence[BulkEvalPageRequestAggFunctionsItem]] - - Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/pandas-docs/stable/reference/groupby.html#dataframegroupby-computations-descriptive-stats). - - - selected_model : typing.Optional[BulkEvalPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration.
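# A hedged sketch of the asynchronous variant documented above. Only `documents` is required;
# eval_prompts, agg_functions, selected_model and the LLM settings are optional and, when left
# at their defaults, never reach the JSON body. example_id, if given, travels as a query
# parameter of v3/bulk-eval/async rather than in the body. The spreadsheet URL and the two
# LLM settings below are illustrative placeholders.
import asyncio

from gooey import AsyncGooey

client = AsyncGooey(api_key="YOUR_API_KEY")


async def main() -> None:
    output = await client.evaluator.async_bulk_eval(
        documents=["https://example.com/sample_inputs.csv"],
        sampling_temperature=0.0,
        max_tokens=256,
    )
    print(output)  # BulkEvalPageOutput on a 2xx response


asyncio.run(main())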
- - Returns - ------- - BulkEvalPageOutput - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.evaluator.async_bulk_eval( - documents=["documents"], - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/bulk-eval/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "documents": documents, - "eval_prompts": eval_prompts, - "agg_functions": agg_functions, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - BulkEvalPageOutput, - parse_obj_as( - type_=BulkEvalPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/evaluator/types/__init__.py b/src/gooey/evaluator/types/__init__.py deleted file mode 100644 index 87bb267..0000000 --- a/src/gooey/evaluator/types/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem -from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction -from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem -from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem -from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger -from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType -from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel - -__all__ = [ - "BulkEvalPageRequestAggFunctionsItem", - "BulkEvalPageRequestAggFunctionsItemFunction", - "BulkEvalPageRequestEvalPromptsItem", - "BulkEvalPageRequestFunctionsItem", - "BulkEvalPageRequestFunctionsItemTrigger", - "BulkEvalPageRequestResponseFormatType", - "BulkEvalPageRequestSelectedModel", -] diff --git a/src/gooey/functions/__init__.py b/src/gooey/functions/__init__.py deleted file mode 100644 index f3ea265..0000000 --- a/src/gooey/functions/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- diff --git a/src/gooey/functions/client.py b/src/gooey/functions/client.py deleted file mode 100644 index 0047ff0..0000000 --- a/src/gooey/functions/client.py +++ /dev/null @@ -1,231 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.functions_page_output import FunctionsPageOutput -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class FunctionsClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_functions( - self, - *, - example_id: typing.Optional[str] = None, - code: typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageOutput: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - FunctionsPageOutput - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.functions.async_functions() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/functions/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "code": code, - "variables": variables, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageOutput, - parse_obj_as( - type_=FunctionsPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncFunctionsClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_functions( - self, - *, - example_id: typing.Optional[str] = None, - code: typing.Optional[str] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> FunctionsPageOutput: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - code : typing.Optional[str] - The JS code to be executed. - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used in the code - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
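# A hedged sketch of async_functions as documented above: `code` is "The JS code to be executed"
# and `variables` are "Variables to be used in the code". Both the snippet and the variable
# names below are illustrative, not taken from the Gooey docs.
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

result = client.functions.async_functions(
    code="return x * 2;",
    variables={"x": 21},
)
print(result)  # FunctionsPageOutput on success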
- - Returns - ------- - FunctionsPageOutput - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.functions.async_functions() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/functions/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "code": code, - "variables": variables, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - FunctionsPageOutput, - parse_obj_as( - type_=FunctionsPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/__init__.py b/src/gooey/lip_syncing/__init__.py deleted file mode 100644 index b03755d..0000000 --- a/src/gooey/lip_syncing/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import ( - AsyncLipsyncRequestFunctionsItem, - AsyncLipsyncRequestFunctionsItemTrigger, - AsyncLipsyncRequestSadtalkerSettings, - AsyncLipsyncRequestSadtalkerSettingsPreprocess, - AsyncLipsyncRequestSelectedModel, -) - -__all__ = [ - "AsyncLipsyncRequestFunctionsItem", - "AsyncLipsyncRequestFunctionsItemTrigger", - "AsyncLipsyncRequestSadtalkerSettings", - "AsyncLipsyncRequestSadtalkerSettingsPreprocess", - "AsyncLipsyncRequestSelectedModel", -] diff --git a/src/gooey/lip_syncing/client.py b/src/gooey/lip_syncing/client.py deleted file mode 100644 index 73f492c..0000000 --- a/src/gooey/lip_syncing/client.py +++ /dev/null @@ -1,305 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from .types.async_lipsync_request_functions_item import AsyncLipsyncRequestFunctionsItem -from .. 
import core -from .types.async_lipsync_request_sadtalker_settings import AsyncLipsyncRequestSadtalkerSettings -from .types.async_lipsync_request_selected_model import AsyncLipsyncRequestSelectedModel -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.lipsync_page_output import LipsyncPageOutput -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) - - -class LipSyncingClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_lipsync( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_face: typing.Optional[core.File] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[AsyncLipsyncRequestSadtalkerSettings] = None, - selected_model: typing.Optional[AsyncLipsyncRequestSelectedModel] = None, - input_audio: typing.Optional[core.File] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageOutput: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[core.File] - See core.File for more documentation - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[AsyncLipsyncRequestSadtalkerSettings] - - selected_model : typing.Optional[AsyncLipsyncRequestSelectedModel] - - input_audio : typing.Optional[core.File] - See core.File for more documentation - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
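# async_lipsync above is a multipart endpoint: the scalar fields go out as form data and
# input_face / input_audio as file uploads (core.File). A hedged sketch -- passing open binary
# file handles is an assumption based on how Fern-generated clients usually accept core.File,
# and the file paths are placeholders.
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

with open("face.png", "rb") as face, open("speech.wav", "rb") as audio:
    result = client.lip_syncing.async_lipsync(
        input_face=face,
        input_audio=audio,
        selected_model="Wav2Lip",  # "Wav2Lip" or "SadTalker" per AsyncLipsyncRequestSelectedModel
    )
print(result)  # LipsyncPageOutput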
- - Returns - ------- - LipsyncPageOutput - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.lip_syncing.async_lipsync() - """ - _response = self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", - method="POST", - params={ - "example_id": example_id, - }, - data={ - "functions": functions, - "variables": variables, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - files={ - "input_face": input_face, - "input_audio": input_audio, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageOutput, - parse_obj_as( - type_=LipsyncPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncLipSyncingClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_lipsync( - self, - *, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] = None, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - input_face: typing.Optional[core.File] = None, - face_padding_top: typing.Optional[int] = None, - face_padding_bottom: typing.Optional[int] = None, - face_padding_left: typing.Optional[int] = None, - face_padding_right: typing.Optional[int] = None, - sadtalker_settings: typing.Optional[AsyncLipsyncRequestSadtalkerSettings] = None, - selected_model: typing.Optional[AsyncLipsyncRequestSelectedModel] = None, - input_audio: typing.Optional[core.File] = None, - settings: typing.Optional[RunSettings] = None, - request_options: typing.Optional[RequestOptions] = None, - ) -> LipsyncPageOutput: - """ - Parameters - ---------- - example_id : typing.Optional[str] - - functions : typing.Optional[typing.List[AsyncLipsyncRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - input_face : typing.Optional[core.File] - See core.File for more documentation - - face_padding_top : typing.Optional[int] - - face_padding_bottom : typing.Optional[int] - - face_padding_left : typing.Optional[int] - - face_padding_right : typing.Optional[int] - - sadtalker_settings : typing.Optional[AsyncLipsyncRequestSadtalkerSettings] - - selected_model : typing.Optional[AsyncLipsyncRequestSelectedModel] - - 
input_audio : typing.Optional[core.File] - See core.File for more documentation - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - LipsyncPageOutput - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.lip_syncing.async_lipsync() - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/Lipsync/async", - method="POST", - params={ - "example_id": example_id, - }, - data={ - "functions": functions, - "variables": variables, - "face_padding_top": face_padding_top, - "face_padding_bottom": face_padding_bottom, - "face_padding_left": face_padding_left, - "face_padding_right": face_padding_right, - "sadtalker_settings": sadtalker_settings, - "selected_model": selected_model, - "settings": settings, - }, - files={ - "input_face": input_face, - "input_audio": input_audio, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - LipsyncPageOutput, - parse_obj_as( - type_=LipsyncPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/lip_syncing/types/__init__.py b/src/gooey/lip_syncing/types/__init__.py deleted file mode 100644 index bb6b073..0000000 --- a/src/gooey/lip_syncing/types/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .async_lipsync_request_functions_item import AsyncLipsyncRequestFunctionsItem -from .async_lipsync_request_functions_item_trigger import AsyncLipsyncRequestFunctionsItemTrigger -from .async_lipsync_request_sadtalker_settings import AsyncLipsyncRequestSadtalkerSettings -from .async_lipsync_request_sadtalker_settings_preprocess import AsyncLipsyncRequestSadtalkerSettingsPreprocess -from .async_lipsync_request_selected_model import AsyncLipsyncRequestSelectedModel - -__all__ = [ - "AsyncLipsyncRequestFunctionsItem", - "AsyncLipsyncRequestFunctionsItemTrigger", - "AsyncLipsyncRequestSadtalkerSettings", - "AsyncLipsyncRequestSadtalkerSettingsPreprocess", - "AsyncLipsyncRequestSelectedModel", -] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py b/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py deleted file mode 100644 index e329956..0000000 --- a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item_trigger.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsyncLipsyncRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py b/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py deleted file mode 100644 index 12da6d9..0000000 --- a/src/gooey/lip_syncing/types/async_lipsync_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsyncLipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/smart_gpt/__init__.py b/src/gooey/smart_gpt/__init__.py deleted file mode 100644 index e5f60f5..0000000 --- a/src/gooey/smart_gpt/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -from .types import ( - SmartGptPageRequestFunctionsItem, - SmartGptPageRequestFunctionsItemTrigger, - SmartGptPageRequestResponseFormatType, - SmartGptPageRequestSelectedModel, -) - -__all__ = [ - "SmartGptPageRequestFunctionsItem", - "SmartGptPageRequestFunctionsItemTrigger", - "SmartGptPageRequestResponseFormatType", - "SmartGptPageRequestSelectedModel", -] diff --git a/src/gooey/smart_gpt/client.py b/src/gooey/smart_gpt/client.py deleted file mode 100644 index b100c7d..0000000 --- a/src/gooey/smart_gpt/client.py +++ /dev/null @@ -1,324 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from ..core.client_wrapper import SyncClientWrapper -from .types.smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem -from .types.smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel -from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType -from ..types.run_settings import RunSettings -from ..core.request_options import RequestOptions -from ..types.smart_gpt_page_output import SmartGptPageOutput -from ..core.pydantic_utilities import parse_obj_as -from ..errors.payment_required_error import PaymentRequiredError -from ..errors.unprocessable_entity_error import UnprocessableEntityError -from ..types.http_validation_error import HttpValidationError -from ..errors.too_many_requests_error import TooManyRequestsError -from ..types.generic_error_response import GenericErrorResponse -from json.decoder import JSONDecodeError -from ..core.api_error import ApiError -from ..core.client_wrapper import AsyncClientWrapper - -# this is used as the default value for optional parameters -OMIT = typing.cast(typing.Any, ...) 
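# The OMIT sentinel above ("this is used as the default value for optional parameters") lets the
# generated client tell "argument not passed" apart from an explicit None: every request passes
# omit=OMIT, and keyword arguments still equal to the sentinel are left out of the serialized
# body (behaviour inferred from that pattern; it is not spelled out in this hunk). A hedged
# illustration with async_smart_gpt, where only input_prompt is required.
from gooey import Gooey

client = Gooey(api_key="YOUR_API_KEY")

# Only "input_prompt" ends up in the POST body of v3/SmartGPT/async; cot_prompt, selected_model
# and the remaining OMIT-defaulted keywords are omitted entirely.
client.smart_gpt.async_smart_gpt(input_prompt="input_prompt")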
- - -class SmartGptClient: - def __init__(self, *, client_wrapper: SyncClientWrapper): - self._client_wrapper = client_wrapper - - def async_smart_gpt( - self, - *, - input_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageOutput: - """ - Parameters - ---------- - input_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. 
- - Returns - ------- - SmartGptPageOutput - Successful Response - - Examples - -------- - from gooey import Gooey - - client = Gooey( - api_key="YOUR_API_KEY", - ) - client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", - ) - """ - _response = self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageOutput, - parse_obj_as( - type_=SmartGptPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) - - -class AsyncSmartGptClient: - def __init__(self, *, client_wrapper: AsyncClientWrapper): - self._client_wrapper = client_wrapper - - async def async_smart_gpt( - self, - *, - input_prompt: str, - example_id: typing.Optional[str] = None, - functions: typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] = OMIT, - variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - cot_prompt: typing.Optional[str] = OMIT, - reflexion_prompt: typing.Optional[str] = OMIT, - dera_prompt: typing.Optional[str] = OMIT, - selected_model: typing.Optional[SmartGptPageRequestSelectedModel] = OMIT, - avoid_repetition: typing.Optional[bool] = OMIT, - num_outputs: typing.Optional[int] = OMIT, - quality: typing.Optional[float] = OMIT, - max_tokens: typing.Optional[int] = OMIT, - sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, - settings: typing.Optional[RunSettings] = OMIT, - request_options: typing.Optional[RequestOptions] = None, - ) -> SmartGptPageOutput: - """ - Parameters - ---------- - input_prompt : str - - example_id : typing.Optional[str] - - functions : typing.Optional[typing.Sequence[SmartGptPageRequestFunctionsItem]] - - variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] - Variables to be used as Jinja prompt templates and in functions as arguments - - cot_prompt : typing.Optional[str] - - reflexion_prompt : typing.Optional[str] - - dera_prompt : typing.Optional[str] - - selected_model : typing.Optional[SmartGptPageRequestSelectedModel] - - 
avoid_repetition : typing.Optional[bool] - - num_outputs : typing.Optional[int] - - quality : typing.Optional[float] - - max_tokens : typing.Optional[int] - - sampling_temperature : typing.Optional[float] - - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] - - settings : typing.Optional[RunSettings] - - request_options : typing.Optional[RequestOptions] - Request-specific configuration. - - Returns - ------- - SmartGptPageOutput - Successful Response - - Examples - -------- - import asyncio - - from gooey import AsyncGooey - - client = AsyncGooey( - api_key="YOUR_API_KEY", - ) - - - async def main() -> None: - await client.smart_gpt.async_smart_gpt( - input_prompt="input_prompt", - ) - - - asyncio.run(main()) - """ - _response = await self._client_wrapper.httpx_client.request( - "v3/SmartGPT/async", - method="POST", - params={ - "example_id": example_id, - }, - json={ - "functions": functions, - "variables": variables, - "input_prompt": input_prompt, - "cot_prompt": cot_prompt, - "reflexion_prompt": reflexion_prompt, - "dera_prompt": dera_prompt, - "selected_model": selected_model, - "avoid_repetition": avoid_repetition, - "num_outputs": num_outputs, - "quality": quality, - "max_tokens": max_tokens, - "sampling_temperature": sampling_temperature, - "response_format_type": response_format_type, - "settings": settings, - }, - request_options=request_options, - omit=OMIT, - ) - try: - if 200 <= _response.status_code < 300: - return typing.cast( - SmartGptPageOutput, - parse_obj_as( - type_=SmartGptPageOutput, # type: ignore - object_=_response.json(), - ), - ) - if _response.status_code == 402: - raise PaymentRequiredError( - typing.cast( - typing.Optional[typing.Any], - parse_obj_as( - type_=typing.Optional[typing.Any], # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 422: - raise UnprocessableEntityError( - typing.cast( - HttpValidationError, - parse_obj_as( - type_=HttpValidationError, # type: ignore - object_=_response.json(), - ), - ) - ) - if _response.status_code == 429: - raise TooManyRequestsError( - typing.cast( - GenericErrorResponse, - parse_obj_as( - type_=GenericErrorResponse, # type: ignore - object_=_response.json(), - ), - ) - ) - _response_json = _response.json() - except JSONDecodeError: - raise ApiError(status_code=_response.status_code, body=_response.text) - raise ApiError(status_code=_response.status_code, body=_response_json) diff --git a/src/gooey/smart_gpt/types/__init__.py b/src/gooey/smart_gpt/types/__init__.py deleted file mode 100644 index f866a87..0000000 --- a/src/gooey/smart_gpt/types/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
-
-from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem
-from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger
-from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
-from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
-
-__all__ = [
-    "SmartGptPageRequestFunctionsItem",
-    "SmartGptPageRequestFunctionsItemTrigger",
-    "SmartGptPageRequestResponseFormatType",
-    "SmartGptPageRequestSelectedModel",
-]
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index 1691490..02ed7e1 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -20,6 +20,13 @@
 from .balance_response import BalanceResponse
 from .bot_broadcast_filters import BotBroadcastFilters
 from .bulk_eval_page_output import BulkEvalPageOutput
+from .bulk_eval_page_request_agg_functions_item import BulkEvalPageRequestAggFunctionsItem
+from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction
+from .bulk_eval_page_request_eval_prompts_item import BulkEvalPageRequestEvalPromptsItem
+from .bulk_eval_page_request_functions_item import BulkEvalPageRequestFunctionsItem
+from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger
+from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
+from .bulk_eval_page_request_selected_model import BulkEvalPageRequestSelectedModel
 from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
 from .bulk_run_request_functions_item import BulkRunRequestFunctionsItem
 from .bulk_run_request_functions_item_trigger import BulkRunRequestFunctionsItemTrigger
@@ -67,6 +74,17 @@
 )
 from .conversation_entry_role import ConversationEntryRole
 from .conversation_start import ConversationStart
+from .create_stream_request import CreateStreamRequest
+from .create_stream_request_asr_model import CreateStreamRequestAsrModel
+from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
+from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
+from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
+from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
+from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
+from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
+from .create_stream_request_selected_model import CreateStreamRequestSelectedModel
+from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
+from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
 from .create_stream_response import CreateStreamResponse
 from .deforum_sd_page_output import DeforumSdPageOutput
 from .deforum_sd_page_request_animation_prompts_item import DeforumSdPageRequestAnimationPromptsItem
@@ -171,6 +189,11 @@
 from .lipsync_page_request_sadtalker_settings_preprocess import LipsyncPageRequestSadtalkerSettingsPreprocess
 from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
 from .lipsync_page_status_response import LipsyncPageStatusResponse
+from .lipsync_request_functions_item import LipsyncRequestFunctionsItem
+from .lipsync_request_functions_item_trigger import LipsyncRequestFunctionsItemTrigger
+from .lipsync_request_sadtalker_settings import LipsyncRequestSadtalkerSettings
+from .lipsync_request_sadtalker_settings_preprocess import LipsyncRequestSadtalkerSettingsPreprocess
+from .lipsync_request_selected_model import LipsyncRequestSelectedModel
 from .lipsync_tts_page_output import LipsyncTtsPageOutput
 from .lipsync_tts_page_request import LipsyncTtsPageRequest
 from .lipsync_tts_page_request_functions_item import LipsyncTtsPageRequestFunctionsItem
@@ -273,6 +296,10 @@
 from .serp_search_location import SerpSearchLocation
 from .serp_search_type import SerpSearchType
 from .smart_gpt_page_output import SmartGptPageOutput
+from .smart_gpt_page_request_functions_item import SmartGptPageRequestFunctionsItem
+from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger
+from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
+from .smart_gpt_page_request_selected_model import SmartGptPageRequestSelectedModel
 from .smart_gpt_page_status_response import SmartGptPageStatusResponse
 from .social_lookup_email_page_output import SocialLookupEmailPageOutput
 from .social_lookup_email_page_request_functions_item import SocialLookupEmailPageRequestFunctionsItem
@@ -367,6 +394,13 @@
     "BalanceResponse",
     "BotBroadcastFilters",
     "BulkEvalPageOutput",
+    "BulkEvalPageRequestAggFunctionsItem",
+    "BulkEvalPageRequestAggFunctionsItemFunction",
+    "BulkEvalPageRequestEvalPromptsItem",
+    "BulkEvalPageRequestFunctionsItem",
+    "BulkEvalPageRequestFunctionsItemTrigger",
+    "BulkEvalPageRequestResponseFormatType",
+    "BulkEvalPageRequestSelectedModel",
     "BulkEvalPageStatusResponse",
     "BulkRunRequestFunctionsItem",
     "BulkRunRequestFunctionsItemTrigger",
@@ -412,6 +446,17 @@
     "ConversationEntryContentItem_Text",
     "ConversationEntryRole",
     "ConversationStart",
+    "CreateStreamRequest",
+    "CreateStreamRequestAsrModel",
+    "CreateStreamRequestCitationStyle",
+    "CreateStreamRequestEmbeddingModel",
+    "CreateStreamRequestLipsyncModel",
+    "CreateStreamRequestOpenaiTtsModel",
+    "CreateStreamRequestOpenaiVoiceName",
+    "CreateStreamRequestResponseFormatType",
+    "CreateStreamRequestSelectedModel",
+    "CreateStreamRequestTranslationModel",
+    "CreateStreamRequestTtsProvider",
     "CreateStreamResponse",
     "DeforumSdPageOutput",
     "DeforumSdPageRequestAnimationPromptsItem",
@@ -514,6 +559,11 @@
     "LipsyncPageRequestSadtalkerSettingsPreprocess",
     "LipsyncPageRequestSelectedModel",
     "LipsyncPageStatusResponse",
+    "LipsyncRequestFunctionsItem",
+    "LipsyncRequestFunctionsItemTrigger",
+    "LipsyncRequestSadtalkerSettings",
+    "LipsyncRequestSadtalkerSettingsPreprocess",
+    "LipsyncRequestSelectedModel",
     "LipsyncTtsPageOutput",
     "LipsyncTtsPageRequest",
     "LipsyncTtsPageRequestFunctionsItem",
@@ -612,6 +662,10 @@
     "SerpSearchLocation",
     "SerpSearchType",
     "SmartGptPageOutput",
+    "SmartGptPageRequestFunctionsItem",
+    "SmartGptPageRequestFunctionsItemTrigger",
+    "SmartGptPageRequestResponseFormatType",
+    "SmartGptPageRequestSelectedModel",
     "SmartGptPageStatusResponse",
     "SocialLookupEmailPageOutput",
     "SocialLookupEmailPageRequestFunctionsItem",
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py b/src/gooey/types/bulk_eval_page_request_agg_functions_item.py
similarity index 85%
rename from src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py
rename to src/gooey/types/bulk_eval_page_request_agg_functions_item.py
index 391a7ae..b7f5cd7 100644
--- a/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item.py
+++ b/src/gooey/types/bulk_eval_page_request_agg_functions_item.py
@@ -1,9 +1,9 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ...core.pydantic_utilities import UniversalBaseModel
+from ..core.pydantic_utilities import UniversalBaseModel
 import typing
 from .bulk_eval_page_request_agg_functions_item_function import BulkEvalPageRequestAggFunctionsItemFunction
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import pydantic
 
 
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item_function.py b/src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_agg_functions_item_function.py
rename to src/gooey/types/bulk_eval_page_request_agg_functions_item_function.py
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py b/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py
similarity index 81%
rename from src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py
rename to src/gooey/types/bulk_eval_page_request_eval_prompts_item.py
index 8bbc6b0..7d3956d 100644
--- a/src/gooey/evaluator/types/bulk_eval_page_request_eval_prompts_item.py
+++ b/src/gooey/types/bulk_eval_page_request_eval_prompts_item.py
@@ -1,7 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ...core.pydantic_utilities import UniversalBaseModel
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import UniversalBaseModel
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
 import pydantic
 
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py b/src/gooey/types/bulk_eval_page_request_functions_item.py
similarity index 86%
rename from src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py
rename to src/gooey/types/bulk_eval_page_request_functions_item.py
index 26ac5c6..b89037c 100644
--- a/src/gooey/evaluator/types/bulk_eval_page_request_functions_item.py
+++ b/src/gooey/types/bulk_eval_page_request_functions_item.py
@@ -1,9 +1,9 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ...core.pydantic_utilities import UniversalBaseModel
+from ..core.pydantic_utilities import UniversalBaseModel
 from .bulk_eval_page_request_functions_item_trigger import BulkEvalPageRequestFunctionsItemTrigger
 import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
 
 
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_functions_item_trigger.py b/src/gooey/types/bulk_eval_page_request_functions_item_trigger.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_functions_item_trigger.py
rename to src/gooey/types/bulk_eval_page_request_functions_item_trigger.py
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_response_format_type.py
rename to src/gooey/types/bulk_eval_page_request_response_format_type.py
diff --git a/src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py b/src/gooey/types/bulk_eval_page_request_selected_model.py
similarity index 100%
rename from src/gooey/evaluator/types/bulk_eval_page_request_selected_model.py
rename to src/gooey/types/bulk_eval_page_request_selected_model.py
diff --git a/src/gooey/types/create_stream_request.py b/src/gooey/types/create_stream_request.py
new file mode 100644
index 0000000..70b1625
--- /dev/null
+++ b/src/gooey/types/create_stream_request.py
@@ -0,0 +1,175 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ..core.pydantic_utilities import UniversalBaseModel
+import pydantic
+import typing
+from .button_pressed import ButtonPressed
+from .recipe_function import RecipeFunction
+from .conversation_entry import ConversationEntry
+from .create_stream_request_selected_model import CreateStreamRequestSelectedModel
+from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
+from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
+from .create_stream_request_asr_model import CreateStreamRequestAsrModel
+from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
+from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
+from .llm_tools import LlmTools
+from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
+from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
+from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
+from .sad_talker_settings import SadTalkerSettings
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
+
+
+class CreateStreamRequest(UniversalBaseModel):
+    integration_id: str = pydantic.Field()
+    """
+    Your Integration ID as shown in the Copilot Integrations tab
+    """
+
+    conversation_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    The gooey conversation ID.
+
+    If not provided, a new conversation will be started and a new ID will be returned in the response. Use this to maintain the state of the conversation between requests.
+
+    Note that you may not provide a custom ID here, and must only use the `conversation_id` returned in a previous response.
+    """
+
+    user_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Your app's custom user ID.
+
+    If not provided, a random user will be created and a new ID will be returned in the response. If a `conversation_id` is provided, this field is automatically set to the user's id associated with that conversation.
+    """
+
+    user_message_id: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Your app's custom message ID for the user message.
+
+    If not provided, a random ID will be generated and returned in the response. This is useful for tracking messages in the conversation.
+    """
+
+    button_pressed: typing.Optional[ButtonPressed] = pydantic.Field(default=None)
+    """
+    The button that was pressed by the user.
+    """
+
+    functions: typing.Optional[typing.List[RecipeFunction]] = None
+    variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
+    """
+    Variables to be used as Jinja prompt templates and in functions as arguments
+    """
+
+    input_prompt: typing.Optional[str] = None
+    input_audio: typing.Optional[str] = None
+    input_images: typing.Optional[typing.List[str]] = None
+    input_documents: typing.Optional[typing.List[str]] = None
+    doc_extract_url: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Select a workflow to extract text from documents and images.
+    """
+
+    messages: typing.Optional[typing.List[ConversationEntry]] = None
+    bot_script: typing.Optional[str] = None
+    selected_model: typing.Optional[CreateStreamRequestSelectedModel] = None
+    document_model: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    When your copilot users upload a photo or pdf, what kind of document are they most likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
+    """
+
+    task_instructions: typing.Optional[str] = None
+    query_instructions: typing.Optional[str] = None
+    keyword_instructions: typing.Optional[str] = None
+    documents: typing.Optional[typing.List[str]] = None
+    max_references: typing.Optional[int] = None
+    max_context_words: typing.Optional[int] = None
+    scroll_jump: typing.Optional[int] = None
+    embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = None
+    dense_weight: typing.Optional[float] = pydantic.Field(default=None)
+    """
+    Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
+    Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
+    """
+
+    citation_style: typing.Optional[CreateStreamRequestCitationStyle] = None
+    use_url_shortener: typing.Optional[bool] = None
+    asr_model: typing.Optional[CreateStreamRequestAsrModel] = pydantic.Field(default=None)
+    """
+    Choose a model to transcribe incoming audio messages to text.
+    """
+
+    asr_language: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Choose a language to transcribe incoming audio messages to text.
+    """
+
+    translation_model: typing.Optional[CreateStreamRequestTranslationModel] = None
+    user_language: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
+ """ + + input_glossary_document: typing.Optional[str] = pydantic.Field(default=None) + """ + Translation Glossary for User Langauge -> LLM Language (English) + """ + + output_glossary_document: typing.Optional[str] = pydantic.Field(default=None) + """ + Translation Glossary for LLM Language (English) -> User Langauge + """ + + lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = None + tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None) + """ + Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). + """ + + avoid_repetition: typing.Optional[bool] = None + num_outputs: typing.Optional[int] = None + quality: typing.Optional[float] = None + max_tokens: typing.Optional[int] = None + sampling_temperature: typing.Optional[float] = None + response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = None + tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = None + uberduck_voice_name: typing.Optional[str] = None + uberduck_speaking_rate: typing.Optional[float] = None + google_voice_name: typing.Optional[str] = None + google_speaking_rate: typing.Optional[float] = None + google_pitch: typing.Optional[float] = None + bark_history_prompt: typing.Optional[str] = None + elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None) + """ + Use `elevenlabs_voice_id` instead + """ + + elevenlabs_api_key: typing.Optional[str] = None + elevenlabs_voice_id: typing.Optional[str] = None + elevenlabs_model: typing.Optional[str] = None + elevenlabs_stability: typing.Optional[float] = None + elevenlabs_similarity_boost: typing.Optional[float] = None + elevenlabs_style: typing.Optional[float] = None + elevenlabs_speaker_boost: typing.Optional[bool] = None + azure_voice_name: typing.Optional[str] = None + openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = None + openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = None + input_face: typing.Optional[str] = None + face_padding_top: typing.Optional[int] = None + face_padding_bottom: typing.Optional[int] = None + face_padding_left: typing.Optional[int] = None + face_padding_right: typing.Optional[int] = None + sadtalker_settings: typing.Optional[SadTalkerSettings] = None + input_text: typing.Optional[str] = pydantic.Field(default=None) + """ + Use `input_prompt` instead + """ + + if IS_PYDANTIC_V2: + model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2 + else: + + class Config: + frozen = True + smart_union = True + extra = pydantic.Extra.allow diff --git a/src/gooey/copilot_integrations/types/create_stream_request_asr_model.py b/src/gooey/types/create_stream_request_asr_model.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_asr_model.py rename to src/gooey/types/create_stream_request_asr_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_citation_style.py b/src/gooey/types/create_stream_request_citation_style.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_citation_style.py rename to src/gooey/types/create_stream_request_citation_style.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py b/src/gooey/types/create_stream_request_embedding_model.py similarity index 100% rename from 
src/gooey/copilot_integrations/types/create_stream_request_embedding_model.py rename to src/gooey/types/create_stream_request_embedding_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py b/src/gooey/types/create_stream_request_lipsync_model.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_lipsync_model.py rename to src/gooey/types/create_stream_request_lipsync_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py b/src/gooey/types/create_stream_request_openai_tts_model.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_openai_tts_model.py rename to src/gooey/types/create_stream_request_openai_tts_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py b/src/gooey/types/create_stream_request_openai_voice_name.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_openai_voice_name.py rename to src/gooey/types/create_stream_request_openai_voice_name.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py b/src/gooey/types/create_stream_request_response_format_type.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_response_format_type.py rename to src/gooey/types/create_stream_request_response_format_type.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_selected_model.py b/src/gooey/types/create_stream_request_selected_model.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_selected_model.py rename to src/gooey/types/create_stream_request_selected_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_translation_model.py b/src/gooey/types/create_stream_request_translation_model.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_translation_model.py rename to src/gooey/types/create_stream_request_translation_model.py diff --git a/src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py b/src/gooey/types/create_stream_request_tts_provider.py similarity index 100% rename from src/gooey/copilot_integrations/types/create_stream_request_tts_provider.py rename to src/gooey/types/create_stream_request_tts_provider.py diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py b/src/gooey/types/lipsync_request_functions_item.py similarity index 59% rename from src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py rename to src/gooey/types/lipsync_request_functions_item.py index 6046d25..0ac74de 100644 --- a/src/gooey/lip_syncing/types/async_lipsync_request_functions_item.py +++ b/src/gooey/types/lipsync_request_functions_item.py @@ -1,15 +1,15 @@ # This file was auto-generated by Fern from our API Definition. 
-from ...core.pydantic_utilities import UniversalBaseModel
-from .async_lipsync_request_functions_item_trigger import AsyncLipsyncRequestFunctionsItemTrigger
+from ..core.pydantic_utilities import UniversalBaseModel
+from .lipsync_request_functions_item_trigger import LipsyncRequestFunctionsItemTrigger
 import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
 
 
-class AsyncLipsyncRequestFunctionsItem(UniversalBaseModel):
+class LipsyncRequestFunctionsItem(UniversalBaseModel):
     url: str
-    trigger: AsyncLipsyncRequestFunctionsItemTrigger = pydantic.Field()
+    trigger: LipsyncRequestFunctionsItemTrigger = pydantic.Field()
     """
     When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
     """
diff --git a/src/gooey/types/lipsync_request_functions_item_trigger.py b/src/gooey/types/lipsync_request_functions_item_trigger.py
new file mode 100644
index 0000000..ee6b518
--- /dev/null
+++ b/src/gooey/types/lipsync_request_functions_item_trigger.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py b/src/gooey/types/lipsync_request_sadtalker_settings.py
similarity index 78%
rename from src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py
rename to src/gooey/types/lipsync_request_sadtalker_settings.py
index 90c64e2..affdb1e 100644
--- a/src/gooey/copilot_for_your_enterprise/types/async_video_bots_request_sadtalker_settings.py
+++ b/src/gooey/types/lipsync_request_sadtalker_settings.py
@@ -1,15 +1,15 @@
 # This file was auto-generated by Fern from our API Definition.
 
-from ...core.pydantic_utilities import UniversalBaseModel
+from ..core.pydantic_utilities import UniversalBaseModel
 import typing
-from .async_video_bots_request_sadtalker_settings_preprocess import AsyncVideoBotsRequestSadtalkerSettingsPreprocess
+from .lipsync_request_sadtalker_settings_preprocess import LipsyncRequestSadtalkerSettingsPreprocess
 import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
+from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
 
-class AsyncVideoBotsRequestSadtalkerSettings(UniversalBaseModel):
+class LipsyncRequestSadtalkerSettings(UniversalBaseModel):
     still: typing.Optional[bool] = None
-    preprocess: typing.Optional[AsyncVideoBotsRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
+    preprocess: typing.Optional[LipsyncRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
     """
     SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
""" diff --git a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py b/src/gooey/types/lipsync_request_sadtalker_settings_preprocess.py similarity index 71% rename from src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py rename to src/gooey/types/lipsync_request_sadtalker_settings_preprocess.py index 21e1a4a..42be9c8 100644 --- a/src/gooey/lip_syncing/types/async_lipsync_request_sadtalker_settings_preprocess.py +++ b/src/gooey/types/lipsync_request_sadtalker_settings_preprocess.py @@ -2,6 +2,6 @@ import typing -AsyncLipsyncRequestSadtalkerSettingsPreprocess = typing.Union[ +LipsyncRequestSadtalkerSettingsPreprocess = typing.Union[ typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any ] diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py new file mode 100644 index 0000000..c5614b4 --- /dev/null +++ b/src/gooey/types/lipsync_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py b/src/gooey/types/smart_gpt_page_request_functions_item.py similarity index 86% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py rename to src/gooey/types/smart_gpt_page_request_functions_item.py index 03c5e72..edb2c83 100644 --- a/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item.py +++ b/src/gooey/types/smart_gpt_page_request_functions_item.py @@ -1,9 +1,9 @@ # This file was auto-generated by Fern from our API Definition. -from ...core.pydantic_utilities import UniversalBaseModel +from ..core.pydantic_utilities import UniversalBaseModel from .smart_gpt_page_request_functions_item_trigger import SmartGptPageRequestFunctionsItemTrigger import pydantic -from ...core.pydantic_utilities import IS_PYDANTIC_V2 +from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item_trigger.py b/src/gooey/types/smart_gpt_page_request_functions_item_trigger.py similarity index 100% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_functions_item_trigger.py rename to src/gooey/types/smart_gpt_page_request_functions_item_trigger.py diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py similarity index 100% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_response_format_type.py rename to src/gooey/types/smart_gpt_page_request_response_format_type.py diff --git a/src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py b/src/gooey/types/smart_gpt_page_request_selected_model.py similarity index 100% rename from src/gooey/smart_gpt/types/smart_gpt_page_request_selected_model.py rename to src/gooey/types/smart_gpt_page_request_selected_model.py