From ee79e22c0d5c06bfc7b9ccbf078553e893b0d659 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Thu, 12 Sep 2024 14:38:38 +0000
Subject: [PATCH] Release 0.0.1-beta19
---
pyproject.toml | 2 +-
reference.md | 248 ++-
src/gooey/__init__.py | 122 +-
src/gooey/client.py | 1365 ++++++++---------
src/gooey/copilot/__init__.py | 44 +-
src/gooey/copilot/client.py | 399 +++--
src/gooey/copilot/types/__init__.py | 46 +-
.../copilot_completion_request_asr_model.py | 23 -
...pilot_completion_request_citation_style.py | 25 -
...ilot_completion_request_embedding_model.py | 18 -
...pilot_completion_request_functions_item.py | 24 -
...mpletion_request_functions_item_trigger.py | 5 -
...opilot_completion_request_lipsync_model.py | 5 -
...lot_completion_request_openai_tts_model.py | 5 -
...ot_completion_request_openai_voice_name.py | 7 -
...completion_request_response_format_type.py | 5 -
...t_completion_request_sadtalker_settings.py | 42 -
...n_request_sadtalker_settings_preprocess.py | 7 -
...ot_completion_request_translation_model.py | 5 -
...copilot_completion_request_tts_provider.py | 7 -
.../video_bots_page_request_asr_model.py | 0
.../video_bots_page_request_citation_style.py | 0
...video_bots_page_request_embedding_model.py | 0
.../video_bots_page_request_lipsync_model.py | 0
...ideo_bots_page_request_openai_tts_model.py | 0
...deo_bots_page_request_openai_voice_name.py | 0
..._bots_page_request_response_format_type.py | 0
...deo_bots_page_request_translation_model.py | 0
.../video_bots_page_request_tts_provider.py | 0
src/gooey/core/client_wrapper.py | 2 +-
src/gooey/types/__init__.py | 102 --
src/gooey/types/asr_page_request.py | 43 -
src/gooey/types/bulk_runner_page_request.py | 55 -
.../types/compare_upscaler_page_request.py | 37 -
src/gooey/types/doc_extract_page_request.py | 43 -
src/gooey/types/doc_summary_page_request.py | 43 -
...oc_summary_request_response_format_type.py | 5 -
.../doc_summary_request_selected_asr_model.py | 23 -
.../types/face_inpainting_page_request.py | 42 -
.../types/image_segmentation_page_request.py | 36 -
src/gooey/types/img2img_page_request.py | 43 -
src/gooey/types/lipsync_page_request.py | 37 -
.../types/lipsync_request_selected_model.py | 5 -
src/gooey/types/lipsync_tts_page_request.py | 62 -
.../lipsync_tts_request_openai_tts_model.py | 5 -
.../lipsync_tts_request_openai_voice_name.py | 7 -
.../lipsync_tts_request_selected_model.py | 5 -
.../types/lipsync_tts_request_tts_provider.py | 7 -
.../types/object_inpainting_page_request.py | 43 -
.../types/portrait_request_selected_model.py | 5 -
.../product_image_request_selected_model.py | 5 -
.../types/qr_code_generator_page_request.py | 66 -
...est_image_prompt_controlnet_models_item.py | 20 -
src/gooey/types/qr_code_request_scheduler.py | 23 -
..._request_selected_controlnet_model_item.py | 20 -
.../types/qr_code_request_selected_model.py | 22 -
src/gooey/types/recipe_function.py | 8 +-
...image_request_selected_controlnet_model.py | 19 -
..._request_selected_controlnet_model_item.py | 20 -
.../remix_image_request_selected_model.py | 21 -
...emove_background_request_selected_model.py | 5 -
src/gooey/types/sad_talker_settings.py | 12 +-
...peech_recognition_request_output_format.py | 5 -
...eech_recognition_request_selected_model.py | 23 -
...h_recognition_request_translation_model.py | 5 -
...esize_data_request_response_format_type.py | 5 -
...thesize_data_request_selected_asr_model.py | 23 -
.../types/translate_request_selected_model.py | 5 -
src/gooey/types/translation_page_request.py | 33 -
.../upscale_request_selected_models_item.py | 7 -
src/gooey/types/video_bots_page_request.py | 131 --
.../video_bots_page_request_functions_item.py | 24 -
...ots_page_request_functions_item_trigger.py | 5 -
...eo_bots_page_request_sadtalker_settings.py | 40 -
...e_request_sadtalker_settings_preprocess.py | 7 -
75 files changed, 1043 insertions(+), 2565 deletions(-)
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_asr_model.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_citation_style.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_embedding_model.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_functions_item.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_response_format_type.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_translation_model.py
delete mode 100644 src/gooey/copilot/types/copilot_completion_request_tts_provider.py
rename src/gooey/{ => copilot}/types/video_bots_page_request_asr_model.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_citation_style.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_embedding_model.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_lipsync_model.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_openai_tts_model.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_openai_voice_name.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_response_format_type.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_translation_model.py (100%)
rename src/gooey/{ => copilot}/types/video_bots_page_request_tts_provider.py (100%)
delete mode 100644 src/gooey/types/asr_page_request.py
delete mode 100644 src/gooey/types/bulk_runner_page_request.py
delete mode 100644 src/gooey/types/compare_upscaler_page_request.py
delete mode 100644 src/gooey/types/doc_extract_page_request.py
delete mode 100644 src/gooey/types/doc_summary_page_request.py
delete mode 100644 src/gooey/types/doc_summary_request_response_format_type.py
delete mode 100644 src/gooey/types/doc_summary_request_selected_asr_model.py
delete mode 100644 src/gooey/types/face_inpainting_page_request.py
delete mode 100644 src/gooey/types/image_segmentation_page_request.py
delete mode 100644 src/gooey/types/img2img_page_request.py
delete mode 100644 src/gooey/types/lipsync_page_request.py
delete mode 100644 src/gooey/types/lipsync_request_selected_model.py
delete mode 100644 src/gooey/types/lipsync_tts_page_request.py
delete mode 100644 src/gooey/types/lipsync_tts_request_openai_tts_model.py
delete mode 100644 src/gooey/types/lipsync_tts_request_openai_voice_name.py
delete mode 100644 src/gooey/types/lipsync_tts_request_selected_model.py
delete mode 100644 src/gooey/types/lipsync_tts_request_tts_provider.py
delete mode 100644 src/gooey/types/object_inpainting_page_request.py
delete mode 100644 src/gooey/types/portrait_request_selected_model.py
delete mode 100644 src/gooey/types/product_image_request_selected_model.py
delete mode 100644 src/gooey/types/qr_code_generator_page_request.py
delete mode 100644 src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
delete mode 100644 src/gooey/types/qr_code_request_scheduler.py
delete mode 100644 src/gooey/types/qr_code_request_selected_controlnet_model_item.py
delete mode 100644 src/gooey/types/qr_code_request_selected_model.py
delete mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model.py
delete mode 100644 src/gooey/types/remix_image_request_selected_controlnet_model_item.py
delete mode 100644 src/gooey/types/remix_image_request_selected_model.py
delete mode 100644 src/gooey/types/remove_background_request_selected_model.py
delete mode 100644 src/gooey/types/speech_recognition_request_output_format.py
delete mode 100644 src/gooey/types/speech_recognition_request_selected_model.py
delete mode 100644 src/gooey/types/speech_recognition_request_translation_model.py
delete mode 100644 src/gooey/types/synthesize_data_request_response_format_type.py
delete mode 100644 src/gooey/types/synthesize_data_request_selected_asr_model.py
delete mode 100644 src/gooey/types/translate_request_selected_model.py
delete mode 100644 src/gooey/types/translation_page_request.py
delete mode 100644 src/gooey/types/upscale_request_selected_models_item.py
delete mode 100644 src/gooey/types/video_bots_page_request.py
delete mode 100644 src/gooey/types/video_bots_page_request_functions_item.py
delete mode 100644 src/gooey/types/video_bots_page_request_functions_item_trigger.py
delete mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings.py
delete mode 100644 src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
diff --git a/pyproject.toml b/pyproject.toml
index 9ae1dd9..7863965 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta18"
+version = "0.0.1-beta19"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index a067ff0..7222382 100644
--- a/reference.md
+++ b/reference.md
@@ -232,7 +232,7 @@ client.qr_code(
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -256,9 +256,7 @@ client.qr_code(
-
-**qr_code_input_image:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**qr_code_input_image:** `typing.Optional[str]`
@@ -274,9 +272,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**qr_code_file:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**qr_code_file:** `typing.Optional[str]`
@@ -308,7 +304,9 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
+**image_prompt_controlnet_models:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+]`
@@ -348,7 +346,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
+**selected_model:** `typing.Optional[QrCodeGeneratorPageRequestSelectedModel]`
@@ -356,7 +354,9 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
+**selected_controlnet_model:** `typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+]`
@@ -388,7 +388,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -412,7 +412,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
+**scheduler:** `typing.Optional[QrCodeGeneratorPageRequestScheduler]`
@@ -1378,6 +1378,7 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.bulk_run(
+ documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -1397,9 +1398,13 @@ client.bulk_run(
-
-**documents:** `from __future__ import annotations
+**documents:** `typing.Sequence[str]`
+
-typing.List[core.File]` — See core.File for more documentation
+Upload or link to a CSV or google sheet that contains your sample input data.
For example, for Copilot, this would be sample questions; for Art QR Code, this would be pairs of image descriptions and URLs.
Remember to include header names in your CSV too.
+
@@ -1407,7 +1412,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**run_urls:** `typing.List[str]`
+**run_urls:** `typing.Sequence[str]`
Provide one or more Gooey.AI workflow runs.
@@ -1452,7 +1457,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1468,7 +1473,7 @@ For each output field in the Gooey.AI workflow, specify the column name that you
-
-**eval_urls:** `typing.Optional[typing.List[str]]`
+**eval_urls:** `typing.Optional[typing.Sequence[str]]`
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -1693,7 +1698,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.synthesize_data()
+client.synthesize_data(
+ documents=["documents"],
+)
```
@@ -1709,9 +1716,7 @@ client.synthesize_data()
-
-**documents:** `from __future__ import annotations
-
-typing.List[core.File]` — See core.File for more documentation
+**documents:** `typing.Sequence[str]`
@@ -1727,7 +1732,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -1743,9 +1748,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**sheet_url:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**sheet_url:** `typing.Optional[str]`
@@ -1753,7 +1756,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[DocExtractPageRequestSelectedAsrModel]`
@@ -1769,9 +1772,10 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**glossary_document:** `from __future__ import annotations
+**glossary_document:** `typing.Optional[str]`
-typing.Optional[core.File]` — See core.File for more documentation
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
@@ -1835,7 +1839,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[DocExtractPageRequestResponseFormatType]`
@@ -2428,7 +2432,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.doc_summary()
+client.doc_summary(
+ documents=["documents"],
+)
```
@@ -2444,9 +2450,7 @@ client.doc_summary()
-
-**documents:** `from __future__ import annotations
-
-typing.List[core.File]` — See core.File for more documentation
+**documents:** `typing.Sequence[str]`
@@ -2462,7 +2466,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2510,7 +2514,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[DocSummaryPageRequestSelectedAsrModel]`
@@ -2566,7 +2570,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[DocSummaryPageRequestResponseFormatType]`
@@ -2714,7 +2718,7 @@ client.lipsync()
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2730,9 +2734,7 @@ client.lipsync()
-
-**input_face:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_face:** `typing.Optional[str]`
@@ -2780,7 +2782,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncPageRequestSelectedModel]`
@@ -2788,9 +2790,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**input_audio:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_audio:** `typing.Optional[str]`
@@ -2870,7 +2870,7 @@ client.lipsync_tts(
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -2886,7 +2886,7 @@ client.lipsync_tts(
-
-**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
+**tts_provider:** `typing.Optional[LipsyncTtsPageRequestTtsProvider]`
@@ -3014,7 +3014,7 @@ client.lipsync_tts(
-
-**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]`
@@ -3022,7 +3022,7 @@ client.lipsync_tts(
-
-**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]`
@@ -3030,9 +3030,7 @@ client.lipsync_tts(
-
-**input_face:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_face:** `typing.Optional[str]`
@@ -3080,7 +3078,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncTtsPageRequestSelectedModel]`
@@ -3358,7 +3356,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.speech_recognition()
+client.speech_recognition(
+ documents=["documents"],
+)
```
@@ -3374,9 +3374,7 @@ client.speech_recognition()
-
-**documents:** `from __future__ import annotations
-
-typing.List[core.File]` — See core.File for more documentation
+**documents:** `typing.Sequence[str]`
@@ -3392,7 +3390,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3408,7 +3406,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
+**selected_model:** `typing.Optional[AsrPageRequestSelectedModel]`
@@ -3424,7 +3422,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
+**translation_model:** `typing.Optional[AsrPageRequestTranslationModel]`
@@ -3432,7 +3430,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
+**output_format:** `typing.Optional[AsrPageRequestOutputFormat]`
@@ -3464,9 +3462,10 @@ typing.List[core.File]` — See core.File for more documentation
-
-**glossary_document:** `from __future__ import annotations
+**glossary_document:** `typing.Optional[str]`
-typing.Optional[core.File]` — See core.File for more documentation
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
@@ -3688,7 +3687,7 @@ client.translate()
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3704,7 +3703,7 @@ client.translate()
-
-**texts:** `typing.Optional[typing.List[str]]`
+**texts:** `typing.Optional[typing.Sequence[str]]`
@@ -3712,7 +3711,7 @@ client.translate()
-
-**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
+**selected_model:** `typing.Optional[TranslationPageRequestSelectedModel]`
@@ -3736,9 +3735,10 @@ client.translate()
-
-**glossary_document:** `from __future__ import annotations
+**glossary_document:** `typing.Optional[str]`
-typing.Optional[core.File]` — See core.File for more documentation
+Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
@@ -3784,7 +3784,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remix_image()
+client.remix_image(
+ input_image="input_image",
+)
```
@@ -3800,9 +3802,7 @@ client.remix_image()
-
-**input_image:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**input_image:** `str`
@@ -3818,7 +3818,7 @@ core.File` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -3842,7 +3842,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[Img2ImgPageRequestSelectedModel]`
@@ -3850,7 +3850,7 @@ core.File` — See core.File for more documentation
-
-**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]`
+**selected_controlnet_model:** `typing.Optional[Img2ImgPageRequestSelectedControlnetModel]`
@@ -3914,7 +3914,7 @@ core.File` — See core.File for more documentation
-
-**controlnet_conditioning_scale:** `typing.Optional[typing.List[float]]`
+**controlnet_conditioning_scale:** `typing.Optional[typing.Sequence[float]]`
@@ -4177,6 +4177,7 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.product_image(
+ input_image="input_image",
text_prompt="text_prompt",
)
@@ -4194,9 +4195,7 @@ client.product_image(
-
-**input_image:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**input_image:** `str`
@@ -4220,7 +4219,7 @@ core.File` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4268,7 +4267,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ObjectInpaintingPageRequestSelectedModel]`
@@ -4379,7 +4378,8 @@ client = Gooey(
api_key="YOUR_API_KEY",
)
client.portrait(
- text_prompt="text_prompt",
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
)
```
@@ -4396,9 +4396,7 @@ client.portrait(
-
-**input_image:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**input_image:** `str`
@@ -4422,7 +4420,7 @@ core.File` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -4462,7 +4460,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
+**selected_model:** `typing.Optional[FaceInpaintingPageRequestSelectedModel]`
@@ -5022,7 +5020,9 @@ from gooey import Gooey
client = Gooey(
api_key="YOUR_API_KEY",
)
-client.remove_background()
+client.remove_background(
+ input_image="input_image",
+)
```
@@ -5038,9 +5038,7 @@ client.remove_background()
-
-**input_image:** `from __future__ import annotations
-
-core.File` — See core.File for more documentation
+**input_image:** `str`
@@ -5056,7 +5054,7 @@ core.File` — See core.File for more documentation
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -5072,7 +5070,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageSegmentationPageRequestSelectedModel]`
@@ -5200,7 +5198,7 @@ client.upscale(
-
-**functions:** `typing.Optional[typing.List[RecipeFunction]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -5216,9 +5214,7 @@ client.upscale(
-
-**input_image:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_image:** `typing.Optional[str]` — Input Image
@@ -5226,9 +5222,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**input_video:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_video:** `typing.Optional[str]` — Input Video
@@ -5236,7 +5230,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]`
@@ -5726,7 +5720,7 @@ client.copilot.completion()
-
-**functions:** `typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]`
+**functions:** `typing.Optional[typing.Sequence[RecipeFunction]]`
@@ -5758,9 +5752,7 @@ client.copilot.completion()
-
-**input_images:** `from __future__ import annotations
-
-typing.Optional[typing.List[core.File]]` — See core.File for more documentation
+**input_images:** `typing.Optional[typing.Sequence[str]]`
@@ -5768,9 +5760,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**input_documents:** `from __future__ import annotations
-
-typing.Optional[typing.List[core.File]]` — See core.File for more documentation
+**input_documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5786,7 +5776,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**messages:** `typing.Optional[typing.List[ConversationEntry]]`
+**messages:** `typing.Optional[typing.Sequence[ConversationEntry]]`
@@ -5842,9 +5832,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**documents:** `from __future__ import annotations
-
-typing.Optional[typing.List[core.File]]` — See core.File for more documentation
+**documents:** `typing.Optional[typing.Sequence[str]]`
@@ -5876,7 +5864,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[VideoBotsPageRequestEmbeddingModel]`
@@ -5897,7 +5885,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]`
+**citation_style:** `typing.Optional[VideoBotsPageRequestCitationStyle]`
@@ -5913,7 +5901,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**asr_model:** `typing.Optional[VideoBotsPageRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -5929,7 +5917,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
+**translation_model:** `typing.Optional[VideoBotsPageRequestTranslationModel]`
@@ -5945,9 +5933,11 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**input_glossary_document:** `from __future__ import annotations
+**input_glossary_document:** `typing.Optional[str]`
+
-typing.Optional[core.File]` — See core.File for more documentation
+Translation Glossary for User Language -> LLM Language (English)
+
@@ -5955,9 +5945,11 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**output_glossary_document:** `from __future__ import annotations
+**output_glossary_document:** `typing.Optional[str]`
+
-typing.Optional[core.File]` — See core.File for more documentation
+Translation Glossary for LLM Language (English) -> User Language
+
@@ -5965,7 +5957,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
+**lipsync_model:** `typing.Optional[VideoBotsPageRequestLipsyncModel]`
@@ -5973,7 +5965,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**tools:** `typing.Optional[typing.List[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
+**tools:** `typing.Optional[typing.Sequence[LlmTools]]` — Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -6021,7 +6013,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[VideoBotsPageRequestResponseFormatType]`
@@ -6029,7 +6021,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
+**tts_provider:** `typing.Optional[VideoBotsPageRequestTtsProvider]`
@@ -6157,7 +6149,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**openai_voice_name:** `typing.Optional[CopilotCompletionRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[VideoBotsPageRequestOpenaiVoiceName]`
@@ -6165,7 +6157,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**openai_tts_model:** `typing.Optional[CopilotCompletionRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[VideoBotsPageRequestOpenaiTtsModel]`
@@ -6173,9 +6165,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**input_face:** `from __future__ import annotations
-
-typing.Optional[core.File]` — See core.File for more documentation
+**input_face:** `typing.Optional[str]`
@@ -6215,7 +6205,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**sadtalker_settings:** `typing.Optional[CopilotCompletionRequestSadtalkerSettings]`
+**sadtalker_settings:** `typing.Optional[SadTalkerSettings]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index 86305e9..494a2e7 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -10,7 +10,6 @@
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
- AsrPageRequest,
AsrPageRequestOutputFormat,
AsrPageRequestSelectedModel,
AsrPageRequestTranslationModel,
@@ -23,7 +22,6 @@
BulkEvalPageRequestResponseFormatType,
BulkEvalPageStatusResponse,
BulkRunnerPageOutput,
- BulkRunnerPageRequest,
BulkRunnerPageStatusResponse,
ButtonPressed,
CalledFunctionResponse,
@@ -41,7 +39,6 @@
CompareText2ImgPageRequestSelectedModelsItem,
CompareText2ImgPageStatusResponse,
CompareUpscalerPageOutput,
- CompareUpscalerPageRequest,
CompareUpscalerPageRequestSelectedModelsItem,
CompareUpscalerPageStatusResponse,
ConsoleLogs,
@@ -68,7 +65,6 @@
DeforumSdPageRequestSelectedModel,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
- DocExtractPageRequest,
DocExtractPageRequestResponseFormatType,
DocExtractPageRequestSelectedAsrModel,
DocExtractPageStatusResponse,
@@ -79,12 +75,9 @@
DocSearchPageRequestResponseFormatType,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
- DocSummaryPageRequest,
DocSummaryPageRequestResponseFormatType,
DocSummaryPageRequestSelectedAsrModel,
DocSummaryPageStatusResponse,
- DocSummaryRequestResponseFormatType,
- DocSummaryRequestSelectedAsrModel,
EmailFaceInpaintingPageOutput,
EmailFaceInpaintingPageRequestSelectedModel,
EmailFaceInpaintingPageStatusResponse,
@@ -93,7 +86,6 @@
EmbeddingsPageStatusResponse,
EvalPrompt,
FaceInpaintingPageOutput,
- FaceInpaintingPageRequest,
FaceInpaintingPageRequestSelectedModel,
FaceInpaintingPageStatusResponse,
FinalResponse,
@@ -110,13 +102,11 @@
GoogleImageGenPageStatusResponse,
HttpValidationError,
ImageSegmentationPageOutput,
- ImageSegmentationPageRequest,
ImageSegmentationPageRequestSelectedModel,
ImageSegmentationPageStatusResponse,
ImageUrl,
ImageUrlDetail,
Img2ImgPageOutput,
- Img2ImgPageRequest,
Img2ImgPageRequestSelectedControlnetModel,
Img2ImgPageRequestSelectedControlnetModelItem,
Img2ImgPageRequestSelectedModel,
@@ -126,42 +116,27 @@
LetterWriterPageRequest,
LetterWriterPageStatusResponse,
LipsyncPageOutput,
- LipsyncPageRequest,
LipsyncPageRequestSelectedModel,
LipsyncPageStatusResponse,
- LipsyncRequestSelectedModel,
LipsyncTtsPageOutput,
- LipsyncTtsPageRequest,
LipsyncTtsPageRequestOpenaiTtsModel,
LipsyncTtsPageRequestOpenaiVoiceName,
LipsyncTtsPageRequestSelectedModel,
LipsyncTtsPageRequestTtsProvider,
LipsyncTtsPageStatusResponse,
- LipsyncTtsRequestOpenaiTtsModel,
- LipsyncTtsRequestOpenaiVoiceName,
- LipsyncTtsRequestSelectedModel,
- LipsyncTtsRequestTtsProvider,
LlmTools,
MessagePart,
ObjectInpaintingPageOutput,
- ObjectInpaintingPageRequest,
ObjectInpaintingPageRequestSelectedModel,
ObjectInpaintingPageStatusResponse,
- PortraitRequestSelectedModel,
- ProductImageRequestSelectedModel,
PromptTreeNode,
PromptTreeNodePrompt,
QrCodeGeneratorPageOutput,
- QrCodeGeneratorPageRequest,
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
QrCodeGeneratorPageRequestScheduler,
QrCodeGeneratorPageRequestSelectedControlnetModelItem,
QrCodeGeneratorPageRequestSelectedModel,
QrCodeGeneratorPageStatusResponse,
- QrCodeRequestImagePromptControlnetModelsItem,
- QrCodeRequestScheduler,
- QrCodeRequestSelectedControlnetModelItem,
- QrCodeRequestSelectedModel,
RecipeFunction,
RecipeFunctionTrigger,
RecipeRunState,
@@ -177,10 +152,6 @@
RelatedQnAPageRequestEmbeddingModel,
RelatedQnAPageRequestResponseFormatType,
RelatedQnAPageStatusResponse,
- RemixImageRequestSelectedControlnetModel,
- RemixImageRequestSelectedControlnetModelItem,
- RemixImageRequestSelectedModel,
- RemoveBackgroundRequestSelectedModel,
ReplyButton,
ResponseModel,
ResponseModelFinalKeywordQuery,
@@ -202,12 +173,7 @@
SocialLookupEmailPageOutput,
SocialLookupEmailPageRequestResponseFormatType,
SocialLookupEmailPageStatusResponse,
- SpeechRecognitionRequestOutputFormat,
- SpeechRecognitionRequestSelectedModel,
- SpeechRecognitionRequestTranslationModel,
StreamError,
- SynthesizeDataRequestResponseFormatType,
- SynthesizeDataRequestSelectedAsrModel,
Text2AudioPageOutput,
Text2AudioPageStatusResponse,
TextToSpeechPageOutput,
@@ -216,51 +182,30 @@
TextToSpeechPageRequestTtsProvider,
TextToSpeechPageStatusResponse,
TrainingDataModel,
- TranslateRequestSelectedModel,
TranslationPageOutput,
- TranslationPageRequest,
TranslationPageRequestSelectedModel,
TranslationPageStatusResponse,
- UpscaleRequestSelectedModelsItem,
ValidationError,
ValidationErrorLocItem,
Vcard,
VideoBotsPageOutput,
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
- VideoBotsPageRequest,
+ VideoBotsPageStatusResponse,
+)
+from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
+from . import copilot
+from .client import AsyncGooey, Gooey
+from .copilot import (
VideoBotsPageRequestAsrModel,
VideoBotsPageRequestCitationStyle,
VideoBotsPageRequestEmbeddingModel,
- VideoBotsPageRequestFunctionsItem,
- VideoBotsPageRequestFunctionsItemTrigger,
VideoBotsPageRequestLipsyncModel,
VideoBotsPageRequestOpenaiTtsModel,
VideoBotsPageRequestOpenaiVoiceName,
VideoBotsPageRequestResponseFormatType,
- VideoBotsPageRequestSadtalkerSettings,
- VideoBotsPageRequestSadtalkerSettingsPreprocess,
VideoBotsPageRequestTranslationModel,
VideoBotsPageRequestTtsProvider,
- VideoBotsPageStatusResponse,
-)
-from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
-from . import copilot
-from .client import AsyncGooey, Gooey
-from .copilot import (
- CopilotCompletionRequestAsrModel,
- CopilotCompletionRequestCitationStyle,
- CopilotCompletionRequestEmbeddingModel,
- CopilotCompletionRequestFunctionsItem,
- CopilotCompletionRequestFunctionsItemTrigger,
- CopilotCompletionRequestLipsyncModel,
- CopilotCompletionRequestOpenaiTtsModel,
- CopilotCompletionRequestOpenaiVoiceName,
- CopilotCompletionRequestResponseFormatType,
- CopilotCompletionRequestSadtalkerSettings,
- CopilotCompletionRequestSadtalkerSettingsPreprocess,
- CopilotCompletionRequestTranslationModel,
- CopilotCompletionRequestTtsProvider,
)
from .environment import GooeyEnvironment
from .version import __version__
@@ -275,7 +220,6 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
- "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -289,7 +233,6 @@
"BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
- "BulkRunnerPageRequest",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -307,7 +250,6 @@
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
- "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
@@ -319,19 +261,6 @@
"ConversationEntryContentItem_Text",
"ConversationEntryRole",
"ConversationStart",
- "CopilotCompletionRequestAsrModel",
- "CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
- "CopilotCompletionRequestFunctionsItem",
- "CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
- "CopilotCompletionRequestOpenaiTtsModel",
- "CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
- "CopilotCompletionRequestSadtalkerSettings",
- "CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
"CreateStreamRequest",
"CreateStreamRequestAsrModel",
"CreateStreamRequestCitationStyle",
@@ -347,7 +276,6 @@
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
- "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
@@ -358,12 +286,9 @@
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
- "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
- "DocSummaryRequestResponseFormatType",
- "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
@@ -372,7 +297,6 @@
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
- "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
@@ -391,13 +315,11 @@
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
- "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
- "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
@@ -407,43 +329,28 @@
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
- "LipsyncPageRequest",
"LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
- "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
- "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
- "LipsyncTtsRequestOpenaiTtsModel",
- "LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestSelectedModel",
- "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
- "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
"PaymentRequiredError",
- "PortraitRequestSelectedModel",
- "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
- "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
"QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
- "QrCodeRequestImagePromptControlnetModelsItem",
- "QrCodeRequestScheduler",
- "QrCodeRequestSelectedControlnetModelItem",
- "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
@@ -459,10 +366,6 @@
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
- "RemixImageRequestSelectedControlnetModel",
- "RemixImageRequestSelectedControlnetModelItem",
- "RemixImageRequestSelectedModel",
- "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -484,12 +387,7 @@
"SocialLookupEmailPageOutput",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
- "SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestSelectedModel",
- "SpeechRecognitionRequestTranslationModel",
"StreamError",
- "SynthesizeDataRequestResponseFormatType",
- "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -499,31 +397,23 @@
"TextToSpeechPageStatusResponse",
"TooManyRequestsError",
"TrainingDataModel",
- "TranslateRequestSelectedModel",
"TranslationPageOutput",
- "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
"UnprocessableEntityError",
- "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
- "VideoBotsPageRequest",
"VideoBotsPageRequestAsrModel",
"VideoBotsPageRequestCitationStyle",
"VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestFunctionsItem",
- "VideoBotsPageRequestFunctionsItemTrigger",
"VideoBotsPageRequestLipsyncModel",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
"VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSadtalkerSettings",
- "VideoBotsPageRequestSadtalkerSettingsPreprocess",
"VideoBotsPageRequestTranslationModel",
"VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
diff --git a/src/gooey/client.py b/src/gooey/client.py
index 6767f27..beba2f0 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -20,12 +20,15 @@
from .types.http_validation_error import HttpValidationError
from .errors.too_many_requests_error import TooManyRequestsError
from json.decoder import JSONDecodeError
-from . import core
from .types.vcard import Vcard
-from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
-from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel
-from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
-from .types.qr_code_request_scheduler import QrCodeRequestScheduler
+from .types.qr_code_generator_page_request_image_prompt_controlnet_models_item import (
+ QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
+)
+from .types.qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
+from .types.qr_code_generator_page_request_selected_controlnet_model_item import (
+ QrCodeGeneratorPageRequestSelectedControlnetModelItem,
+)
+from .types.qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput
from .types.large_language_models import LargeLanguageModels
from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
@@ -45,8 +48,8 @@
from .types.agg_function import AggFunction
from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .types.bulk_eval_page_output import BulkEvalPageOutput
-from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
-from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
+from .types.doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .types.doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .types.doc_extract_page_output import DocExtractPageOutput
from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .types.compare_llm_page_output import CompareLlmPageOutput
@@ -57,46 +60,46 @@
from .types.doc_search_page_output import DocSearchPageOutput
from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
from .types.smart_gpt_page_output import SmartGptPageOutput
-from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
-from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
+from .types.doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .types.doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .types.doc_summary_page_output import DocSummaryPageOutput
from .types.functions_page_output import FunctionsPageOutput
from .types.sad_talker_settings import SadTalkerSettings
-from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel
+from .types.lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
from .types.lipsync_page_output import LipsyncPageOutput
-from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
-from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
-from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
-from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
+from .types.lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .types.lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
+from .types.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
+from .types.lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
from .types.lipsync_tts_page_output import LipsyncTtsPageOutput
from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .types.text_to_speech_page_output import TextToSpeechPageOutput
-from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
-from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
-from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
+from .types.asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .types.asr_page_request_translation_model import AsrPageRequestTranslationModel
+from .types.asr_page_request_output_format import AsrPageRequestOutputFormat
from .types.asr_page_output import AsrPageOutput
from .types.text2audio_page_output import Text2AudioPageOutput
-from .types.translate_request_selected_model import TranslateRequestSelectedModel
+from .types.translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .types.translation_page_output import TranslationPageOutput
-from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel
-from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
+from .types.img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .types.img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
from .types.img2img_page_output import Img2ImgPageOutput
from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
from .types.compare_text2img_page_output import CompareText2ImgPageOutput
-from .types.product_image_request_selected_model import ProductImageRequestSelectedModel
+from .types.object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .types.object_inpainting_page_output import ObjectInpaintingPageOutput
-from .types.portrait_request_selected_model import PortraitRequestSelectedModel
+from .types.face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .types.face_inpainting_page_output import FaceInpaintingPageOutput
from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .types.google_image_gen_page_output import GoogleImageGenPageOutput
-from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
+from .types.image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .types.image_segmentation_page_output import ImageSegmentationPageOutput
-from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
+from .types.compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .types.compare_upscaler_page_output import CompareUpscalerPageOutput
from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .types.embeddings_page_output import EmbeddingsPageOutput
@@ -331,36 +334,38 @@ def qr_code(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- qr_code_data: typing.Optional[str] = None,
- qr_code_input_image: typing.Optional[core.File] = None,
- qr_code_vcard: typing.Optional[Vcard] = None,
- qr_code_file: typing.Optional[core.File] = None,
- use_url_shortener: typing.Optional[bool] = None,
- negative_prompt: typing.Optional[str] = None,
- image_prompt: typing.Optional[str] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ qr_code_data: typing.Optional[str] = OMIT,
+ qr_code_input_image: typing.Optional[str] = OMIT,
+ qr_code_vcard: typing.Optional[Vcard] = OMIT,
+ qr_code_file: typing.Optional[str] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ image_prompt: typing.Optional[str] = OMIT,
image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeRequestImagePromptControlnetModelsItem]
- ] = None,
- image_prompt_strength: typing.Optional[float] = None,
- image_prompt_scale: typing.Optional[float] = None,
- image_prompt_pos_x: typing.Optional[float] = None,
- image_prompt_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- scheduler: typing.Optional[QrCodeRequestScheduler] = None,
- seed: typing.Optional[int] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = OMIT,
+ image_prompt_strength: typing.Optional[float] = OMIT,
+ image_prompt_scale: typing.Optional[float] = OMIT,
+ image_prompt_pos_x: typing.Optional[float] = OMIT,
+ image_prompt_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> QrCodeGeneratorPageOutput:
"""
@@ -370,20 +375,18 @@ def qr_code(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
qr_code_data : typing.Optional[str]
- qr_code_input_image : typing.Optional[core.File]
- See core.File for more documentation
+ qr_code_input_image : typing.Optional[str]
qr_code_vcard : typing.Optional[Vcard]
- qr_code_file : typing.Optional[core.File]
- See core.File for more documentation
+ qr_code_file : typing.Optional[str]
use_url_shortener : typing.Optional[bool]
@@ -391,7 +394,7 @@ def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
image_prompt_strength : typing.Optional[float]
@@ -401,9 +404,9 @@ def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeRequestSelectedModel]
+ selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
output_width : typing.Optional[int]
@@ -411,13 +414,13 @@ def qr_code(
guidance_scale : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
num_outputs : typing.Optional[int]
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeRequestScheduler]
+ scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
seed : typing.Optional[int]
@@ -454,11 +457,13 @@ def qr_code(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
"qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
"use_url_shortener": use_url_shortener,
"text_prompt": text_prompt,
"negative_prompt": negative_prompt,
@@ -483,10 +488,6 @@ def qr_code(
"obj_pos_y": obj_pos_y,
"settings": settings,
},
- files={
- "qr_code_input_image": qr_code_input_image,
- "qr_code_file": qr_code_file,
- },
request_options=request_options,
omit=OMIT,
)
@@ -1223,24 +1224,28 @@ def personalize_email(
def bulk_run(
self,
*,
- documents: typing.List[core.File],
- run_urls: typing.List[str],
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
input_columns: typing.Dict[str, str],
output_columns: typing.Dict[str, str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- eval_urls: typing.Optional[typing.List[str]] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkRunnerPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
+ Remember to includes header names in your CSV too.
- run_urls : typing.List[str]
+
+ run_urls : typing.Sequence[str]
Provide one or more Gooey.AI workflow runs.
You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
@@ -1258,12 +1263,12 @@ def bulk_run(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- eval_urls : typing.Optional[typing.List[str]]
+ eval_urls : typing.Optional[typing.Sequence[str]]
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -1286,6 +1291,7 @@ def bulk_run(
api_key="YOUR_API_KEY",
)
client.bulk_run(
+ documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -1297,18 +1303,16 @@ def bulk_run(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"run_urls": run_urls,
"input_columns": input_columns,
"output_columns": output_columns,
"eval_urls": eval_urls,
"settings": settings,
},
- files={
- "documents": documents,
- },
request_options=request_options,
omit=OMIT,
)
@@ -1509,47 +1513,46 @@ def eval(
def synthesize_data(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- sheet_url: typing.Optional[core.File] = None,
- selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
- google_translate_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- task_instructions: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ sheet_url: typing.Optional[str] = OMIT,
+ selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- sheet_url : typing.Optional[core.File]
- See core.File for more documentation
+ sheet_url : typing.Optional[str]
- selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
task_instructions : typing.Optional[str]
@@ -1565,7 +1568,7 @@ def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+ response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1584,7 +1587,9 @@ def synthesize_data(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.synthesize_data()
+ client.synthesize_data(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-extract/async",
@@ -1592,11 +1597,14 @@ def synthesize_data(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
+ "sheet_url": sheet_url,
"selected_asr_model": selected_asr_model,
"google_translate_target": google_translate_target,
+ "glossary_document": glossary_document,
"task_instructions": task_instructions,
"selected_model": selected_model,
"avoid_repetition": avoid_repetition,
@@ -1607,11 +1615,6 @@ def synthesize_data(
"response_format_type": response_format_type,
"settings": settings,
},
- files={
- "documents": documents,
- "sheet_url": sheet_url,
- "glossary_document": glossary_document,
- },
request_options=request_options,
omit=OMIT,
)
@@ -2110,34 +2113,33 @@ def smart_gpt(
def doc_summary(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- task_instructions: typing.Optional[str] = None,
- merge_instructions: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
- selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
- google_translate_target: typing.Optional[str] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ merge_instructions: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
+ selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -2150,7 +2152,7 @@ def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
@@ -2164,7 +2166,7 @@ def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+ response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -2183,7 +2185,9 @@ def doc_summary(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.doc_summary()
+ client.doc_summary(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/doc-summary/async",
@@ -2191,9 +2195,10 @@ def doc_summary(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"task_instructions": task_instructions,
"merge_instructions": merge_instructions,
"selected_model": selected_model,
@@ -2208,9 +2213,6 @@ def doc_summary(
"response_format_type": response_format_type,
"settings": settings,
},
- files={
- "documents": documents,
- },
request_options=request_options,
omit=OMIT,
)
@@ -2359,17 +2361,17 @@ def lipsync(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
- input_audio: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncPageOutput:
"""
@@ -2377,13 +2379,12 @@ def lipsync(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -2395,10 +2396,9 @@ def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
- input_audio : typing.Optional[core.File]
- See core.File for more documentation
+ input_audio : typing.Optional[str]
settings : typing.Optional[RunSettings]
@@ -2425,20 +2425,18 @@ def lipsync(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
"face_padding_right": face_padding_right,
"sadtalker_settings": sadtalker_settings,
"selected_model": selected_model,
- "settings": settings,
- },
- files={
- "input_face": input_face,
"input_audio": input_audio,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -2492,34 +2490,34 @@ def lipsync_tts(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
- uberduck_voice_name: typing.Optional[str] = None,
- uberduck_speaking_rate: typing.Optional[float] = None,
- google_voice_name: typing.Optional[str] = None,
- google_speaking_rate: typing.Optional[float] = None,
- google_pitch: typing.Optional[float] = None,
- bark_history_prompt: typing.Optional[str] = None,
- elevenlabs_voice_name: typing.Optional[str] = None,
- elevenlabs_api_key: typing.Optional[str] = None,
- elevenlabs_voice_id: typing.Optional[str] = None,
- elevenlabs_model: typing.Optional[str] = None,
- elevenlabs_stability: typing.Optional[float] = None,
- elevenlabs_similarity_boost: typing.Optional[float] = None,
- elevenlabs_style: typing.Optional[float] = None,
- elevenlabs_speaker_boost: typing.Optional[bool] = None,
- azure_voice_name: typing.Optional[str] = None,
- openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
- openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
"""
@@ -2529,12 +2527,12 @@ def lipsync_tts(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+ tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -2567,12 +2565,11 @@ def lipsync_tts(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -2584,7 +2581,7 @@ def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
settings : typing.Optional[RunSettings]
@@ -2613,7 +2610,7 @@ def lipsync_tts(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"text_prompt": text_prompt,
@@ -2635,6 +2632,7 @@ def lipsync_tts(
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -2643,9 +2641,6 @@ def lipsync_tts(
"selected_model": selected_model,
"settings": settings,
},
- files={
- "input_face": input_face,
- },
request_options=request_options,
omit=OMIT,
)
@@ -2871,41 +2866,40 @@ def text_to_speech(
def speech_recognition(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
- language: typing.Optional[str] = None,
- translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
- output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
- google_translate_target: typing.Optional[str] = None,
- translation_source: typing.Optional[str] = None,
- translation_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsrPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+ selected_model : typing.Optional[AsrPageRequestSelectedModel]
language : typing.Optional[str]
- translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+ translation_model : typing.Optional[AsrPageRequestTranslationModel]
- output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+ output_format : typing.Optional[AsrPageRequestOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -2914,8 +2908,9 @@ def speech_recognition(
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
settings : typing.Optional[RunSettings]
@@ -2934,7 +2929,9 @@ def speech_recognition(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.speech_recognition()
+ client.speech_recognition(
+ documents=["documents"],
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/asr/async",
@@ -2942,9 +2939,10 @@ def speech_recognition(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"selected_model": selected_model,
"language": language,
"translation_model": translation_model,
@@ -2952,11 +2950,8 @@ def speech_recognition(
"google_translate_target": google_translate_target,
"translation_source": translation_source,
"translation_target": translation_target,
- "settings": settings,
- },
- files={
- "documents": documents,
"glossary_document": glossary_document,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -3143,14 +3138,14 @@ def translate(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- texts: typing.Optional[typing.List[str]] = None,
- selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
- translation_source: typing.Optional[str] = None,
- translation_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ texts: typing.Optional[typing.Sequence[str]] = OMIT,
+ selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> TranslationPageOutput:
"""
@@ -3158,21 +3153,22 @@ def translate(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- texts : typing.Optional[typing.List[str]]
+ texts : typing.Optional[typing.Sequence[str]]
- selected_model : typing.Optional[TranslateRequestSelectedModel]
+ selected_model : typing.Optional[TranslationPageRequestSelectedModel]
translation_source : typing.Optional[str]
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
settings : typing.Optional[RunSettings]
@@ -3199,17 +3195,15 @@ def translate(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"texts": texts,
"selected_model": selected_model,
"translation_source": translation_source,
"translation_target": translation_target,
- "settings": settings,
- },
- files={
"glossary_document": glossary_document,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -3261,44 +3255,43 @@ def translate(
def remix_image(
self,
*,
- input_image: core.File,
+ input_image: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- text_prompt: typing.Optional[str] = None,
- selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- prompt_strength: typing.Optional[float] = None,
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
- seed: typing.Optional[int] = None,
- image_guidance_scale: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ text_prompt: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ prompt_strength: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ image_guidance_scale: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Img2ImgPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[RemixImageRequestSelectedModel]
+ selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
+ selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
negative_prompt : typing.Optional[str]
@@ -3314,7 +3307,7 @@ def remix_image(
prompt_strength : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
seed : typing.Optional[int]
@@ -3337,7 +3330,9 @@ def remix_image(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remix_image()
+ client.remix_image(
+ input_image="input_image",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/Img2Img/async",
@@ -3345,9 +3340,10 @@ def remix_image(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"selected_model": selected_model,
"selected_controlnet_model": selected_controlnet_model,
@@ -3363,9 +3359,6 @@ def remix_image(
"image_guidance_scale": image_guidance_scale,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -3574,38 +3567,37 @@ def text_to_image(
def product_image(
self,
*,
- input_image: core.File,
+ input_image: str,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- mask_threshold: typing.Optional[float] = None,
- selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- sd2upscaling: typing.Optional[bool] = None,
- seed: typing.Optional[int] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> ObjectInpaintingPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3618,7 +3610,7 @@ def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ProductImageRequestSelectedModel]
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -3654,6 +3646,7 @@ def product_image(
api_key="YOUR_API_KEY",
)
client.product_image(
+ input_image="input_image",
text_prompt="text_prompt",
)
"""
@@ -3663,9 +3656,10 @@ def product_image(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"obj_scale": obj_scale,
"obj_pos_x": obj_pos_x,
@@ -3682,9 +3676,6 @@ def product_image(
"seed": seed,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -3735,37 +3726,36 @@ def product_image(
def portrait(
self,
*,
- input_image: core.File,
+ input_image: str,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- face_scale: typing.Optional[float] = None,
- face_pos_x: typing.Optional[float] = None,
- face_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- upscale_factor: typing.Optional[float] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- seed: typing.Optional[int] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> FaceInpaintingPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -3776,7 +3766,7 @@ def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[PortraitRequestSelectedModel]
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -3812,7 +3802,8 @@ def portrait(
api_key="YOUR_API_KEY",
)
client.portrait(
- text_prompt="text_prompt",
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
)
"""
_response = self._client_wrapper.httpx_client.request(
@@ -3821,9 +3812,10 @@ def portrait(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"face_scale": face_scale,
"face_pos_x": face_pos_x,
@@ -3839,9 +3831,6 @@ def portrait(
"seed": seed,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -4235,34 +4224,33 @@ def image_from_web_search(
def remove_background(
self,
*,
- input_image: core.File,
+ input_image: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
- mask_threshold: typing.Optional[float] = None,
- rect_persepective_transform: typing.Optional[bool] = None,
- reflection_opacity: typing.Optional[float] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> ImageSegmentationPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
mask_threshold : typing.Optional[float]
@@ -4293,7 +4281,9 @@ def remove_background(
client = Gooey(
api_key="YOUR_API_KEY",
)
- client.remove_background()
+ client.remove_background(
+ input_image="input_image",
+ )
"""
_response = self._client_wrapper.httpx_client.request(
"v3/ImageSegmentation/async",
@@ -4301,9 +4291,10 @@ def remove_background(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"selected_model": selected_model,
"mask_threshold": mask_threshold,
"rect_persepective_transform": rect_persepective_transform,
@@ -4313,9 +4304,6 @@ def remove_background(
"obj_pos_y": obj_pos_y,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -4368,13 +4356,13 @@ def upscale(
*,
scale: int,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_image: typing.Optional[core.File] = None,
- input_video: typing.Optional[core.File] = None,
- selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_image: typing.Optional[str] = OMIT,
+ input_video: typing.Optional[str] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareUpscalerPageOutput:
"""
@@ -4385,18 +4373,18 @@ def upscale(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_image : typing.Optional[core.File]
- See core.File for more documentation
+ input_image : typing.Optional[str]
+ Input Image
- input_video : typing.Optional[core.File]
- See core.File for more documentation
+ input_video : typing.Optional[str]
+ Input Video
- selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -4427,18 +4415,16 @@ def upscale(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
+ "input_video": input_video,
"scale": scale,
"selected_models": selected_models,
"selected_bg_model": selected_bg_model,
"settings": settings,
},
- files={
- "input_image": input_image,
- "input_video": input_video,
- },
request_options=request_options,
omit=OMIT,
)
@@ -5055,36 +5041,38 @@ async def qr_code(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- qr_code_data: typing.Optional[str] = None,
- qr_code_input_image: typing.Optional[core.File] = None,
- qr_code_vcard: typing.Optional[Vcard] = None,
- qr_code_file: typing.Optional[core.File] = None,
- use_url_shortener: typing.Optional[bool] = None,
- negative_prompt: typing.Optional[str] = None,
- image_prompt: typing.Optional[str] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ qr_code_data: typing.Optional[str] = OMIT,
+ qr_code_input_image: typing.Optional[str] = OMIT,
+ qr_code_vcard: typing.Optional[Vcard] = OMIT,
+ qr_code_file: typing.Optional[str] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ image_prompt: typing.Optional[str] = OMIT,
image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeRequestImagePromptControlnetModelsItem]
- ] = None,
- image_prompt_strength: typing.Optional[float] = None,
- image_prompt_scale: typing.Optional[float] = None,
- image_prompt_pos_x: typing.Optional[float] = None,
- image_prompt_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- scheduler: typing.Optional[QrCodeRequestScheduler] = None,
- seed: typing.Optional[int] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
+ ] = OMIT,
+ image_prompt_strength: typing.Optional[float] = OMIT,
+ image_prompt_scale: typing.Optional[float] = OMIT,
+ image_prompt_pos_x: typing.Optional[float] = OMIT,
+ image_prompt_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[
+ typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]
+ ] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> QrCodeGeneratorPageOutput:
"""
@@ -5094,20 +5082,18 @@ async def qr_code(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
qr_code_data : typing.Optional[str]
- qr_code_input_image : typing.Optional[core.File]
- See core.File for more documentation
+ qr_code_input_image : typing.Optional[str]
qr_code_vcard : typing.Optional[Vcard]
- qr_code_file : typing.Optional[core.File]
- See core.File for more documentation
+ qr_code_file : typing.Optional[str]
use_url_shortener : typing.Optional[bool]
@@ -5115,7 +5101,7 @@ async def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]]
image_prompt_strength : typing.Optional[float]
@@ -5125,9 +5111,9 @@ async def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeRequestSelectedModel]
+ selected_model : typing.Optional[QrCodeGeneratorPageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.Sequence[QrCodeGeneratorPageRequestSelectedControlnetModelItem]]
output_width : typing.Optional[int]
@@ -5135,13 +5121,13 @@ async def qr_code(
guidance_scale : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
num_outputs : typing.Optional[int]
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeRequestScheduler]
+ scheduler : typing.Optional[QrCodeGeneratorPageRequestScheduler]
seed : typing.Optional[int]
@@ -5186,11 +5172,13 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"qr_code_data": qr_code_data,
+ "qr_code_input_image": qr_code_input_image,
"qr_code_vcard": qr_code_vcard,
+ "qr_code_file": qr_code_file,
"use_url_shortener": use_url_shortener,
"text_prompt": text_prompt,
"negative_prompt": negative_prompt,
@@ -5215,10 +5203,6 @@ async def main() -> None:
"obj_pos_y": obj_pos_y,
"settings": settings,
},
- files={
- "qr_code_input_image": qr_code_input_image,
- "qr_code_file": qr_code_file,
- },
request_options=request_options,
omit=OMIT,
)
@@ -5987,24 +5971,28 @@ async def main() -> None:
async def bulk_run(
self,
*,
- documents: typing.List[core.File],
- run_urls: typing.List[str],
+ documents: typing.Sequence[str],
+ run_urls: typing.Sequence[str],
input_columns: typing.Dict[str, str],
output_columns: typing.Dict[str, str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- eval_urls: typing.Optional[typing.List[str]] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ eval_urls: typing.Optional[typing.Sequence[str]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkRunnerPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
+
+ Upload or link to a CSV or google sheet that contains your sample input data.
+ For example, for Copilot, this would be sample questions; for Art QR Code, it would be pairs of image descriptions and URLs.
+ Remember to include header names in your CSV too.
- run_urls : typing.List[str]
+
+ run_urls : typing.Sequence[str]
Provide one or more Gooey.AI workflow runs.
You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
@@ -6022,12 +6010,12 @@ async def bulk_run(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- eval_urls : typing.Optional[typing.List[str]]
+ eval_urls : typing.Optional[typing.Sequence[str]]
_(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
@@ -6055,6 +6043,7 @@ async def bulk_run(
async def main() -> None:
await client.bulk_run(
+ documents=["documents"],
run_urls=["run_urls"],
input_columns={"key": "value"},
output_columns={"key": "value"},
@@ -6069,18 +6058,16 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"run_urls": run_urls,
"input_columns": input_columns,
"output_columns": output_columns,
"eval_urls": eval_urls,
"settings": settings,
},
- files={
- "documents": documents,
- },
request_options=request_options,
omit=OMIT,
)
@@ -6289,47 +6276,46 @@ async def main() -> None:
async def synthesize_data(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- sheet_url: typing.Optional[core.File] = None,
- selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
- google_translate_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- task_instructions: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ sheet_url: typing.Optional[str] = OMIT,
+ selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- sheet_url : typing.Optional[core.File]
- See core.File for more documentation
+ sheet_url : typing.Optional[str]
- selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocExtractPageRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
task_instructions : typing.Optional[str]
@@ -6345,7 +6331,7 @@ async def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+ response_format_type : typing.Optional[DocExtractPageRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6369,7 +6355,9 @@ async def synthesize_data(
async def main() -> None:
- await client.synthesize_data()
+ await client.synthesize_data(
+ documents=["documents"],
+ )
asyncio.run(main())
@@ -6380,11 +6368,14 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
+ "sheet_url": sheet_url,
"selected_asr_model": selected_asr_model,
"google_translate_target": google_translate_target,
+ "glossary_document": glossary_document,
"task_instructions": task_instructions,
"selected_model": selected_model,
"avoid_repetition": avoid_repetition,
@@ -6395,11 +6386,6 @@ async def main() -> None:
"response_format_type": response_format_type,
"settings": settings,
},
- files={
- "documents": documents,
- "sheet_url": sheet_url,
- "glossary_document": glossary_document,
- },
request_options=request_options,
omit=OMIT,
)
@@ -6922,34 +6908,33 @@ async def main() -> None:
async def doc_summary(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- task_instructions: typing.Optional[str] = None,
- merge_instructions: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
- selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
- google_translate_target: typing.Optional[str] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ merge_instructions: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ chain_type: typing.Optional[typing.Literal["map_reduce"]] = OMIT,
+ selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -6962,7 +6947,7 @@ async def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[DocSummaryPageRequestSelectedAsrModel]
google_translate_target : typing.Optional[str]
@@ -6976,7 +6961,7 @@ async def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+ response_format_type : typing.Optional[DocSummaryPageRequestResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -7000,7 +6985,9 @@ async def doc_summary(
async def main() -> None:
- await client.doc_summary()
+ await client.doc_summary(
+ documents=["documents"],
+ )
asyncio.run(main())
@@ -7011,9 +6998,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"task_instructions": task_instructions,
"merge_instructions": merge_instructions,
"selected_model": selected_model,
@@ -7028,9 +7016,6 @@ async def main() -> None:
"response_format_type": response_format_type,
"settings": settings,
},
- files={
- "documents": documents,
- },
request_options=request_options,
omit=OMIT,
)
@@ -7187,17 +7172,17 @@ async def lipsync(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
- input_audio: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncPageOutput:
"""
@@ -7205,13 +7190,12 @@ async def lipsync(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -7223,10 +7207,9 @@ async def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncPageRequestSelectedModel]
- input_audio : typing.Optional[core.File]
- See core.File for more documentation
+ input_audio : typing.Optional[str]
settings : typing.Optional[RunSettings]
@@ -7261,20 +7244,18 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
"face_padding_right": face_padding_right,
"sadtalker_settings": sadtalker_settings,
"selected_model": selected_model,
- "settings": settings,
- },
- files={
- "input_face": input_face,
"input_audio": input_audio,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -7328,34 +7309,34 @@ async def lipsync_tts(
*,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
- uberduck_voice_name: typing.Optional[str] = None,
- uberduck_speaking_rate: typing.Optional[float] = None,
- google_voice_name: typing.Optional[str] = None,
- google_speaking_rate: typing.Optional[float] = None,
- google_pitch: typing.Optional[float] = None,
- bark_history_prompt: typing.Optional[str] = None,
- elevenlabs_voice_name: typing.Optional[str] = None,
- elevenlabs_api_key: typing.Optional[str] = None,
- elevenlabs_voice_id: typing.Optional[str] = None,
- elevenlabs_model: typing.Optional[str] = None,
- elevenlabs_stability: typing.Optional[float] = None,
- elevenlabs_similarity_boost: typing.Optional[float] = None,
- elevenlabs_style: typing.Optional[float] = None,
- elevenlabs_speaker_boost: typing.Optional[bool] = None,
- azure_voice_name: typing.Optional[str] = None,
- openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None,
- openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
"""
@@ -7365,12 +7346,12 @@ async def lipsync_tts(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+ tts_provider : typing.Optional[LipsyncTtsPageRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -7403,12 +7384,11 @@ async def lipsync_tts(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel]
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -7420,7 +7400,7 @@ async def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncTtsPageRequestSelectedModel]
settings : typing.Optional[RunSettings]
@@ -7457,7 +7437,7 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"text_prompt": text_prompt,
@@ -7479,6 +7459,7 @@ async def main() -> None:
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -7487,9 +7468,6 @@ async def main() -> None:
"selected_model": selected_model,
"settings": settings,
},
- files={
- "input_face": input_face,
- },
request_options=request_options,
omit=OMIT,
)
@@ -7723,41 +7701,40 @@ async def main() -> None:
async def speech_recognition(
self,
*,
- documents: typing.List[core.File],
+ documents: typing.Sequence[str],
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
- language: typing.Optional[str] = None,
- translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
- output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
- google_translate_target: typing.Optional[str] = None,
- translation_source: typing.Optional[str] = None,
- translation_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ selected_model: typing.Optional[AsrPageRequestSelectedModel] = OMIT,
+ language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[AsrPageRequestTranslationModel] = OMIT,
+ output_format: typing.Optional[AsrPageRequestOutputFormat] = OMIT,
+ google_translate_target: typing.Optional[str] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> AsrPageOutput:
"""
Parameters
----------
- documents : typing.List[core.File]
- See core.File for more documentation
+ documents : typing.Sequence[str]
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+ selected_model : typing.Optional[AsrPageRequestSelectedModel]
language : typing.Optional[str]
- translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+ translation_model : typing.Optional[AsrPageRequestTranslationModel]
- output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+ output_format : typing.Optional[AsrPageRequestOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -7766,8 +7743,9 @@ async def speech_recognition(
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
settings : typing.Optional[RunSettings]
@@ -7791,7 +7769,9 @@ async def speech_recognition(
async def main() -> None:
- await client.speech_recognition()
+ await client.speech_recognition(
+ documents=["documents"],
+ )
asyncio.run(main())
@@ -7802,9 +7782,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "documents": documents,
"selected_model": selected_model,
"language": language,
"translation_model": translation_model,
@@ -7812,11 +7793,8 @@ async def main() -> None:
"google_translate_target": google_translate_target,
"translation_source": translation_source,
"translation_target": translation_target,
- "settings": settings,
- },
- files={
- "documents": documents,
"glossary_document": glossary_document,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -8011,14 +7989,14 @@ async def translate(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- texts: typing.Optional[typing.List[str]] = None,
- selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
- translation_source: typing.Optional[str] = None,
- translation_target: typing.Optional[str] = None,
- glossary_document: typing.Optional[core.File] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ texts: typing.Optional[typing.Sequence[str]] = OMIT,
+ selected_model: typing.Optional[TranslationPageRequestSelectedModel] = OMIT,
+ translation_source: typing.Optional[str] = OMIT,
+ translation_target: typing.Optional[str] = OMIT,
+ glossary_document: typing.Optional[str] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> TranslationPageOutput:
"""
@@ -8026,21 +8004,22 @@ async def translate(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- texts : typing.Optional[typing.List[str]]
+ texts : typing.Optional[typing.Sequence[str]]
- selected_model : typing.Optional[TranslateRequestSelectedModel]
+ selected_model : typing.Optional[TranslationPageRequestSelectedModel]
translation_source : typing.Optional[str]
translation_target : typing.Optional[str]
- glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ glossary_document : typing.Optional[str]
+ Provide a glossary to customize translation and improve accuracy of domain-specific terms.
+ If not specified or invalid, no glossary will be used. Read about the expected format [here](https://docs.google.com/document/d/1TwzAvFmFYekloRKql2PXNPIyqCbsHRL8ZtnWkzAYrh8/edit?usp=sharing).
settings : typing.Optional[RunSettings]
@@ -8075,17 +8054,15 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"texts": texts,
"selected_model": selected_model,
"translation_source": translation_source,
"translation_target": translation_target,
- "settings": settings,
- },
- files={
"glossary_document": glossary_document,
+ "settings": settings,
},
request_options=request_options,
omit=OMIT,
@@ -8137,44 +8114,43 @@ async def main() -> None:
async def remix_image(
self,
*,
- input_image: core.File,
+ input_image: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- text_prompt: typing.Optional[str] = None,
- selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- prompt_strength: typing.Optional[float] = None,
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
- seed: typing.Optional[int] = None,
- image_guidance_scale: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ text_prompt: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = OMIT,
+ selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ prompt_strength: typing.Optional[float] = OMIT,
+ controlnet_conditioning_scale: typing.Optional[typing.Sequence[float]] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ image_guidance_scale: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Img2ImgPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[RemixImageRequestSelectedModel]
+ selected_model : typing.Optional[Img2ImgPageRequestSelectedModel]
- selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
+ selected_controlnet_model : typing.Optional[Img2ImgPageRequestSelectedControlnetModel]
negative_prompt : typing.Optional[str]
@@ -8190,7 +8166,7 @@ async def remix_image(
prompt_strength : typing.Optional[float]
- controlnet_conditioning_scale : typing.Optional[typing.List[float]]
+ controlnet_conditioning_scale : typing.Optional[typing.Sequence[float]]
seed : typing.Optional[int]
@@ -8218,7 +8194,9 @@ async def remix_image(
async def main() -> None:
- await client.remix_image()
+ await client.remix_image(
+ input_image="input_image",
+ )
asyncio.run(main())
@@ -8229,9 +8207,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"selected_model": selected_model,
"selected_controlnet_model": selected_controlnet_model,
@@ -8247,9 +8226,6 @@ async def main() -> None:
"image_guidance_scale": image_guidance_scale,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -8466,38 +8442,37 @@ async def main() -> None:
async def product_image(
self,
*,
- input_image: core.File,
+ input_image: str,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- mask_threshold: typing.Optional[float] = None,
- selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- sd2upscaling: typing.Optional[bool] = None,
- seed: typing.Optional[int] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ sd2upscaling: typing.Optional[bool] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> ObjectInpaintingPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8510,7 +8485,7 @@ async def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ProductImageRequestSelectedModel]
+ selected_model : typing.Optional[ObjectInpaintingPageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -8551,6 +8526,7 @@ async def product_image(
async def main() -> None:
await client.product_image(
+ input_image="input_image",
text_prompt="text_prompt",
)
@@ -8563,9 +8539,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"obj_scale": obj_scale,
"obj_pos_x": obj_pos_x,
@@ -8582,9 +8559,6 @@ async def main() -> None:
"seed": seed,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -8635,37 +8609,36 @@ async def main() -> None:
async def portrait(
self,
*,
- input_image: core.File,
+ input_image: str,
text_prompt: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- face_scale: typing.Optional[float] = None,
- face_pos_x: typing.Optional[float] = None,
- face_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
- negative_prompt: typing.Optional[str] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[int] = None,
- upscale_factor: typing.Optional[float] = None,
- output_width: typing.Optional[int] = None,
- output_height: typing.Optional[int] = None,
- guidance_scale: typing.Optional[float] = None,
- seed: typing.Optional[int] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ face_scale: typing.Optional[float] = OMIT,
+ face_pos_x: typing.Optional[float] = OMIT,
+ face_pos_y: typing.Optional[float] = OMIT,
+ selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = OMIT,
+ negative_prompt: typing.Optional[str] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[int] = OMIT,
+ upscale_factor: typing.Optional[float] = OMIT,
+ output_width: typing.Optional[int] = OMIT,
+ output_height: typing.Optional[int] = OMIT,
+ guidance_scale: typing.Optional[float] = OMIT,
+ seed: typing.Optional[int] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> FaceInpaintingPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
text_prompt : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -8676,7 +8649,7 @@ async def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[PortraitRequestSelectedModel]
+ selected_model : typing.Optional[FaceInpaintingPageRequestSelectedModel]
negative_prompt : typing.Optional[str]
@@ -8717,7 +8690,8 @@ async def portrait(
async def main() -> None:
await client.portrait(
- text_prompt="text_prompt",
+ input_image="input_image",
+ text_prompt="tony stark from the iron man",
)
@@ -8729,9 +8703,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"text_prompt": text_prompt,
"face_scale": face_scale,
"face_pos_x": face_pos_x,
@@ -8747,9 +8722,6 @@ async def main() -> None:
"seed": seed,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -9159,34 +9131,33 @@ async def main() -> None:
async def remove_background(
self,
*,
- input_image: core.File,
+ input_image: str,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
- mask_threshold: typing.Optional[float] = None,
- rect_persepective_transform: typing.Optional[bool] = None,
- reflection_opacity: typing.Optional[float] = None,
- obj_scale: typing.Optional[float] = None,
- obj_pos_x: typing.Optional[float] = None,
- obj_pos_y: typing.Optional[float] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = OMIT,
+ mask_threshold: typing.Optional[float] = OMIT,
+ rect_persepective_transform: typing.Optional[bool] = OMIT,
+ reflection_opacity: typing.Optional[float] = OMIT,
+ obj_scale: typing.Optional[float] = OMIT,
+ obj_pos_x: typing.Optional[float] = OMIT,
+ obj_pos_y: typing.Optional[float] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> ImageSegmentationPageOutput:
"""
Parameters
----------
- input_image : core.File
- See core.File for more documentation
+ input_image : str
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+ selected_model : typing.Optional[ImageSegmentationPageRequestSelectedModel]
mask_threshold : typing.Optional[float]
@@ -9222,7 +9193,9 @@ async def remove_background(
async def main() -> None:
- await client.remove_background()
+ await client.remove_background(
+ input_image="input_image",
+ )
asyncio.run(main())
@@ -9233,9 +9206,10 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
"selected_model": selected_model,
"mask_threshold": mask_threshold,
"rect_persepective_transform": rect_persepective_transform,
@@ -9245,9 +9219,6 @@ async def main() -> None:
"obj_pos_y": obj_pos_y,
"settings": settings,
},
- files={
- "input_image": input_image,
- },
request_options=request_options,
omit=OMIT,
)
@@ -9300,13 +9271,13 @@ async def upscale(
*,
scale: int,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[RecipeFunction]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_image: typing.Optional[core.File] = None,
- input_video: typing.Optional[core.File] = None,
- selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_image: typing.Optional[str] = OMIT,
+ input_video: typing.Optional[str] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]] = OMIT,
+ selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareUpscalerPageOutput:
"""
@@ -9317,18 +9288,18 @@ async def upscale(
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[RecipeFunction]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- input_image : typing.Optional[core.File]
- See core.File for more documentation
+ input_image : typing.Optional[str]
+ Input Image
- input_video : typing.Optional[core.File]
- See core.File for more documentation
+ input_video : typing.Optional[str]
+ Input Video
- selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.Sequence[CompareUpscalerPageRequestSelectedModelsItem]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -9367,18 +9338,16 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
+ "input_image": input_image,
+ "input_video": input_video,
"scale": scale,
"selected_models": selected_models,
"selected_bg_model": selected_bg_model,
"settings": settings,
},
- files={
- "input_image": input_image,
- "input_video": input_video,
- },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py
index 3234b31..db36163 100644
--- a/src/gooey/copilot/__init__.py
+++ b/src/gooey/copilot/__init__.py
@@ -1,33 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
- CopilotCompletionRequestAsrModel,
- CopilotCompletionRequestCitationStyle,
- CopilotCompletionRequestEmbeddingModel,
- CopilotCompletionRequestFunctionsItem,
- CopilotCompletionRequestFunctionsItemTrigger,
- CopilotCompletionRequestLipsyncModel,
- CopilotCompletionRequestOpenaiTtsModel,
- CopilotCompletionRequestOpenaiVoiceName,
- CopilotCompletionRequestResponseFormatType,
- CopilotCompletionRequestSadtalkerSettings,
- CopilotCompletionRequestSadtalkerSettingsPreprocess,
- CopilotCompletionRequestTranslationModel,
- CopilotCompletionRequestTtsProvider,
+ VideoBotsPageRequestAsrModel,
+ VideoBotsPageRequestCitationStyle,
+ VideoBotsPageRequestEmbeddingModel,
+ VideoBotsPageRequestLipsyncModel,
+ VideoBotsPageRequestOpenaiTtsModel,
+ VideoBotsPageRequestOpenaiVoiceName,
+ VideoBotsPageRequestResponseFormatType,
+ VideoBotsPageRequestTranslationModel,
+ VideoBotsPageRequestTtsProvider,
)
__all__ = [
- "CopilotCompletionRequestAsrModel",
- "CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
- "CopilotCompletionRequestFunctionsItem",
- "CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
- "CopilotCompletionRequestOpenaiTtsModel",
- "CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
- "CopilotCompletionRequestSadtalkerSettings",
- "CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
+ "VideoBotsPageRequestAsrModel",
+ "VideoBotsPageRequestCitationStyle",
+ "VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestLipsyncModel",
+ "VideoBotsPageRequestOpenaiTtsModel",
+ "VideoBotsPageRequestOpenaiVoiceName",
+ "VideoBotsPageRequestResponseFormatType",
+ "VideoBotsPageRequestTranslationModel",
+ "VideoBotsPageRequestTtsProvider",
]
diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py
index 9dcc465..a27e8d5 100644
--- a/src/gooey/copilot/client.py
+++ b/src/gooey/copilot/client.py
@@ -2,21 +2,20 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
-from .types.copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
-from .. import core
+from ..types.recipe_function import RecipeFunction
from ..types.conversation_entry import ConversationEntry
from ..types.large_language_models import LargeLanguageModels
-from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
-from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
-from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
-from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
-from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
+from .types.video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .types.video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .types.video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .types.video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
from ..types.llm_tools import LlmTools
-from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
-from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
-from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
-from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
-from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
+from .types.video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .types.video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from .types.video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .types.video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from ..types.sad_talker_settings import SadTalkerSettings
from ..types.run_settings import RunSettings
from ..core.request_options import RequestOptions
from ..types.video_bots_page_output import VideoBotsPageOutput
@@ -42,67 +41,67 @@ def completion(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_prompt: typing.Optional[str] = None,
- input_audio: typing.Optional[str] = None,
- input_images: typing.Optional[typing.List[core.File]] = None,
- input_documents: typing.Optional[typing.List[core.File]] = None,
- doc_extract_url: typing.Optional[str] = None,
- messages: typing.Optional[typing.List[ConversationEntry]] = None,
- bot_script: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- document_model: typing.Optional[str] = None,
- task_instructions: typing.Optional[str] = None,
- query_instructions: typing.Optional[str] = None,
- keyword_instructions: typing.Optional[str] = None,
- documents: typing.Optional[typing.List[core.File]] = None,
- max_references: typing.Optional[int] = None,
- max_context_words: typing.Optional[int] = None,
- scroll_jump: typing.Optional[int] = None,
- embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
- dense_weight: typing.Optional[float] = None,
- citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
- use_url_shortener: typing.Optional[bool] = None,
- asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
- asr_language: typing.Optional[str] = None,
- translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
- user_language: typing.Optional[str] = None,
- input_glossary_document: typing.Optional[core.File] = None,
- output_glossary_document: typing.Optional[core.File] = None,
- lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
- tools: typing.Optional[typing.List[LlmTools]] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
- tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
- uberduck_voice_name: typing.Optional[str] = None,
- uberduck_speaking_rate: typing.Optional[float] = None,
- google_voice_name: typing.Optional[str] = None,
- google_speaking_rate: typing.Optional[float] = None,
- google_pitch: typing.Optional[float] = None,
- bark_history_prompt: typing.Optional[str] = None,
- elevenlabs_voice_name: typing.Optional[str] = None,
- elevenlabs_api_key: typing.Optional[str] = None,
- elevenlabs_voice_id: typing.Optional[str] = None,
- elevenlabs_model: typing.Optional[str] = None,
- elevenlabs_stability: typing.Optional[float] = None,
- elevenlabs_similarity_boost: typing.Optional[float] = None,
- elevenlabs_style: typing.Optional[float] = None,
- elevenlabs_speaker_boost: typing.Optional[bool] = None,
- azure_voice_name: typing.Optional[str] = None,
- openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None,
- openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> VideoBotsPageOutput:
"""
@@ -110,7 +109,7 @@ def completion(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -119,16 +118,14 @@ def completion(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ input_images : typing.Optional[typing.Sequence[str]]
- input_documents : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ input_documents : typing.Optional[typing.Sequence[str]]
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.List[ConversationEntry]]
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
bot_script : typing.Optional[str]
@@ -143,8 +140,7 @@ def completion(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ documents : typing.Optional[typing.Sequence[str]]
max_references : typing.Optional[int]
@@ -152,7 +148,7 @@ def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
+ embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -160,30 +156,34 @@ def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
+ asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
+ translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- input_glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ input_glossary_document : typing.Optional[str]
- output_glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ Translation Glossary for User Language -> LLM Language (English)
- lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
- tools : typing.Optional[typing.List[LlmTools]]
+ output_glossary_document : typing.Optional[str]
+
+ Translation Glossary for LLM Language (English) -> User Language
+
+
+ lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -196,9 +196,9 @@ def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
+ response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
- tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
+ tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -231,12 +231,11 @@ def completion(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -246,7 +245,7 @@ def completion(
face_padding_right : typing.Optional[int]
- sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings]
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
settings : typing.Optional[RunSettings]
@@ -273,11 +272,13 @@ def completion(
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"input_prompt": input_prompt,
"input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
"doc_extract_url": doc_extract_url,
"messages": messages,
"bot_script": bot_script,
@@ -286,6 +287,7 @@ def completion(
"task_instructions": task_instructions,
"query_instructions": query_instructions,
"keyword_instructions": keyword_instructions,
+ "documents": documents,
"max_references": max_references,
"max_context_words": max_context_words,
"scroll_jump": scroll_jump,
@@ -297,6 +299,8 @@ def completion(
"asr_language": asr_language,
"translation_model": translation_model,
"user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
"lipsync_model": lipsync_model,
"tools": tools,
"avoid_repetition": avoid_repetition,
@@ -323,6 +327,7 @@ def completion(
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -330,14 +335,6 @@ def completion(
"sadtalker_settings": sadtalker_settings,
"settings": settings,
},
- files={
- "input_images": input_images,
- "input_documents": input_documents,
- "documents": documents,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
- "input_face": input_face,
- },
request_options=request_options,
omit=OMIT,
)
@@ -394,67 +391,67 @@ async def completion(
self,
*,
example_id: typing.Optional[str] = None,
- functions: typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]] = None,
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- input_prompt: typing.Optional[str] = None,
- input_audio: typing.Optional[str] = None,
- input_images: typing.Optional[typing.List[core.File]] = None,
- input_documents: typing.Optional[typing.List[core.File]] = None,
- doc_extract_url: typing.Optional[str] = None,
- messages: typing.Optional[typing.List[ConversationEntry]] = None,
- bot_script: typing.Optional[str] = None,
- selected_model: typing.Optional[LargeLanguageModels] = None,
- document_model: typing.Optional[str] = None,
- task_instructions: typing.Optional[str] = None,
- query_instructions: typing.Optional[str] = None,
- keyword_instructions: typing.Optional[str] = None,
- documents: typing.Optional[typing.List[core.File]] = None,
- max_references: typing.Optional[int] = None,
- max_context_words: typing.Optional[int] = None,
- scroll_jump: typing.Optional[int] = None,
- embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
- dense_weight: typing.Optional[float] = None,
- citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
- use_url_shortener: typing.Optional[bool] = None,
- asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
- asr_language: typing.Optional[str] = None,
- translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
- user_language: typing.Optional[str] = None,
- input_glossary_document: typing.Optional[core.File] = None,
- output_glossary_document: typing.Optional[core.File] = None,
- lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
- tools: typing.Optional[typing.List[LlmTools]] = None,
- avoid_repetition: typing.Optional[bool] = None,
- num_outputs: typing.Optional[int] = None,
- quality: typing.Optional[float] = None,
- max_tokens: typing.Optional[int] = None,
- sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
- tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
- uberduck_voice_name: typing.Optional[str] = None,
- uberduck_speaking_rate: typing.Optional[float] = None,
- google_voice_name: typing.Optional[str] = None,
- google_speaking_rate: typing.Optional[float] = None,
- google_pitch: typing.Optional[float] = None,
- bark_history_prompt: typing.Optional[str] = None,
- elevenlabs_voice_name: typing.Optional[str] = None,
- elevenlabs_api_key: typing.Optional[str] = None,
- elevenlabs_voice_id: typing.Optional[str] = None,
- elevenlabs_model: typing.Optional[str] = None,
- elevenlabs_stability: typing.Optional[float] = None,
- elevenlabs_similarity_boost: typing.Optional[float] = None,
- elevenlabs_style: typing.Optional[float] = None,
- elevenlabs_speaker_boost: typing.Optional[bool] = None,
- azure_voice_name: typing.Optional[str] = None,
- openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None,
- openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None,
- input_face: typing.Optional[core.File] = None,
- face_padding_top: typing.Optional[int] = None,
- face_padding_bottom: typing.Optional[int] = None,
- face_padding_left: typing.Optional[int] = None,
- face_padding_right: typing.Optional[int] = None,
- sadtalker_settings: typing.Optional[CopilotCompletionRequestSadtalkerSettings] = None,
- settings: typing.Optional[RunSettings] = None,
+ functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
+ variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ input_prompt: typing.Optional[str] = OMIT,
+ input_audio: typing.Optional[str] = OMIT,
+ input_images: typing.Optional[typing.Sequence[str]] = OMIT,
+ input_documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ doc_extract_url: typing.Optional[str] = OMIT,
+ messages: typing.Optional[typing.Sequence[ConversationEntry]] = OMIT,
+ bot_script: typing.Optional[str] = OMIT,
+ selected_model: typing.Optional[LargeLanguageModels] = OMIT,
+ document_model: typing.Optional[str] = OMIT,
+ task_instructions: typing.Optional[str] = OMIT,
+ query_instructions: typing.Optional[str] = OMIT,
+ keyword_instructions: typing.Optional[str] = OMIT,
+ documents: typing.Optional[typing.Sequence[str]] = OMIT,
+ max_references: typing.Optional[int] = OMIT,
+ max_context_words: typing.Optional[int] = OMIT,
+ scroll_jump: typing.Optional[int] = OMIT,
+ embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = OMIT,
+ dense_weight: typing.Optional[float] = OMIT,
+ citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = OMIT,
+ use_url_shortener: typing.Optional[bool] = OMIT,
+ asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = OMIT,
+ asr_language: typing.Optional[str] = OMIT,
+ translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = OMIT,
+ user_language: typing.Optional[str] = OMIT,
+ input_glossary_document: typing.Optional[str] = OMIT,
+ output_glossary_document: typing.Optional[str] = OMIT,
+ lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = OMIT,
+ tools: typing.Optional[typing.Sequence[LlmTools]] = OMIT,
+ avoid_repetition: typing.Optional[bool] = OMIT,
+ num_outputs: typing.Optional[int] = OMIT,
+ quality: typing.Optional[float] = OMIT,
+ max_tokens: typing.Optional[int] = OMIT,
+ sampling_temperature: typing.Optional[float] = OMIT,
+ response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = OMIT,
+ tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = OMIT,
+ uberduck_voice_name: typing.Optional[str] = OMIT,
+ uberduck_speaking_rate: typing.Optional[float] = OMIT,
+ google_voice_name: typing.Optional[str] = OMIT,
+ google_speaking_rate: typing.Optional[float] = OMIT,
+ google_pitch: typing.Optional[float] = OMIT,
+ bark_history_prompt: typing.Optional[str] = OMIT,
+ elevenlabs_voice_name: typing.Optional[str] = OMIT,
+ elevenlabs_api_key: typing.Optional[str] = OMIT,
+ elevenlabs_voice_id: typing.Optional[str] = OMIT,
+ elevenlabs_model: typing.Optional[str] = OMIT,
+ elevenlabs_stability: typing.Optional[float] = OMIT,
+ elevenlabs_similarity_boost: typing.Optional[float] = OMIT,
+ elevenlabs_style: typing.Optional[float] = OMIT,
+ elevenlabs_speaker_boost: typing.Optional[bool] = OMIT,
+ azure_voice_name: typing.Optional[str] = OMIT,
+ openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = OMIT,
+ openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = OMIT,
+ input_face: typing.Optional[str] = OMIT,
+ face_padding_top: typing.Optional[int] = OMIT,
+ face_padding_bottom: typing.Optional[int] = OMIT,
+ face_padding_left: typing.Optional[int] = OMIT,
+ face_padding_right: typing.Optional[int] = OMIT,
+ sadtalker_settings: typing.Optional[SadTalkerSettings] = OMIT,
+ settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> VideoBotsPageOutput:
"""
@@ -462,7 +459,7 @@ async def completion(
----------
example_id : typing.Optional[str]
- functions : typing.Optional[typing.List[CopilotCompletionRequestFunctionsItem]]
+ functions : typing.Optional[typing.Sequence[RecipeFunction]]
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
@@ -471,16 +468,14 @@ async def completion(
input_audio : typing.Optional[str]
- input_images : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ input_images : typing.Optional[typing.Sequence[str]]
- input_documents : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ input_documents : typing.Optional[typing.Sequence[str]]
doc_extract_url : typing.Optional[str]
Select a workflow to extract text from documents and images.
- messages : typing.Optional[typing.List[ConversationEntry]]
+ messages : typing.Optional[typing.Sequence[ConversationEntry]]
bot_script : typing.Optional[str]
@@ -495,8 +490,7 @@ async def completion(
keyword_instructions : typing.Optional[str]
- documents : typing.Optional[typing.List[core.File]]
- See core.File for more documentation
+ documents : typing.Optional[typing.Sequence[str]]
max_references : typing.Optional[int]
@@ -504,7 +498,7 @@ async def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
+ embedding_model : typing.Optional[VideoBotsPageRequestEmbeddingModel]
dense_weight : typing.Optional[float]
@@ -512,30 +506,34 @@ async def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
+ citation_style : typing.Optional[VideoBotsPageRequestCitationStyle]
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
+ asr_model : typing.Optional[VideoBotsPageRequestAsrModel]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
+ translation_model : typing.Optional[VideoBotsPageRequestTranslationModel]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- input_glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ input_glossary_document : typing.Optional[str]
- output_glossary_document : typing.Optional[core.File]
- See core.File for more documentation
+ Translation Glossary for User Language -> LLM Language (English)
- lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
- tools : typing.Optional[typing.List[LlmTools]]
+ output_glossary_document : typing.Optional[str]
+
+ Translation Glossary for LLM Language (English) -> User Language
+
+
+ lipsync_model : typing.Optional[VideoBotsPageRequestLipsyncModel]
+
+ tools : typing.Optional[typing.Sequence[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
avoid_repetition : typing.Optional[bool]
@@ -548,9 +546,9 @@ async def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
+ response_format_type : typing.Optional[VideoBotsPageRequestResponseFormatType]
- tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
+ tts_provider : typing.Optional[VideoBotsPageRequestTtsProvider]
uberduck_voice_name : typing.Optional[str]
@@ -583,12 +581,11 @@ async def completion(
azure_voice_name : typing.Optional[str]
- openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName]
+ openai_voice_name : typing.Optional[VideoBotsPageRequestOpenaiVoiceName]
- openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel]
+ openai_tts_model : typing.Optional[VideoBotsPageRequestOpenaiTtsModel]
- input_face : typing.Optional[core.File]
- See core.File for more documentation
+ input_face : typing.Optional[str]
face_padding_top : typing.Optional[int]
@@ -598,7 +595,7 @@ async def completion(
face_padding_right : typing.Optional[int]
- sadtalker_settings : typing.Optional[CopilotCompletionRequestSadtalkerSettings]
+ sadtalker_settings : typing.Optional[SadTalkerSettings]
settings : typing.Optional[RunSettings]
@@ -633,11 +630,13 @@ async def main() -> None:
params={
"example_id": example_id,
},
- data={
+ json={
"functions": functions,
"variables": variables,
"input_prompt": input_prompt,
"input_audio": input_audio,
+ "input_images": input_images,
+ "input_documents": input_documents,
"doc_extract_url": doc_extract_url,
"messages": messages,
"bot_script": bot_script,
@@ -646,6 +645,7 @@ async def main() -> None:
"task_instructions": task_instructions,
"query_instructions": query_instructions,
"keyword_instructions": keyword_instructions,
+ "documents": documents,
"max_references": max_references,
"max_context_words": max_context_words,
"scroll_jump": scroll_jump,
@@ -657,6 +657,8 @@ async def main() -> None:
"asr_language": asr_language,
"translation_model": translation_model,
"user_language": user_language,
+ "input_glossary_document": input_glossary_document,
+ "output_glossary_document": output_glossary_document,
"lipsync_model": lipsync_model,
"tools": tools,
"avoid_repetition": avoid_repetition,
@@ -683,6 +685,7 @@ async def main() -> None:
"azure_voice_name": azure_voice_name,
"openai_voice_name": openai_voice_name,
"openai_tts_model": openai_tts_model,
+ "input_face": input_face,
"face_padding_top": face_padding_top,
"face_padding_bottom": face_padding_bottom,
"face_padding_left": face_padding_left,
@@ -690,14 +693,6 @@ async def main() -> None:
"sadtalker_settings": sadtalker_settings,
"settings": settings,
},
- files={
- "input_images": input_images,
- "input_documents": input_documents,
- "documents": documents,
- "input_glossary_document": input_glossary_document,
- "output_glossary_document": output_glossary_document,
- "input_face": input_face,
- },
request_options=request_options,
omit=OMIT,
)
diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py
index 1cdf619..8de9ee0 100644
--- a/src/gooey/copilot/types/__init__.py
+++ b/src/gooey/copilot/types/__init__.py
@@ -1,33 +1,23 @@
# This file was auto-generated by Fern from our API Definition.
-from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
-from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
-from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
-from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
-from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
-from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
-from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
-from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
-from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
-from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
-from .copilot_completion_request_sadtalker_settings_preprocess import (
- CopilotCompletionRequestSadtalkerSettingsPreprocess,
-)
-from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
-from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
+from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
+from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
+from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
__all__ = [
- "CopilotCompletionRequestAsrModel",
- "CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
- "CopilotCompletionRequestFunctionsItem",
- "CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
- "CopilotCompletionRequestOpenaiTtsModel",
- "CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
- "CopilotCompletionRequestSadtalkerSettings",
- "CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
+ "VideoBotsPageRequestAsrModel",
+ "VideoBotsPageRequestCitationStyle",
+ "VideoBotsPageRequestEmbeddingModel",
+ "VideoBotsPageRequestLipsyncModel",
+ "VideoBotsPageRequestOpenaiTtsModel",
+ "VideoBotsPageRequestOpenaiVoiceName",
+ "VideoBotsPageRequestResponseFormatType",
+ "VideoBotsPageRequestTranslationModel",
+ "VideoBotsPageRequestTtsProvider",
]
diff --git a/src/gooey/copilot/types/copilot_completion_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
deleted file mode 100644
index 65ae0f5..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
deleted file mode 100644
index 1bb273a..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestCitationStyle = typing.Union[
- typing.Literal[
- "number",
- "title",
- "url",
- "symbol",
- "markdown",
- "html",
- "slack_mrkdwn",
- "plaintext",
- "number_markdown",
- "number_html",
- "number_slack_mrkdwn",
- "number_plaintext",
- "symbol_markdown",
- "symbol_html",
- "symbol_slack_mrkdwn",
- "symbol_plaintext",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
deleted file mode 100644
index 4655801..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item.py b/src/gooey/copilot/types/copilot_completion_request_functions_item.py
deleted file mode 100644
index c9654f1..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_functions_item.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ...core.pydantic_utilities import UniversalBaseModel
-from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-
-
-class CopilotCompletionRequestFunctionsItem(UniversalBaseModel):
- url: str
- trigger: CopilotCompletionRequestFunctionsItemTrigger = pydantic.Field()
- """
- When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
deleted file mode 100644
index cf3e214..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
deleted file mode 100644
index 865bc4b..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
deleted file mode 100644
index 4f4a35b..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
deleted file mode 100644
index f60a6b3..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestOpenaiVoiceName = typing.Union[
- typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
deleted file mode 100644
index 3c9dbb0..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
deleted file mode 100644
index 12ae458..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ...core.pydantic_utilities import UniversalBaseModel
-import typing
-from .copilot_completion_request_sadtalker_settings_preprocess import (
- CopilotCompletionRequestSadtalkerSettingsPreprocess,
-)
-import pydantic
-from ...core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class CopilotCompletionRequestSadtalkerSettings(UniversalBaseModel):
- still: typing.Optional[bool] = None
- preprocess: typing.Optional[CopilotCompletionRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
- """
- SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
- """
-
- pose_style: typing.Optional[int] = pydantic.Field(default=None)
- """
- Random seed 0-45 inclusive that affects how the pose is animated.
- """
-
- expression_scale: typing.Optional[float] = pydantic.Field(default=None)
- """
- Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
- """
-
- ref_eyeblink: typing.Optional[str] = None
- ref_pose: typing.Optional[str] = None
- input_yaw: typing.Optional[typing.List[int]] = None
- input_pitch: typing.Optional[typing.List[int]] = None
- input_roll: typing.Optional[typing.List[int]] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py b/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
deleted file mode 100644
index 88add2e..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_sadtalker_settings_preprocess.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestSadtalkerSettingsPreprocess = typing.Union[
- typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
deleted file mode 100644
index 10b0b5a..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
deleted file mode 100644
index 4dec4b0..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/copilot/types/video_bots_page_request_asr_model.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_asr_model.py
rename to src/gooey/copilot/types/video_bots_page_request_asr_model.py
diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/copilot/types/video_bots_page_request_citation_style.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_citation_style.py
rename to src/gooey/copilot/types/video_bots_page_request_citation_style.py
diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/copilot/types/video_bots_page_request_embedding_model.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_embedding_model.py
rename to src/gooey/copilot/types/video_bots_page_request_embedding_model.py
diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/copilot/types/video_bots_page_request_lipsync_model.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_lipsync_model.py
rename to src/gooey/copilot/types/video_bots_page_request_lipsync_model.py
diff --git a/src/gooey/types/video_bots_page_request_openai_tts_model.py b/src/gooey/copilot/types/video_bots_page_request_openai_tts_model.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_openai_tts_model.py
rename to src/gooey/copilot/types/video_bots_page_request_openai_tts_model.py
diff --git a/src/gooey/types/video_bots_page_request_openai_voice_name.py b/src/gooey/copilot/types/video_bots_page_request_openai_voice_name.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_openai_voice_name.py
rename to src/gooey/copilot/types/video_bots_page_request_openai_voice_name.py
diff --git a/src/gooey/types/video_bots_page_request_response_format_type.py b/src/gooey/copilot/types/video_bots_page_request_response_format_type.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_response_format_type.py
rename to src/gooey/copilot/types/video_bots_page_request_response_format_type.py
diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/copilot/types/video_bots_page_request_translation_model.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_translation_model.py
rename to src/gooey/copilot/types/video_bots_page_request_translation_model.py
diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/copilot/types/video_bots_page_request_tts_provider.py
similarity index 100%
rename from src/gooey/types/video_bots_page_request_tts_provider.py
rename to src/gooey/copilot/types/video_bots_page_request_tts_provider.py
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index d25497a..f34b80e 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta18",
+ "X-Fern-SDK-Version": "0.0.1-beta19",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index 9087b38..ca0369d 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -9,7 +9,6 @@
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
-from .asr_page_request import AsrPageRequest
from .asr_page_request_output_format import AsrPageRequestOutputFormat
from .asr_page_request_selected_model import AsrPageRequestSelectedModel
from .asr_page_request_translation_model import AsrPageRequestTranslationModel
@@ -22,7 +21,6 @@
from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
from .bulk_runner_page_output import BulkRunnerPageOutput
-from .bulk_runner_page_request import BulkRunnerPageRequest
from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse
from .button_pressed import ButtonPressed
from .called_function_response import CalledFunctionResponse
@@ -40,7 +38,6 @@
from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
from .compare_upscaler_page_output import CompareUpscalerPageOutput
-from .compare_upscaler_page_request import CompareUpscalerPageRequest
from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
from .console_logs import ConsoleLogs
@@ -69,7 +66,6 @@
from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
-from .doc_extract_page_request import DocExtractPageRequest
from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
from .doc_extract_page_status_response import DocExtractPageStatusResponse
@@ -80,12 +76,9 @@
from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
-from .doc_summary_page_request import DocSummaryPageRequest
from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
-from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
-from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
@@ -94,7 +87,6 @@
from .embeddings_page_status_response import EmbeddingsPageStatusResponse
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
-from .face_inpainting_page_request import FaceInpaintingPageRequest
from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
from .final_response import FinalResponse
@@ -111,13 +103,11 @@
from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .http_validation_error import HttpValidationError
from .image_segmentation_page_output import ImageSegmentationPageOutput
-from .image_segmentation_page_request import ImageSegmentationPageRequest
from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .img2img_page_output import Img2ImgPageOutput
-from .img2img_page_request import Img2ImgPageRequest
from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
@@ -127,33 +117,22 @@
from .letter_writer_page_request import LetterWriterPageRequest
from .letter_writer_page_status_response import LetterWriterPageStatusResponse
from .lipsync_page_output import LipsyncPageOutput
-from .lipsync_page_request import LipsyncPageRequest
from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
from .lipsync_page_status_response import LipsyncPageStatusResponse
-from .lipsync_request_selected_model import LipsyncRequestSelectedModel
from .lipsync_tts_page_output import LipsyncTtsPageOutput
-from .lipsync_tts_page_request import LipsyncTtsPageRequest
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
-from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
-from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
-from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
-from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
from .llm_tools import LlmTools
from .message_part import MessagePart
from .object_inpainting_page_output import ObjectInpaintingPageOutput
-from .object_inpainting_page_request import ObjectInpaintingPageRequest
from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
-from .portrait_request_selected_model import PortraitRequestSelectedModel
-from .product_image_request_selected_model import ProductImageRequestSelectedModel
from .prompt_tree_node import PromptTreeNode
from .prompt_tree_node_prompt import PromptTreeNodePrompt
from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
-from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
)
@@ -163,10 +142,6 @@
)
from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
-from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
-from .qr_code_request_scheduler import QrCodeRequestScheduler
-from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
-from .qr_code_request_selected_model import QrCodeRequestSelectedModel
from .recipe_function import RecipeFunction
from .recipe_function_trigger import RecipeFunctionTrigger
from .recipe_run_state import RecipeRunState
@@ -182,10 +157,6 @@
from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
-from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
-from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
-from .remix_image_request_selected_model import RemixImageRequestSelectedModel
-from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
from .reply_button import ReplyButton
from .response_model import ResponseModel
from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
@@ -207,12 +178,7 @@
from .social_lookup_email_page_output import SocialLookupEmailPageOutput
from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
-from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
-from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
-from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .stream_error import StreamError
-from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
-from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
from .text2audio_page_output import Text2AudioPageOutput
from .text2audio_page_status_response import Text2AudioPageStatusResponse
from .text_to_speech_page_output import TextToSpeechPageOutput
@@ -221,32 +187,15 @@
from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
from .training_data_model import TrainingDataModel
-from .translate_request_selected_model import TranslateRequestSelectedModel
from .translation_page_output import TranslationPageOutput
-from .translation_page_request import TranslationPageRequest
from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .translation_page_status_response import TranslationPageStatusResponse
-from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .vcard import Vcard
from .video_bots_page_output import VideoBotsPageOutput
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
-from .video_bots_page_request import VideoBotsPageRequest
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
-from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
-from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
from .video_bots_page_status_response import VideoBotsPageStatusResponse
__all__ = [
@@ -259,7 +208,6 @@
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
- "AsrPageRequest",
"AsrPageRequestOutputFormat",
"AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
@@ -272,7 +220,6 @@
"BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
- "BulkRunnerPageRequest",
"BulkRunnerPageStatusResponse",
"ButtonPressed",
"CalledFunctionResponse",
@@ -290,7 +237,6 @@
"CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
- "CompareUpscalerPageRequest",
"CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
@@ -317,7 +263,6 @@
"DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
- "DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
"DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
@@ -328,12 +273,9 @@
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
- "DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
"DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
- "DocSummaryRequestResponseFormatType",
- "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
@@ -342,7 +284,6 @@
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
- "FaceInpaintingPageRequest",
"FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
@@ -359,13 +300,11 @@
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
"ImageSegmentationPageOutput",
- "ImageSegmentationPageRequest",
"ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
- "Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
"Img2ImgPageRequestSelectedControlnetModelItem",
"Img2ImgPageRequestSelectedModel",
@@ -375,42 +314,27 @@
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
"LipsyncPageOutput",
- "LipsyncPageRequest",
"LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
- "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
- "LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
"LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
- "LipsyncTtsRequestOpenaiTtsModel",
- "LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestSelectedModel",
- "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
- "ObjectInpaintingPageRequest",
"ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
- "PortraitRequestSelectedModel",
- "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
- "QrCodeGeneratorPageRequest",
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
"QrCodeGeneratorPageRequestScheduler",
"QrCodeGeneratorPageRequestSelectedControlnetModelItem",
"QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
- "QrCodeRequestImagePromptControlnetModelsItem",
- "QrCodeRequestScheduler",
- "QrCodeRequestSelectedControlnetModelItem",
- "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
@@ -426,10 +350,6 @@
"RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
- "RemixImageRequestSelectedControlnetModel",
- "RemixImageRequestSelectedControlnetModelItem",
- "RemixImageRequestSelectedModel",
- "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
@@ -451,12 +371,7 @@
"SocialLookupEmailPageOutput",
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
- "SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestSelectedModel",
- "SpeechRecognitionRequestTranslationModel",
"StreamError",
- "SynthesizeDataRequestResponseFormatType",
- "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -465,31 +380,14 @@
"TextToSpeechPageRequestTtsProvider",
"TextToSpeechPageStatusResponse",
"TrainingDataModel",
- "TranslateRequestSelectedModel",
"TranslationPageOutput",
- "TranslationPageRequest",
"TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
- "UpscaleRequestSelectedModelsItem",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
"VideoBotsPageOutput",
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
- "VideoBotsPageRequest",
- "VideoBotsPageRequestAsrModel",
- "VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
- "VideoBotsPageRequestFunctionsItem",
- "VideoBotsPageRequestFunctionsItemTrigger",
- "VideoBotsPageRequestLipsyncModel",
- "VideoBotsPageRequestOpenaiTtsModel",
- "VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
- "VideoBotsPageRequestSadtalkerSettings",
- "VideoBotsPageRequestSadtalkerSettingsPreprocess",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
]
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
deleted file mode 100644
index 1d35181..0000000
--- a/src/gooey/types/asr_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .asr_page_request_selected_model import AsrPageRequestSelectedModel
-from .asr_page_request_translation_model import AsrPageRequestTranslationModel
-from .asr_page_request_output_format import AsrPageRequestOutputFormat
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AsrPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
- language: typing.Optional[str] = None
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
- output_format: typing.Optional[AsrPageRequestOutputFormat] = None
- google_translate_target: typing.Optional[str] = pydantic.Field(default=None)
- """
- use `translation_model` & `translation_target` instead.
- """
-
- translation_source: typing.Optional[str] = None
- translation_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/bulk_runner_page_request.py b/src/gooey/types/bulk_runner_page_request.py
deleted file mode 100644
index a4129d9..0000000
--- a/src/gooey/types/bulk_runner_page_request.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class BulkRunnerPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str] = pydantic.Field()
- """
- Upload or link to a CSV or google sheet that contains your sample input data.
- For example, for Copilot, this would sample questions or for Art QR Code, would would be pairs of image descriptions and URLs.
- Remember to includes header names in your CSV too.
- """
-
- run_urls: typing.List[str] = pydantic.Field()
- """
- Provide one or more Gooey.AI workflow runs.
- You can add multiple runs from the same recipe (e.g. two versions of your copilot) and we'll run the inputs over both of them.
- """
-
- input_columns: typing.Dict[str, str] = pydantic.Field()
- """
- For each input field in the Gooey.AI workflow, specify the column in your input data that corresponds to it.
- """
-
- output_columns: typing.Dict[str, str] = pydantic.Field()
- """
- For each output field in the Gooey.AI workflow, specify the column name that you'd like to use for it in the output data.
- """
-
- eval_urls: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- _(optional)_ Add one or more Gooey.AI Evaluator Workflows to evaluate the results of your runs.
- """
-
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
deleted file mode 100644
index 8cfb4e7..0000000
--- a/src/gooey/types/compare_upscaler_page_request.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class CompareUpscalerPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: typing.Optional[str] = None
- input_video: typing.Optional[str] = None
- scale: int = pydantic.Field()
- """
- The final upsampling scale of the image
- """
-
- selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
- selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
deleted file mode 100644
index 9690c6c..0000000
--- a/src/gooey/types/doc_extract_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
-from .large_language_models import LargeLanguageModels
-from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class DocExtractPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- sheet_url: typing.Optional[str] = None
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
- google_translate_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = None
- task_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[LargeLanguageModels] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
deleted file mode 100644
index 466ddc1..0000000
--- a/src/gooey/types/doc_summary_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .large_language_models import LargeLanguageModels
-from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
-from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class DocSummaryPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- documents: typing.List[str]
- task_instructions: typing.Optional[str] = None
- merge_instructions: typing.Optional[str] = None
- selected_model: typing.Optional[LargeLanguageModels] = None
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
- google_translate_target: typing.Optional[str] = None
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py
deleted file mode 100644
index 8fabf9b..0000000
--- a/src/gooey/types/doc_summary_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py
deleted file mode 100644
index 8b8a338..0000000
--- a/src/gooey/types/doc_summary_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
deleted file mode 100644
index a653205..0000000
--- a/src/gooey/types/face_inpainting_page_request.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class FaceInpaintingPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: str
- face_scale: typing.Optional[float] = None
- face_pos_x: typing.Optional[float] = None
- face_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- upscale_factor: typing.Optional[float] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
deleted file mode 100644
index a2ea60d..0000000
--- a/src/gooey/types/image_segmentation_page_request.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class ImageSegmentationPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
- mask_threshold: typing.Optional[float] = None
- rect_persepective_transform: typing.Optional[bool] = None
- reflection_opacity: typing.Optional[float] = None
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
deleted file mode 100644
index f3cfd2f..0000000
--- a/src/gooey/types/img2img_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
-from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class Img2ImgPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: typing.Optional[str] = None
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
- selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- prompt_strength: typing.Optional[float] = None
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
- seed: typing.Optional[int] = None
- image_guidance_scale: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
deleted file mode 100644
index 2914a1e..0000000
--- a/src/gooey/types/lipsync_page_request.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .sad_talker_settings import SadTalkerSettings
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class LipsyncPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
- input_audio: typing.Optional[str] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py
deleted file mode 100644
index c5614b4..0000000
--- a/src/gooey/types/lipsync_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
deleted file mode 100644
index f4f5293..0000000
--- a/src/gooey/types/lipsync_tts_page_request.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
-from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
-from .sad_talker_settings import SadTalkerSettings
-from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class LipsyncTtsPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- text_prompt: str
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
- uberduck_voice_name: typing.Optional[str] = None
- uberduck_speaking_rate: typing.Optional[float] = None
- google_voice_name: typing.Optional[str] = None
- google_speaking_rate: typing.Optional[float] = None
- google_pitch: typing.Optional[float] = None
- bark_history_prompt: typing.Optional[str] = None
- elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
- """
- Use `elevenlabs_voice_id` instead
- """
-
- elevenlabs_api_key: typing.Optional[str] = None
- elevenlabs_voice_id: typing.Optional[str] = None
- elevenlabs_model: typing.Optional[str] = None
- elevenlabs_stability: typing.Optional[float] = None
- elevenlabs_similarity_boost: typing.Optional[float] = None
- elevenlabs_style: typing.Optional[float] = None
- elevenlabs_speaker_boost: typing.Optional[bool] = None
- azure_voice_name: typing.Optional[str] = None
- openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None
- openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py
deleted file mode 100644
index 510dcfb..0000000
--- a/src/gooey/types/lipsync_tts_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_request_openai_voice_name.py
deleted file mode 100644
index 7ea601b..0000000
--- a/src/gooey/types/lipsync_tts_request_openai_voice_name.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsRequestOpenaiVoiceName = typing.Union[
- typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
-]
diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py
deleted file mode 100644
index 9ece5a9..0000000
--- a/src/gooey/types/lipsync_tts_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/types/lipsync_tts_request_tts_provider.py
deleted file mode 100644
index 1a23fe3..0000000
--- a/src/gooey/types/lipsync_tts_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
deleted file mode 100644
index 50b5b72..0000000
--- a/src/gooey/types/object_inpainting_page_request.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class ObjectInpaintingPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_image: str
- text_prompt: str
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- mask_threshold: typing.Optional[float] = None
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
- negative_prompt: typing.Optional[str] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- sd2upscaling: typing.Optional[bool] = pydantic.Field(alias="sd_2_upscaling", default=None)
- seed: typing.Optional[int] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py
deleted file mode 100644
index 6c4a5ce..0000000
--- a/src/gooey/types/portrait_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py
deleted file mode 100644
index f1ce039..0000000
--- a/src/gooey/types/product_image_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
deleted file mode 100644
index 68f3730..0000000
--- a/src/gooey/types/qr_code_generator_page_request.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .vcard import Vcard
-from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
-from .qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class QrCodeGeneratorPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- qr_code_data: typing.Optional[str] = None
- qr_code_input_image: typing.Optional[str] = None
- qr_code_vcard: typing.Optional[Vcard] = None
- qr_code_file: typing.Optional[str] = None
- use_url_shortener: typing.Optional[bool] = None
- text_prompt: str
- negative_prompt: typing.Optional[str] = None
- image_prompt: typing.Optional[str] = None
- image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = None
- image_prompt_strength: typing.Optional[float] = None
- image_prompt_scale: typing.Optional[float] = None
- image_prompt_pos_x: typing.Optional[float] = None
- image_prompt_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
- selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = (
- None
- )
- output_width: typing.Optional[int] = None
- output_height: typing.Optional[int] = None
- guidance_scale: typing.Optional[float] = None
- controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[int] = None
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None
- seed: typing.Optional[int] = None
- obj_scale: typing.Optional[float] = None
- obj_pos_x: typing.Optional[float] = None
- obj_pos_y: typing.Optional[float] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
deleted file mode 100644
index 3be2ab6..0000000
--- a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeRequestImagePromptControlnetModelsItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/qr_code_request_scheduler.py
deleted file mode 100644
index 890b204..0000000
--- a/src/gooey/types/qr_code_request_scheduler.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeRequestScheduler = typing.Union[
- typing.Literal[
- "singlestep_dpm_solver",
- "multistep_dpm_solver",
- "dpm_sde",
- "dpm_discrete",
- "dpm_discrete_ancestral",
- "unipc",
- "lms_discrete",
- "heun",
- "euler",
- "euler_ancestral",
- "pndm",
- "ddpm",
- "ddim",
- "deis",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
deleted file mode 100644
index c5cdc8d..0000000
--- a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeRequestSelectedControlnetModelItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/qr_code_request_selected_model.py
deleted file mode 100644
index 7ea963c..0000000
--- a/src/gooey/types/qr_code_request_selected_model.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeRequestSelectedModel = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "dall_e_3",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- "deepfloyd_if",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py
index 08bea99..ed79772 100644
--- a/src/gooey/types/recipe_function.py
+++ b/src/gooey/types/recipe_function.py
@@ -1,14 +1,18 @@
# This file was auto-generated by Fern from our API Definition.
from ..core.pydantic_utilities import UniversalBaseModel
-from .recipe_function_trigger import RecipeFunctionTrigger
import pydantic
+from .recipe_function_trigger import RecipeFunctionTrigger
from ..core.pydantic_utilities import IS_PYDANTIC_V2
import typing
class RecipeFunction(UniversalBaseModel):
- url: str
+ url: str = pydantic.Field()
+ """
+ The URL of the [function](https://gooey.ai/functions) to call.
+ """
+
trigger: RecipeFunctionTrigger = pydantic.Field()
"""
When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py
deleted file mode 100644
index eea207f..0000000
--- a/src/gooey/types/remix_image_request_selected_controlnet_model.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
-
-RemixImageRequestSelectedControlnetModel = typing.Union[
- typing.List[RemixImageRequestSelectedControlnetModelItem],
- typing.Literal["sd_controlnet_canny"],
- typing.Literal["sd_controlnet_depth"],
- typing.Literal["sd_controlnet_hed"],
- typing.Literal["sd_controlnet_mlsd"],
- typing.Literal["sd_controlnet_normal"],
- typing.Literal["sd_controlnet_openpose"],
- typing.Literal["sd_controlnet_scribble"],
- typing.Literal["sd_controlnet_seg"],
- typing.Literal["sd_controlnet_tile"],
- typing.Literal["sd_controlnet_brightness"],
- typing.Literal["control_v1p_sd15_qrcode_monster_v2"],
-]
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
deleted file mode 100644
index b4f3ff0..0000000
--- a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RemixImageRequestSelectedControlnetModelItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/remix_image_request_selected_model.py
deleted file mode 100644
index 245d6b0..0000000
--- a/src/gooey/types/remix_image_request_selected_model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RemixImageRequestSelectedModel = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "instruct_pix2pix",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py
deleted file mode 100644
index c84f0e7..0000000
--- a/src/gooey/types/remove_background_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/sad_talker_settings.py b/src/gooey/types/sad_talker_settings.py
index c9200b4..85464e7 100644
--- a/src/gooey/types/sad_talker_settings.py
+++ b/src/gooey/types/sad_talker_settings.py
@@ -24,8 +24,16 @@ class SadTalkerSettings(UniversalBaseModel):
Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
"""
- ref_eyeblink: typing.Optional[str] = None
- ref_pose: typing.Optional[str] = None
+ ref_eyeblink: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Optional reference video for eyeblinks to make the eyebrow movement more natural.
+ """
+
+ ref_pose: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ Optional reference video to pose the head.
+ """
+
input_yaw: typing.Optional[typing.List[int]] = None
input_pitch: typing.Optional[typing.List[int]] = None
input_roll: typing.Optional[typing.List[int]] = None
diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py
deleted file mode 100644
index 4d2cf2b..0000000
--- a/src/gooey/types/speech_recognition_request_output_format.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py
deleted file mode 100644
index 9d2d28f..0000000
--- a/src/gooey/types/speech_recognition_request_selected_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestSelectedModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py
deleted file mode 100644
index 886ab92..0000000
--- a/src/gooey/types/speech_recognition_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py
deleted file mode 100644
index 3ab37a9..0000000
--- a/src/gooey/types/synthesize_data_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py
deleted file mode 100644
index 6c1bc21..0000000
--- a/src/gooey/types/synthesize_data_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SynthesizeDataRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py
deleted file mode 100644
index b774b56..0000000
--- a/src/gooey/types/translate_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
deleted file mode 100644
index 9c033a6..0000000
--- a/src/gooey/types/translation_page_request.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .recipe_function import RecipeFunction
-import pydantic
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class TranslationPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[RecipeFunction]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- texts: typing.Optional[typing.List[str]] = None
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
- translation_source: typing.Optional[str] = None
- translation_target: typing.Optional[str] = None
- glossary_document: typing.Optional[str] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/upscale_request_selected_models_item.py
deleted file mode 100644
index 1a8362e..0000000
--- a/src/gooey/types/upscale_request_selected_models_item.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-UpscaleRequestSelectedModelsItem = typing.Union[
- typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
-]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
deleted file mode 100644
index 6fb8b5e..0000000
--- a/src/gooey/types/video_bots_page_request.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
-import pydantic
-from .conversation_entry import ConversationEntry
-from .large_language_models import LargeLanguageModels
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
-from .llm_tools import LlmTools
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
-from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
-from .run_settings import RunSettings
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class VideoBotsPageRequest(UniversalBaseModel):
- functions: typing.Optional[typing.List[VideoBotsPageRequestFunctionsItem]] = None
- variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Variables to be used as Jinja prompt templates and in functions as arguments
- """
-
- input_prompt: typing.Optional[str] = None
- input_audio: typing.Optional[str] = None
- input_images: typing.Optional[typing.List[str]] = None
- input_documents: typing.Optional[typing.List[str]] = None
- doc_extract_url: typing.Optional[str] = pydantic.Field(default=None)
- """
- Select a workflow to extract text from documents and images.
- """
-
- messages: typing.Optional[typing.List[ConversationEntry]] = None
- bot_script: typing.Optional[str] = None
- selected_model: typing.Optional[LargeLanguageModels] = None
- document_model: typing.Optional[str] = pydantic.Field(default=None)
- """
- When your copilot users upload a photo or pdf, what kind of document are they mostly likely to upload? (via [Azure](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/how-to-guides/use-sdk-rest-api?view=doc-intel-3.1.0&tabs=linux&pivots=programming-language-rest-api))
- """
-
- task_instructions: typing.Optional[str] = None
- query_instructions: typing.Optional[str] = None
- keyword_instructions: typing.Optional[str] = None
- documents: typing.Optional[typing.List[str]] = None
- max_references: typing.Optional[int] = None
- max_context_words: typing.Optional[int] = None
- scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
- dense_weight: typing.Optional[float] = pydantic.Field(default=None)
- """
- Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
- Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- """
-
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
- use_url_shortener: typing.Optional[bool] = None
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
- """
- Choose a model to transcribe incoming audio messages to text.
- """
-
- asr_language: typing.Optional[str] = pydantic.Field(default=None)
- """
- Choose a language to transcribe incoming audio messages to text.
- """
-
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
- user_language: typing.Optional[str] = pydantic.Field(default=None)
- """
- Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
- """
-
- input_glossary_document: typing.Optional[str] = None
- output_glossary_document: typing.Optional[str] = None
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
- tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
- """
- Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
- """
-
- avoid_repetition: typing.Optional[bool] = None
- num_outputs: typing.Optional[int] = None
- quality: typing.Optional[float] = None
- max_tokens: typing.Optional[int] = None
- sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
- uberduck_voice_name: typing.Optional[str] = None
- uberduck_speaking_rate: typing.Optional[float] = None
- google_voice_name: typing.Optional[str] = None
- google_speaking_rate: typing.Optional[float] = None
- google_pitch: typing.Optional[float] = None
- bark_history_prompt: typing.Optional[str] = None
- elevenlabs_voice_name: typing.Optional[str] = pydantic.Field(default=None)
- """
- Use `elevenlabs_voice_id` instead
- """
-
- elevenlabs_api_key: typing.Optional[str] = None
- elevenlabs_voice_id: typing.Optional[str] = None
- elevenlabs_model: typing.Optional[str] = None
- elevenlabs_stability: typing.Optional[float] = None
- elevenlabs_similarity_boost: typing.Optional[float] = None
- elevenlabs_style: typing.Optional[float] = None
- elevenlabs_speaker_boost: typing.Optional[bool] = None
- azure_voice_name: typing.Optional[str] = None
- openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
- openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
- input_face: typing.Optional[str] = None
- face_padding_top: typing.Optional[int] = None
- face_padding_bottom: typing.Optional[int] = None
- face_padding_left: typing.Optional[int] = None
- face_padding_right: typing.Optional[int] = None
- sadtalker_settings: typing.Optional[VideoBotsPageRequestSadtalkerSettings] = None
- settings: typing.Optional[RunSettings] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_functions_item.py b/src/gooey/types/video_bots_page_request_functions_item.py
deleted file mode 100644
index 5803c05..0000000
--- a/src/gooey/types/video_bots_page_request_functions_item.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import typing
-
-
-class VideoBotsPageRequestFunctionsItem(UniversalBaseModel):
- url: str
- trigger: VideoBotsPageRequestFunctionsItemTrigger = pydantic.Field()
- """
- When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_functions_item_trigger.py b/src/gooey/types/video_bots_page_request_functions_item_trigger.py
deleted file mode 100644
index b3c2078..0000000
--- a/src/gooey/types/video_bots_page_request_functions_item_trigger.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings.py b/src/gooey/types/video_bots_page_request_sadtalker_settings.py
deleted file mode 100644
index 6749388..0000000
--- a/src/gooey/types/video_bots_page_request_sadtalker_settings.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.pydantic_utilities import UniversalBaseModel
-import typing
-from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess
-import pydantic
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class VideoBotsPageRequestSadtalkerSettings(UniversalBaseModel):
- still: typing.Optional[bool] = None
- preprocess: typing.Optional[VideoBotsPageRequestSadtalkerSettingsPreprocess] = pydantic.Field(default=None)
- """
- SadTalker only generates 512x512 output. 'crop' handles this by cropping the input to 512x512. 'resize' scales down the input to fit 512x512 and scales it back up after lipsyncing (does not work well for full person images, better for portraits). 'full' processes the cropped region and pastes it back into the original input. 'extcrop' and 'extfull' are similar to 'crop' and 'full' but with extended cropping.
- """
-
- pose_style: typing.Optional[int] = pydantic.Field(default=None)
- """
- Random seed 0-45 inclusive that affects how the pose is animated.
- """
-
- expression_scale: typing.Optional[float] = pydantic.Field(default=None)
- """
- Scale the amount of expression motion. 1.0 is normal, 0.5 is very reduced, and 2.0 is quite a lot.
- """
-
- ref_eyeblink: typing.Optional[str] = None
- ref_pose: typing.Optional[str] = None
- input_yaw: typing.Optional[typing.List[int]] = None
- input_pitch: typing.Optional[typing.List[int]] = None
- input_roll: typing.Optional[typing.List[int]] = None
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py b/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
deleted file mode 100644
index 4a625ac..0000000
--- a/src/gooey/types/video_bots_page_request_sadtalker_settings_preprocess.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestSadtalkerSettingsPreprocess = typing.Union[
- typing.Literal["crop", "extcrop", "resize", "full", "extfull"], typing.Any
-]