diff --git a/pyproject.toml b/pyproject.toml
index c75a245..318af9f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta24"
+version = "0.0.1-beta25"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 0266830..ad2abad 100644
--- a/reference.md
+++ b/reference.md
@@ -308,7 +308,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[ControlNetModels]]`
@@ -348,7 +348,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
+**selected_model:** `typing.Optional[TextToImageModels]`
@@ -356,7 +356,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
+**selected_controlnet_model:** `typing.Optional[typing.List[ControlNetModels]]`
@@ -412,7 +412,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
+**scheduler:** `typing.Optional[Schedulers]`
@@ -666,7 +666,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -893,7 +893,7 @@ client.seo_content(
-
-**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1147,7 +1147,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1331,7 +1331,7 @@ client.personalize_email(
-
-**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1647,7 +1647,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-
-**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1835,7 +1835,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1977,7 +1977,7 @@ client.llm()
-
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2166,7 +2166,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -2214,7 +2214,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2382,7 +2382,7 @@ client.smart_gpt(
-
-**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2502,7 +2502,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
+**chain_type:** `typing.Optional[CombineDocumentsChains]`
@@ -2566,7 +2566,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2886,7 +2886,7 @@ client.lipsync_tts(
-
-**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
@@ -3176,7 +3176,7 @@ client.text_to_speech(
-
-**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
@@ -3424,7 +3424,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
+**translation_model:** `typing.Optional[TranslationModels]`
@@ -3432,7 +3432,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
+**output_format:** `typing.Optional[AsrOutputFormat]`
@@ -3618,7 +3618,7 @@ client.text_to_music(
-
-**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
+**selected_models:** `typing.Optional[typing.Sequence[Text2AudioModels]]`
@@ -3712,7 +3712,7 @@ client.translate()
-
-**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
+**selected_model:** `typing.Optional[TranslationModels]`
@@ -3842,7 +3842,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageToImageModels]`
@@ -4106,7 +4106,7 @@ client.text_to_image(
-
-**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.Sequence[TextToImageModels]]`
@@ -4114,7 +4114,7 @@ client.text_to_image(
-
-**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
+**scheduler:** `typing.Optional[Schedulers]`
@@ -4268,7 +4268,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4462,7 +4462,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4663,7 +4663,7 @@ client.image_from_email(
-
-**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4912,7 +4912,7 @@ client.image_from_web_search(
-
-**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageToImageModels]`
@@ -5072,7 +5072,7 @@ core.File` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageSegmentationModels]`
@@ -5236,7 +5236,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.List[UpscalerModels]]`
@@ -5529,7 +5529,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -5577,7 +5577,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -5897,7 +5897,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -5929,7 +5929,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
+**translation_model:** `typing.Optional[TranslationModels]`
@@ -6021,7 +6021,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -6029,7 +6029,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index 5b2ed59..b9e8980 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -9,19 +9,17 @@
AnimationPrompt,
AsrChunk,
AsrModels,
+ AsrOutputFormat,
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
AsrPageRequest,
- AsrPageRequestOutputFormat,
- AsrPageRequestTranslationModel,
AsrPageStatusResponse,
AsyncApiResponseModelV3,
BalanceResponse,
BotBroadcastFilters,
BotBroadcastRequestModel,
BulkEvalPageOutput,
- BulkEvalPageRequestResponseFormatType,
BulkEvalPageStatusResponse,
BulkRunnerPageOutput,
BulkRunnerPageRequest,
@@ -34,19 +32,18 @@
ChyronPlantPageOutput,
ChyronPlantPageRequest,
ChyronPlantPageStatusResponse,
+ CitationStyles,
+ CombineDocumentsChains,
CompareLlmPageOutput,
- CompareLlmPageRequestResponseFormatType,
CompareLlmPageStatusResponse,
CompareText2ImgPageOutput,
- CompareText2ImgPageRequestScheduler,
- CompareText2ImgPageRequestSelectedModelsItem,
CompareText2ImgPageStatusResponse,
CompareUpscalerPageOutput,
CompareUpscalerPageRequest,
- CompareUpscalerPageRequestSelectedModelsItem,
CompareUpscalerPageStatusResponse,
ConsoleLogs,
ConsoleLogsLevel,
+ ControlNetModels,
ConversationEntry,
ConversationEntryContent,
ConversationEntryContentItem,
@@ -55,31 +52,21 @@
ConversationEntryRole,
ConversationStart,
CreateStreamRequest,
- CreateStreamRequestCitationStyle,
CreateStreamRequestOpenaiTtsModel,
CreateStreamRequestOpenaiVoiceName,
- CreateStreamRequestResponseFormatType,
- CreateStreamRequestTranslationModel,
- CreateStreamRequestTtsProvider,
CreateStreamResponse,
DeforumSdPageOutput,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
DocExtractPageRequest,
- DocExtractPageRequestResponseFormatType,
DocExtractPageStatusResponse,
DocSearchPageOutput,
- DocSearchPageRequestCitationStyle,
DocSearchPageRequestKeywordQuery,
- DocSearchPageRequestResponseFormatType,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
DocSummaryPageRequest,
- DocSummaryPageRequestResponseFormatType,
DocSummaryPageStatusResponse,
- DocSummaryRequestResponseFormatType,
EmailFaceInpaintingPageOutput,
- EmailFaceInpaintingPageRequestSelectedModel,
EmailFaceInpaintingPageStatusResponse,
EmbeddingModels,
EmbeddingsPageOutput,
@@ -87,7 +74,6 @@
EvalPrompt,
FaceInpaintingPageOutput,
FaceInpaintingPageRequest,
- FaceInpaintingPageRequestSelectedModel,
FaceInpaintingPageStatusResponse,
FinalResponse,
FunctionsPageOutput,
@@ -95,24 +81,22 @@
GenericErrorResponse,
GenericErrorResponseDetail,
GoogleGptPageOutput,
- GoogleGptPageRequestResponseFormatType,
GoogleGptPageStatusResponse,
GoogleImageGenPageOutput,
- GoogleImageGenPageRequestSelectedModel,
GoogleImageGenPageStatusResponse,
HttpValidationError,
+ ImageSegmentationModels,
ImageSegmentationPageOutput,
ImageSegmentationPageRequest,
- ImageSegmentationPageRequestSelectedModel,
ImageSegmentationPageStatusResponse,
+ ImageToImageModels,
ImageUrl,
ImageUrlDetail,
Img2ImgPageOutput,
Img2ImgPageRequest,
Img2ImgPageRequestSelectedControlnetModel,
- Img2ImgPageRequestSelectedControlnetModelItem,
- Img2ImgPageRequestSelectedModel,
Img2ImgPageStatusResponse,
+ InpaintingModels,
LargeLanguageModels,
LetterWriterPageOutput,
LetterWriterPageRequest,
@@ -125,50 +109,32 @@
LipsyncTtsPageRequest,
LipsyncTtsPageRequestOpenaiTtsModel,
LipsyncTtsPageRequestOpenaiVoiceName,
- LipsyncTtsPageRequestTtsProvider,
LipsyncTtsPageStatusResponse,
LipsyncTtsRequestOpenaiTtsModel,
LipsyncTtsRequestOpenaiVoiceName,
- LipsyncTtsRequestTtsProvider,
LlmTools,
MessagePart,
ObjectInpaintingPageOutput,
ObjectInpaintingPageRequest,
- ObjectInpaintingPageRequestSelectedModel,
ObjectInpaintingPageStatusResponse,
- PortraitRequestSelectedModel,
- ProductImageRequestSelectedModel,
PromptTreeNode,
PromptTreeNodePrompt,
QrCodeGeneratorPageOutput,
QrCodeGeneratorPageRequest,
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
- QrCodeGeneratorPageRequestScheduler,
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
- QrCodeGeneratorPageRequestSelectedModel,
QrCodeGeneratorPageStatusResponse,
- QrCodeRequestImagePromptControlnetModelsItem,
- QrCodeRequestScheduler,
- QrCodeRequestSelectedControlnetModelItem,
- QrCodeRequestSelectedModel,
RecipeFunction,
RecipeFunctionTrigger,
RecipeRunState,
RelatedDocSearchResponse,
RelatedGoogleGptResponse,
RelatedQnADocPageOutput,
- RelatedQnADocPageRequestCitationStyle,
RelatedQnADocPageRequestKeywordQuery,
- RelatedQnADocPageRequestResponseFormatType,
RelatedQnADocPageStatusResponse,
RelatedQnAPageOutput,
- RelatedQnAPageRequestResponseFormatType,
RelatedQnAPageStatusResponse,
RemixImageRequestSelectedControlnetModel,
- RemixImageRequestSelectedControlnetModelItem,
- RemixImageRequestSelectedModel,
- RemoveBackgroundRequestSelectedModel,
ReplyButton,
+ ResponseFormatType,
ResponseModel,
ResponseModelFinalKeywordQuery,
ResponseModelFinalPrompt,
@@ -177,36 +143,32 @@
RunStart,
SadTalkerSettings,
SadTalkerSettingsPreprocess,
+ Schedulers,
SearchReference,
SeoSummaryPageOutput,
- SeoSummaryPageRequestResponseFormatType,
SeoSummaryPageStatusResponse,
SerpSearchLocations,
SerpSearchType,
SmartGptPageOutput,
- SmartGptPageRequestResponseFormatType,
SmartGptPageStatusResponse,
SocialLookupEmailPageOutput,
- SocialLookupEmailPageRequestResponseFormatType,
SocialLookupEmailPageStatusResponse,
- SpeechRecognitionRequestOutputFormat,
- SpeechRecognitionRequestTranslationModel,
StreamError,
- SynthesizeDataRequestResponseFormatType,
+ Text2AudioModels,
Text2AudioPageOutput,
Text2AudioPageStatusResponse,
+ TextToImageModels,
TextToSpeechPageOutput,
TextToSpeechPageRequestOpenaiTtsModel,
TextToSpeechPageRequestOpenaiVoiceName,
- TextToSpeechPageRequestTtsProvider,
TextToSpeechPageStatusResponse,
+ TextToSpeechProviders,
TrainingDataModel,
- TranslateRequestSelectedModel,
+ TranslationModels,
TranslationPageOutput,
TranslationPageRequest,
- TranslationPageRequestSelectedModel,
TranslationPageStatusResponse,
- UpscaleRequestSelectedModelsItem,
+ UpscalerModels,
ValidationError,
ValidationErrorLocItem,
Vcard,
@@ -214,32 +176,24 @@
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
VideoBotsPageRequest,
- VideoBotsPageRequestCitationStyle,
VideoBotsPageRequestFunctionsItem,
VideoBotsPageRequestFunctionsItemTrigger,
VideoBotsPageRequestOpenaiTtsModel,
VideoBotsPageRequestOpenaiVoiceName,
- VideoBotsPageRequestResponseFormatType,
VideoBotsPageRequestSadtalkerSettings,
VideoBotsPageRequestSadtalkerSettingsPreprocess,
- VideoBotsPageRequestTranslationModel,
- VideoBotsPageRequestTtsProvider,
VideoBotsPageStatusResponse,
)
from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError
from . import copilot
from .client import AsyncGooey, Gooey
from .copilot import (
- CopilotCompletionRequestCitationStyle,
CopilotCompletionRequestFunctionsItem,
CopilotCompletionRequestFunctionsItemTrigger,
CopilotCompletionRequestOpenaiTtsModel,
CopilotCompletionRequestOpenaiVoiceName,
- CopilotCompletionRequestResponseFormatType,
CopilotCompletionRequestSadtalkerSettings,
CopilotCompletionRequestSadtalkerSettingsPreprocess,
- CopilotCompletionRequestTranslationModel,
- CopilotCompletionRequestTtsProvider,
)
from .environment import GooeyEnvironment
from .version import __version__
@@ -253,12 +207,11 @@
"AnimationPrompt",
"AsrChunk",
"AsrModels",
+ "AsrOutputFormat",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
"AsrPageRequest",
- "AsrPageRequestOutputFormat",
- "AsrPageRequestTranslationModel",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
"AsyncGooey",
@@ -266,7 +219,6 @@
"BotBroadcastFilters",
"BotBroadcastRequestModel",
"BulkEvalPageOutput",
- "BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
"BulkRunnerPageRequest",
@@ -279,19 +231,18 @@
"ChyronPlantPageOutput",
"ChyronPlantPageRequest",
"ChyronPlantPageStatusResponse",
+ "CitationStyles",
+ "CombineDocumentsChains",
"CompareLlmPageOutput",
- "CompareLlmPageRequestResponseFormatType",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
- "CompareText2ImgPageRequestScheduler",
- "CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
"CompareUpscalerPageRequest",
- "CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
"ConsoleLogsLevel",
+ "ControlNetModels",
"ConversationEntry",
"ConversationEntryContent",
"ConversationEntryContentItem",
@@ -299,42 +250,28 @@
"ConversationEntryContentItem_Text",
"ConversationEntryRole",
"ConversationStart",
- "CopilotCompletionRequestCitationStyle",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
"CopilotCompletionRequestSadtalkerSettings",
"CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
"CreateStreamRequest",
- "CreateStreamRequestCitationStyle",
"CreateStreamRequestOpenaiTtsModel",
"CreateStreamRequestOpenaiVoiceName",
- "CreateStreamRequestResponseFormatType",
- "CreateStreamRequestTranslationModel",
- "CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
"DocExtractPageRequest",
- "DocExtractPageRequestResponseFormatType",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
- "DocSearchPageRequestCitationStyle",
"DocSearchPageRequestKeywordQuery",
- "DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
"DocSummaryPageRequest",
- "DocSummaryPageRequestResponseFormatType",
"DocSummaryPageStatusResponse",
- "DocSummaryRequestResponseFormatType",
"EmailFaceInpaintingPageOutput",
- "EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
"EmbeddingModels",
"EmbeddingsPageOutput",
@@ -342,7 +279,6 @@
"EvalPrompt",
"FaceInpaintingPageOutput",
"FaceInpaintingPageRequest",
- "FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
"FunctionsPageOutput",
@@ -352,24 +288,22 @@
"Gooey",
"GooeyEnvironment",
"GoogleGptPageOutput",
- "GoogleGptPageRequestResponseFormatType",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
- "GoogleImageGenPageRequestSelectedModel",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
+ "ImageSegmentationModels",
"ImageSegmentationPageOutput",
"ImageSegmentationPageRequest",
- "ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
+ "ImageToImageModels",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
"Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
- "Img2ImgPageRequestSelectedControlnetModelItem",
- "Img2ImgPageRequestSelectedModel",
"Img2ImgPageStatusResponse",
+ "InpaintingModels",
"LargeLanguageModels",
"LetterWriterPageOutput",
"LetterWriterPageRequest",
@@ -382,51 +316,33 @@
"LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
"LipsyncTtsRequestOpenaiTtsModel",
"LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
"ObjectInpaintingPageRequest",
- "ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
"PaymentRequiredError",
- "PortraitRequestSelectedModel",
- "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
"QrCodeGeneratorPageRequest",
- "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
- "QrCodeGeneratorPageRequestScheduler",
- "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
- "QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
- "QrCodeRequestImagePromptControlnetModelsItem",
- "QrCodeRequestScheduler",
- "QrCodeRequestSelectedControlnetModelItem",
- "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
- "RelatedQnADocPageRequestCitationStyle",
"RelatedQnADocPageRequestKeywordQuery",
- "RelatedQnADocPageRequestResponseFormatType",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
"RemixImageRequestSelectedControlnetModel",
- "RemixImageRequestSelectedControlnetModelItem",
- "RemixImageRequestSelectedModel",
- "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
+ "ResponseFormatType",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
"ResponseModelFinalPrompt",
@@ -435,38 +351,34 @@
"RunStart",
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
+ "Schedulers",
"SearchReference",
"SeoSummaryPageOutput",
- "SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageStatusResponse",
"SerpSearchLocations",
"SerpSearchType",
"SmartGptPageOutput",
- "SmartGptPageRequestResponseFormatType",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
- "SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
- "SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestTranslationModel",
"StreamError",
- "SynthesizeDataRequestResponseFormatType",
+ "Text2AudioModels",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
+ "TextToImageModels",
"TextToSpeechPageOutput",
"TextToSpeechPageRequestOpenaiTtsModel",
"TextToSpeechPageRequestOpenaiVoiceName",
- "TextToSpeechPageRequestTtsProvider",
"TextToSpeechPageStatusResponse",
+ "TextToSpeechProviders",
"TooManyRequestsError",
"TrainingDataModel",
- "TranslateRequestSelectedModel",
+ "TranslationModels",
"TranslationPageOutput",
"TranslationPageRequest",
- "TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
"UnprocessableEntityError",
- "UpscaleRequestSelectedModelsItem",
+ "UpscalerModels",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
@@ -474,16 +386,12 @@
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
"VideoBotsPageRequest",
- "VideoBotsPageRequestCitationStyle",
"VideoBotsPageRequestFunctionsItem",
"VideoBotsPageRequestFunctionsItemTrigger",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
"VideoBotsPageRequestSadtalkerSettings",
"VideoBotsPageRequestSadtalkerSettingsPreprocess",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
"__version__",
"copilot",
diff --git a/src/gooey/client.py b/src/gooey/client.py
index fc6fa26..10c5a84 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -22,81 +22,64 @@
from json.decoder import JSONDecodeError
from . import core
from .types.vcard import Vcard
-from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
-from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel
-from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
-from .types.qr_code_request_scheduler import QrCodeRequestScheduler
+from .types.control_net_models import ControlNetModels
+from .types.text_to_image_models import TextToImageModels
+from .types.schedulers import Schedulers
from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput
from .types.large_language_models import LargeLanguageModels
from .types.embedding_models import EmbeddingModels
-from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
+from .types.response_format_type import ResponseFormatType
from .types.serp_search_locations import SerpSearchLocations
from .types.serp_search_type import SerpSearchType
from .types.related_qn_a_page_output import RelatedQnAPageOutput
-from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
from .types.seo_summary_page_output import SeoSummaryPageOutput
-from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
from .types.google_gpt_page_output import GoogleGptPageOutput
-from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput
from .types.bulk_runner_page_output import BulkRunnerPageOutput
from .types.eval_prompt import EvalPrompt
from .types.agg_function import AggFunction
-from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .types.bulk_eval_page_output import BulkEvalPageOutput
from .types.asr_models import AsrModels
-from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
from .types.doc_extract_page_output import DocExtractPageOutput
-from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .types.compare_llm_page_output import CompareLlmPageOutput
from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
-from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
+from .types.citation_styles import CitationStyles
from .types.doc_search_page_output import DocSearchPageOutput
-from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
from .types.smart_gpt_page_output import SmartGptPageOutput
-from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
+from .types.combine_documents_chains import CombineDocumentsChains
from .types.doc_summary_page_output import DocSummaryPageOutput
from .types.functions_page_output import FunctionsPageOutput
from .types.sad_talker_settings import SadTalkerSettings
from .types.lipsync_models import LipsyncModels
from .types.lipsync_page_output import LipsyncPageOutput
-from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
+from .types.text_to_speech_providers import TextToSpeechProviders
from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
from .types.lipsync_tts_page_output import LipsyncTtsPageOutput
-from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .types.text_to_speech_page_output import TextToSpeechPageOutput
-from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
-from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
+from .types.translation_models import TranslationModels
+from .types.asr_output_format import AsrOutputFormat
from .types.asr_page_output import AsrPageOutput
+from .types.text2audio_models import Text2AudioModels
from .types.text2audio_page_output import Text2AudioPageOutput
-from .types.translate_request_selected_model import TranslateRequestSelectedModel
from .types.translation_page_output import TranslationPageOutput
-from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel
+from .types.image_to_image_models import ImageToImageModels
from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
from .types.img2img_page_output import Img2ImgPageOutput
-from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
-from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
from .types.compare_text2img_page_output import CompareText2ImgPageOutput
-from .types.product_image_request_selected_model import ProductImageRequestSelectedModel
+from .types.inpainting_models import InpaintingModels
from .types.object_inpainting_page_output import ObjectInpaintingPageOutput
-from .types.portrait_request_selected_model import PortraitRequestSelectedModel
from .types.face_inpainting_page_output import FaceInpaintingPageOutput
-from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
-from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .types.google_image_gen_page_output import GoogleImageGenPageOutput
-from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
+from .types.image_segmentation_models import ImageSegmentationModels
from .types.image_segmentation_page_output import ImageSegmentationPageOutput
-from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
+from .types.upscaler_models import UpscalerModels
from .types.compare_upscaler_page_output import CompareUpscalerPageOutput
from .types.embeddings_page_output import EmbeddingsPageOutput
from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
-from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput
from .types.balance_response import BalanceResponse
from .core.client_wrapper import AsyncClientWrapper
@@ -333,22 +316,20 @@ def qr_code(
use_url_shortener: typing.Optional[bool] = None,
negative_prompt: typing.Optional[str] = None,
image_prompt: typing.Optional[str] = None,
- image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeRequestImagePromptControlnetModelsItem]
- ] = None,
+ image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None,
image_prompt_strength: typing.Optional[float] = None,
image_prompt_scale: typing.Optional[float] = None,
image_prompt_pos_x: typing.Optional[float] = None,
image_prompt_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ selected_model: typing.Optional[TextToImageModels] = None,
+ selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None,
output_width: typing.Optional[int] = None,
output_height: typing.Optional[int] = None,
guidance_scale: typing.Optional[float] = None,
controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
- scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ scheduler: typing.Optional[Schedulers] = None,
seed: typing.Optional[int] = None,
obj_scale: typing.Optional[float] = None,
obj_pos_x: typing.Optional[float] = None,
@@ -384,7 +365,7 @@ def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]]
image_prompt_strength : typing.Optional[float]
@@ -394,9 +375,9 @@ def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeRequestSelectedModel]
+ selected_model : typing.Optional[TextToImageModels]
- selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]]
output_width : typing.Optional[int]
@@ -410,7 +391,7 @@ def qr_code(
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeRequestScheduler]
+ scheduler : typing.Optional[Schedulers]
seed : typing.Optional[int]
@@ -549,7 +530,7 @@ def seo_people_also_ask(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -603,7 +584,7 @@ def seo_people_also_ask(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -735,7 +716,7 @@ def seo_content(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -778,7 +759,7 @@ def seo_content(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -912,7 +893,7 @@ def web_search_llm(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -966,7 +947,7 @@ def web_search_llm(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -1093,7 +1074,7 @@ def personalize_email(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SocialLookupEmailPageOutput:
@@ -1123,7 +1104,7 @@ def personalize_email(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1364,7 +1345,7 @@ def eval(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkEvalPageOutput:
@@ -1408,7 +1389,7 @@ def eval(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1517,7 +1498,7 @@ def synthesize_data(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
@@ -1558,7 +1539,7 @@ def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1665,7 +1646,7 @@ def llm(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareLlmPageOutput:
@@ -1693,7 +1674,7 @@ def llm(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1798,13 +1779,13 @@ def rag(
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+ citation_style: typing.Optional[CitationStyles] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSearchPageOutput:
@@ -1846,7 +1827,7 @@ def rag(
selected_model : typing.Optional[LargeLanguageModels]
- citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
avoid_repetition : typing.Optional[bool]
@@ -1858,7 +1839,7 @@ def rag(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -1974,7 +1955,7 @@ def smart_gpt(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SmartGptPageOutput:
@@ -2008,7 +1989,7 @@ def smart_gpt(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -2110,7 +2091,7 @@ def doc_summary(
task_instructions: typing.Optional[str] = None,
merge_instructions: typing.Optional[str] = None,
selected_model: typing.Optional[LargeLanguageModels] = None,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ chain_type: typing.Optional[CombineDocumentsChains] = None,
selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
avoid_repetition: typing.Optional[bool] = None,
@@ -2118,7 +2099,7 @@ def doc_summary(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
@@ -2141,7 +2122,7 @@ def doc_summary(
selected_model : typing.Optional[LargeLanguageModels]
- chain_type : typing.Optional[typing.Literal["map_reduce"]]
+ chain_type : typing.Optional[CombineDocumentsChains]
selected_asr_model : typing.Optional[AsrModels]
@@ -2157,7 +2138,7 @@ def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -2487,7 +2468,7 @@ def lipsync_tts(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ tts_provider: typing.Optional[TextToSpeechProviders] = None,
uberduck_voice_name: typing.Optional[str] = None,
uberduck_speaking_rate: typing.Optional[float] = None,
google_voice_name: typing.Optional[str] = None,
@@ -2527,7 +2508,7 @@ def lipsync_tts(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
@@ -2693,7 +2674,7 @@ def text_to_speech(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
+ tts_provider: typing.Optional[TextToSpeechProviders] = OMIT,
uberduck_voice_name: typing.Optional[str] = OMIT,
uberduck_speaking_rate: typing.Optional[float] = OMIT,
google_voice_name: typing.Optional[str] = OMIT,
@@ -2726,7 +2707,7 @@ def text_to_speech(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
@@ -2870,8 +2851,8 @@ def speech_recognition(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
selected_model: typing.Optional[AsrModels] = None,
language: typing.Optional[str] = None,
- translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
- output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ translation_model: typing.Optional[TranslationModels] = None,
+ output_format: typing.Optional[AsrOutputFormat] = None,
google_translate_target: typing.Optional[str] = None,
translation_source: typing.Optional[str] = None,
translation_target: typing.Optional[str] = None,
@@ -2896,9 +2877,9 @@ def speech_recognition(
language : typing.Optional[str]
- translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+ translation_model : typing.Optional[TranslationModels]
- output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+ output_format : typing.Optional[AsrOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -3012,7 +2993,7 @@ def text_to_music(
guidance_scale: typing.Optional[float] = OMIT,
seed: typing.Optional[int] = OMIT,
sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Text2AudioPageOutput:
@@ -3042,7 +3023,7 @@ def text_to_music(
sd2upscaling : typing.Optional[bool]
- selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+ selected_models : typing.Optional[typing.Sequence[Text2AudioModels]]
settings : typing.Optional[RunSettings]
@@ -3139,7 +3120,7 @@ def translate(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
texts: typing.Optional[typing.List[str]] = None,
- selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ selected_model: typing.Optional[TranslationModels] = None,
translation_source: typing.Optional[str] = None,
translation_target: typing.Optional[str] = None,
glossary_document: typing.Optional[core.File] = None,
@@ -3158,7 +3139,7 @@ def translate(
texts : typing.Optional[typing.List[str]]
- selected_model : typing.Optional[TranslateRequestSelectedModel]
+ selected_model : typing.Optional[TranslationModels]
translation_source : typing.Optional[str]
@@ -3259,7 +3240,7 @@ def remix_image(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
text_prompt: typing.Optional[str] = None,
- selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_model: typing.Optional[ImageToImageModels] = None,
selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
@@ -3289,7 +3270,7 @@ def remix_image(
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[RemixImageRequestSelectedModel]
+ selected_model : typing.Optional[ImageToImageModels]
selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
@@ -3423,8 +3404,8 @@ def text_to_image(
guidance_scale: typing.Optional[float] = OMIT,
seed: typing.Optional[int] = OMIT,
sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
- scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT,
+ scheduler: typing.Optional[Schedulers] = OMIT,
edit_instruction: typing.Optional[str] = OMIT,
image_guidance_scale: typing.Optional[float] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
@@ -3462,9 +3443,9 @@ def text_to_image(
sd2upscaling : typing.Optional[bool]
- selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.Sequence[TextToImageModels]]
- scheduler : typing.Optional[CompareText2ImgPageRequestScheduler]
+ scheduler : typing.Optional[Schedulers]
edit_instruction : typing.Optional[str]
@@ -3576,7 +3557,7 @@ def product_image(
obj_pos_x: typing.Optional[float] = None,
obj_pos_y: typing.Optional[float] = None,
mask_threshold: typing.Optional[float] = None,
- selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ selected_model: typing.Optional[InpaintingModels] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
@@ -3611,7 +3592,7 @@ def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ProductImageRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -3736,7 +3717,7 @@ def portrait(
face_scale: typing.Optional[float] = None,
face_pos_x: typing.Optional[float] = None,
face_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ selected_model: typing.Optional[InpaintingModels] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
@@ -3769,7 +3750,7 @@ def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[PortraitRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -3894,7 +3875,7 @@ def image_from_email(
face_scale: typing.Optional[float] = OMIT,
face_pos_x: typing.Optional[float] = OMIT,
face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[InpaintingModels] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[int] = OMIT,
@@ -3936,7 +3917,7 @@ def image_from_email(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -4083,7 +4064,7 @@ def image_from_web_search(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[ImageToImageModels] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[int] = OMIT,
@@ -4114,7 +4095,7 @@ def image_from_web_search(
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
- selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
+ selected_model : typing.Optional[ImageToImageModels]
negative_prompt : typing.Optional[str]
@@ -4232,7 +4213,7 @@ def remove_background(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ selected_model: typing.Optional[ImageSegmentationModels] = None,
mask_threshold: typing.Optional[float] = None,
rect_persepective_transform: typing.Optional[bool] = None,
reflection_opacity: typing.Optional[float] = None,
@@ -4255,7 +4236,7 @@ def remove_background(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+ selected_model : typing.Optional[ImageSegmentationModels]
mask_threshold : typing.Optional[float]
@@ -4365,7 +4346,7 @@ def upscale(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
input_image: typing.Optional[core.File] = None,
input_video: typing.Optional[core.File] = None,
- selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_models: typing.Optional[typing.List[UpscalerModels]] = None,
selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
@@ -4389,7 +4370,7 @@ def upscale(
input_video : typing.Optional[core.File]
See core.File for more documentation
- selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.List[UpscalerModels]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -4603,13 +4584,13 @@ def seo_people_also_ask_doc(
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+ citation_style: typing.Optional[CitationStyles] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -4655,7 +4636,7 @@ def seo_people_also_ask_doc(
selected_model : typing.Optional[LargeLanguageModels]
- citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
avoid_repetition : typing.Optional[bool]
@@ -4667,7 +4648,7 @@ def seo_people_also_ask_doc(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -5057,22 +5038,20 @@ async def qr_code(
use_url_shortener: typing.Optional[bool] = None,
negative_prompt: typing.Optional[str] = None,
image_prompt: typing.Optional[str] = None,
- image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeRequestImagePromptControlnetModelsItem]
- ] = None,
+ image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None,
image_prompt_strength: typing.Optional[float] = None,
image_prompt_scale: typing.Optional[float] = None,
image_prompt_pos_x: typing.Optional[float] = None,
image_prompt_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[QrCodeRequestSelectedModel] = None,
- selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None,
+ selected_model: typing.Optional[TextToImageModels] = None,
+ selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None,
output_width: typing.Optional[int] = None,
output_height: typing.Optional[int] = None,
guidance_scale: typing.Optional[float] = None,
controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
- scheduler: typing.Optional[QrCodeRequestScheduler] = None,
+ scheduler: typing.Optional[Schedulers] = None,
seed: typing.Optional[int] = None,
obj_scale: typing.Optional[float] = None,
obj_pos_x: typing.Optional[float] = None,
@@ -5108,7 +5087,7 @@ async def qr_code(
image_prompt : typing.Optional[str]
- image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]
+ image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]]
image_prompt_strength : typing.Optional[float]
@@ -5118,9 +5097,9 @@ async def qr_code(
image_prompt_pos_y : typing.Optional[float]
- selected_model : typing.Optional[QrCodeRequestSelectedModel]
+ selected_model : typing.Optional[TextToImageModels]
- selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]
+ selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]]
output_width : typing.Optional[int]
@@ -5134,7 +5113,7 @@ async def qr_code(
quality : typing.Optional[int]
- scheduler : typing.Optional[QrCodeRequestScheduler]
+ scheduler : typing.Optional[Schedulers]
seed : typing.Optional[int]
@@ -5281,7 +5260,7 @@ async def seo_people_also_ask(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -5335,7 +5314,7 @@ async def seo_people_also_ask(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -5475,7 +5454,7 @@ async def seo_content(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -5518,7 +5497,7 @@ async def seo_content(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -5660,7 +5639,7 @@ async def web_search_llm(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -5714,7 +5693,7 @@ async def web_search_llm(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
@@ -5849,7 +5828,7 @@ async def personalize_email(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SocialLookupEmailPageOutput:
@@ -5879,7 +5858,7 @@ async def personalize_email(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6136,7 +6115,7 @@ async def eval(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> BulkEvalPageOutput:
@@ -6180,7 +6159,7 @@ async def eval(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6297,7 +6276,7 @@ async def synthesize_data(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocExtractPageOutput:
@@ -6338,7 +6317,7 @@ async def synthesize_data(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6453,7 +6432,7 @@ async def llm(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> CompareLlmPageOutput:
@@ -6481,7 +6460,7 @@ async def llm(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6594,13 +6573,13 @@ async def rag(
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT,
+ citation_style: typing.Optional[CitationStyles] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSearchPageOutput:
@@ -6642,7 +6621,7 @@ async def rag(
selected_model : typing.Optional[LargeLanguageModels]
- citation_style : typing.Optional[DocSearchPageRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
avoid_repetition : typing.Optional[bool]
@@ -6654,7 +6633,7 @@ async def rag(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6778,7 +6757,7 @@ async def smart_gpt(
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> SmartGptPageOutput:
@@ -6812,7 +6791,7 @@ async def smart_gpt(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -6922,7 +6901,7 @@ async def doc_summary(
task_instructions: typing.Optional[str] = None,
merge_instructions: typing.Optional[str] = None,
selected_model: typing.Optional[LargeLanguageModels] = None,
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
+ chain_type: typing.Optional[CombineDocumentsChains] = None,
selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
avoid_repetition: typing.Optional[bool] = None,
@@ -6930,7 +6909,7 @@ async def doc_summary(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> DocSummaryPageOutput:
@@ -6953,7 +6932,7 @@ async def doc_summary(
selected_model : typing.Optional[LargeLanguageModels]
- chain_type : typing.Optional[typing.Literal["map_reduce"]]
+ chain_type : typing.Optional[CombineDocumentsChains]
selected_asr_model : typing.Optional[AsrModels]
@@ -6969,7 +6948,7 @@ async def doc_summary(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[DocSummaryRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
settings : typing.Optional[RunSettings]
@@ -7323,7 +7302,7 @@ async def lipsync_tts(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None,
+ tts_provider: typing.Optional[TextToSpeechProviders] = None,
uberduck_voice_name: typing.Optional[str] = None,
uberduck_speaking_rate: typing.Optional[float] = None,
google_voice_name: typing.Optional[str] = None,
@@ -7363,7 +7342,7 @@ async def lipsync_tts(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
@@ -7537,7 +7516,7 @@ async def text_to_speech(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT,
+ tts_provider: typing.Optional[TextToSpeechProviders] = OMIT,
uberduck_voice_name: typing.Optional[str] = OMIT,
uberduck_speaking_rate: typing.Optional[float] = OMIT,
google_voice_name: typing.Optional[str] = OMIT,
@@ -7570,7 +7549,7 @@ async def text_to_speech(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
@@ -7722,8 +7701,8 @@ async def speech_recognition(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
selected_model: typing.Optional[AsrModels] = None,
language: typing.Optional[str] = None,
- translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
- output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
+ translation_model: typing.Optional[TranslationModels] = None,
+ output_format: typing.Optional[AsrOutputFormat] = None,
google_translate_target: typing.Optional[str] = None,
translation_source: typing.Optional[str] = None,
translation_target: typing.Optional[str] = None,
@@ -7748,9 +7727,9 @@ async def speech_recognition(
language : typing.Optional[str]
- translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel]
+ translation_model : typing.Optional[TranslationModels]
- output_format : typing.Optional[SpeechRecognitionRequestOutputFormat]
+ output_format : typing.Optional[AsrOutputFormat]
google_translate_target : typing.Optional[str]
use `translation_model` & `translation_target` instead.
@@ -7872,7 +7851,7 @@ async def text_to_music(
guidance_scale: typing.Optional[float] = OMIT,
seed: typing.Optional[int] = OMIT,
sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> Text2AudioPageOutput:
@@ -7902,7 +7881,7 @@ async def text_to_music(
sd2upscaling : typing.Optional[bool]
- selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]
+ selected_models : typing.Optional[typing.Sequence[Text2AudioModels]]
settings : typing.Optional[RunSettings]
@@ -8007,7 +7986,7 @@ async def translate(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
texts: typing.Optional[typing.List[str]] = None,
- selected_model: typing.Optional[TranslateRequestSelectedModel] = None,
+ selected_model: typing.Optional[TranslationModels] = None,
translation_source: typing.Optional[str] = None,
translation_target: typing.Optional[str] = None,
glossary_document: typing.Optional[core.File] = None,
@@ -8026,7 +8005,7 @@ async def translate(
texts : typing.Optional[typing.List[str]]
- selected_model : typing.Optional[TranslateRequestSelectedModel]
+ selected_model : typing.Optional[TranslationModels]
translation_source : typing.Optional[str]
@@ -8135,7 +8114,7 @@ async def remix_image(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
text_prompt: typing.Optional[str] = None,
- selected_model: typing.Optional[RemixImageRequestSelectedModel] = None,
+ selected_model: typing.Optional[ImageToImageModels] = None,
selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
@@ -8165,7 +8144,7 @@ async def remix_image(
text_prompt : typing.Optional[str]
- selected_model : typing.Optional[RemixImageRequestSelectedModel]
+ selected_model : typing.Optional[ImageToImageModels]
selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel]
@@ -8307,8 +8286,8 @@ async def text_to_image(
guidance_scale: typing.Optional[float] = OMIT,
seed: typing.Optional[int] = OMIT,
sd2upscaling: typing.Optional[bool] = OMIT,
- selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT,
- scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT,
+ selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT,
+ scheduler: typing.Optional[Schedulers] = OMIT,
edit_instruction: typing.Optional[str] = OMIT,
image_guidance_scale: typing.Optional[float] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
@@ -8346,9 +8325,9 @@ async def text_to_image(
sd2upscaling : typing.Optional[bool]
- selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.Sequence[TextToImageModels]]
- scheduler : typing.Optional[CompareText2ImgPageRequestScheduler]
+ scheduler : typing.Optional[Schedulers]
edit_instruction : typing.Optional[str]
@@ -8468,7 +8447,7 @@ async def product_image(
obj_pos_x: typing.Optional[float] = None,
obj_pos_y: typing.Optional[float] = None,
mask_threshold: typing.Optional[float] = None,
- selected_model: typing.Optional[ProductImageRequestSelectedModel] = None,
+ selected_model: typing.Optional[InpaintingModels] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
@@ -8503,7 +8482,7 @@ async def product_image(
mask_threshold : typing.Optional[float]
- selected_model : typing.Optional[ProductImageRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -8636,7 +8615,7 @@ async def portrait(
face_scale: typing.Optional[float] = None,
face_pos_x: typing.Optional[float] = None,
face_pos_y: typing.Optional[float] = None,
- selected_model: typing.Optional[PortraitRequestSelectedModel] = None,
+ selected_model: typing.Optional[InpaintingModels] = None,
negative_prompt: typing.Optional[str] = None,
num_outputs: typing.Optional[int] = None,
quality: typing.Optional[int] = None,
@@ -8669,7 +8648,7 @@ async def portrait(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[PortraitRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -8802,7 +8781,7 @@ async def image_from_email(
face_scale: typing.Optional[float] = OMIT,
face_pos_x: typing.Optional[float] = OMIT,
face_pos_y: typing.Optional[float] = OMIT,
- selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[InpaintingModels] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[int] = OMIT,
@@ -8844,7 +8823,7 @@ async def image_from_email(
face_pos_y : typing.Optional[float]
- selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]
+ selected_model : typing.Optional[InpaintingModels]
negative_prompt : typing.Optional[str]
@@ -8999,7 +8978,7 @@ async def image_from_web_search(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
- selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[ImageToImageModels] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[int] = OMIT,
@@ -9030,7 +9009,7 @@ async def image_from_web_search(
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
- selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel]
+ selected_model : typing.Optional[ImageToImageModels]
negative_prompt : typing.Optional[str]
@@ -9156,7 +9135,7 @@ async def remove_background(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None,
+ selected_model: typing.Optional[ImageSegmentationModels] = None,
mask_threshold: typing.Optional[float] = None,
rect_persepective_transform: typing.Optional[bool] = None,
reflection_opacity: typing.Optional[float] = None,
@@ -9179,7 +9158,7 @@ async def remove_background(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel]
+ selected_model : typing.Optional[ImageSegmentationModels]
mask_threshold : typing.Optional[float]
@@ -9297,7 +9276,7 @@ async def upscale(
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
input_image: typing.Optional[core.File] = None,
input_video: typing.Optional[core.File] = None,
- selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None,
+ selected_models: typing.Optional[typing.List[UpscalerModels]] = None,
selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
@@ -9321,7 +9300,7 @@ async def upscale(
input_video : typing.Optional[core.File]
See core.File for more documentation
- selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]
+ selected_models : typing.Optional[typing.List[UpscalerModels]]
selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]]
@@ -9551,13 +9530,13 @@ async def seo_people_also_ask_doc(
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
selected_model: typing.Optional[LargeLanguageModels] = OMIT,
- citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT,
+ citation_style: typing.Optional[CitationStyles] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
quality: typing.Optional[float] = OMIT,
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
- response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
+ response_format_type: typing.Optional[ResponseFormatType] = OMIT,
serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
@@ -9603,7 +9582,7 @@ async def seo_people_also_ask_doc(
selected_model : typing.Optional[LargeLanguageModels]
- citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
avoid_repetition : typing.Optional[bool]
@@ -9615,7 +9594,7 @@ async def seo_people_also_ask_doc(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
serp_search_location : typing.Optional[SerpSearchLocations]
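
A minimal sketch of passing the consolidated ResponseFormatType to the generated client. The `Gooey` constructor, its `api_key` argument, and the bare `client.llm()` call are assumptions taken from the SDK README rather than from this diff; the key is a placeholder:

from gooey import Gooey  # assumed top-level export of the generated client
from gooey.types import ResponseFormatType

client = Gooey(api_key="sk-...")  # placeholder key

# ResponseFormatType is typing.Union[typing.Literal["text", "json_object"], typing.Any],
# so the same string values accepted by the old per-endpoint aliases still type-check.
fmt: ResponseFormatType = "json_object"
result = client.llm(response_format_type=fmt)
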
diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py
index d33ab85..0d11408 100644
--- a/src/gooey/copilot/__init__.py
+++ b/src/gooey/copilot/__init__.py
@@ -1,27 +1,19 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
- CopilotCompletionRequestCitationStyle,
CopilotCompletionRequestFunctionsItem,
CopilotCompletionRequestFunctionsItemTrigger,
CopilotCompletionRequestOpenaiTtsModel,
CopilotCompletionRequestOpenaiVoiceName,
- CopilotCompletionRequestResponseFormatType,
CopilotCompletionRequestSadtalkerSettings,
CopilotCompletionRequestSadtalkerSettingsPreprocess,
- CopilotCompletionRequestTranslationModel,
- CopilotCompletionRequestTtsProvider,
)
__all__ = [
- "CopilotCompletionRequestCitationStyle",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
"CopilotCompletionRequestSadtalkerSettings",
"CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
]
diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py
index 93b4f19..247a892 100644
--- a/src/gooey/copilot/client.py
+++ b/src/gooey/copilot/client.py
@@ -7,13 +7,13 @@
from ..types.conversation_entry import ConversationEntry
from ..types.large_language_models import LargeLanguageModels
from ..types.embedding_models import EmbeddingModels
-from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
+from ..types.citation_styles import CitationStyles
from ..types.asr_models import AsrModels
-from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
+from ..types.translation_models import TranslationModels
from ..types.lipsync_models import LipsyncModels
from ..types.llm_tools import LlmTools
-from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
-from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
+from ..types.response_format_type import ResponseFormatType
+from ..types.text_to_speech_providers import TextToSpeechProviders
from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
@@ -62,11 +62,11 @@ def completion(
scroll_jump: typing.Optional[int] = None,
embedding_model: typing.Optional[EmbeddingModels] = None,
dense_weight: typing.Optional[float] = None,
- citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
+ citation_style: typing.Optional[CitationStyles] = None,
use_url_shortener: typing.Optional[bool] = None,
asr_model: typing.Optional[AsrModels] = None,
asr_language: typing.Optional[str] = None,
- translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
+ translation_model: typing.Optional[TranslationModels] = None,
user_language: typing.Optional[str] = None,
input_glossary_document: typing.Optional[core.File] = None,
output_glossary_document: typing.Optional[core.File] = None,
@@ -77,8 +77,8 @@ def completion(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
- tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
+ tts_provider: typing.Optional[TextToSpeechProviders] = None,
uberduck_voice_name: typing.Optional[str] = None,
uberduck_speaking_rate: typing.Optional[float] = None,
google_voice_name: typing.Optional[str] = None,
@@ -160,7 +160,7 @@ def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
use_url_shortener : typing.Optional[bool]
@@ -170,7 +170,7 @@ def completion(
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
+ translation_model : typing.Optional[TranslationModels]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -196,9 +196,9 @@ def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
- tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
@@ -414,11 +414,11 @@ async def completion(
scroll_jump: typing.Optional[int] = None,
embedding_model: typing.Optional[EmbeddingModels] = None,
dense_weight: typing.Optional[float] = None,
- citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
+ citation_style: typing.Optional[CitationStyles] = None,
use_url_shortener: typing.Optional[bool] = None,
asr_model: typing.Optional[AsrModels] = None,
asr_language: typing.Optional[str] = None,
- translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
+ translation_model: typing.Optional[TranslationModels] = None,
user_language: typing.Optional[str] = None,
input_glossary_document: typing.Optional[core.File] = None,
output_glossary_document: typing.Optional[core.File] = None,
@@ -429,8 +429,8 @@ async def completion(
quality: typing.Optional[float] = None,
max_tokens: typing.Optional[int] = None,
sampling_temperature: typing.Optional[float] = None,
- response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None,
- tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None,
+ response_format_type: typing.Optional[ResponseFormatType] = None,
+ tts_provider: typing.Optional[TextToSpeechProviders] = None,
uberduck_voice_name: typing.Optional[str] = None,
uberduck_speaking_rate: typing.Optional[float] = None,
google_voice_name: typing.Optional[str] = None,
@@ -512,7 +512,7 @@ async def completion(
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
- citation_style : typing.Optional[CopilotCompletionRequestCitationStyle]
+ citation_style : typing.Optional[CitationStyles]
use_url_shortener : typing.Optional[bool]
@@ -522,7 +522,7 @@ async def completion(
asr_language : typing.Optional[str]
Choose a language to transcribe incoming audio messages to text.
- translation_model : typing.Optional[CopilotCompletionRequestTranslationModel]
+ translation_model : typing.Optional[TranslationModels]
user_language : typing.Optional[str]
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -548,9 +548,9 @@ async def completion(
sampling_temperature : typing.Optional[float]
- response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]
+ response_format_type : typing.Optional[ResponseFormatType]
- tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]
+ tts_provider : typing.Optional[TextToSpeechProviders]
uberduck_voice_name : typing.Optional[str]
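
A hedged sketch of the copilot parameters that now take the shared aliases. The literal values below come from the type definitions elsewhere in this diff; the `client.copilot` accessor name is an assumption and the call itself is left as a comment because the required completion arguments are not shown here:

from gooey.types import CitationStyles, ResponseFormatType, TextToSpeechProviders, TranslationModels

# All four aliases are unions over string literals, so plain strings remain valid values.
citation_style: CitationStyles = "number_markdown"
translation_model: TranslationModels = "google"
response_format_type: ResponseFormatType = "text"
tts_provider: TextToSpeechProviders = "GOOGLE_TTS"

# Assumed accessor name; required arguments omitted:
# client.copilot.completion(
#     citation_style=citation_style,
#     translation_model=translation_model,
#     response_format_type=response_format_type,
#     tts_provider=tts_provider,
# )
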
diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py
index 45a2775..2094b54 100644
--- a/src/gooey/copilot/types/__init__.py
+++ b/src/gooey/copilot/types/__init__.py
@@ -1,27 +1,19 @@
# This file was auto-generated by Fern from our API Definition.
-from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
-from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
from .copilot_completion_request_sadtalker_settings_preprocess import (
CopilotCompletionRequestSadtalkerSettingsPreprocess,
)
-from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
-from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
__all__ = [
- "CopilotCompletionRequestCitationStyle",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
- "CopilotCompletionRequestResponseFormatType",
"CopilotCompletionRequestSadtalkerSettings",
"CopilotCompletionRequestSadtalkerSettingsPreprocess",
- "CopilotCompletionRequestTranslationModel",
- "CopilotCompletionRequestTtsProvider",
]
diff --git a/src/gooey/copilot/types/copilot_completion_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
deleted file mode 100644
index 1bb273a..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestCitationStyle = typing.Union[
- typing.Literal[
- "number",
- "title",
- "url",
- "symbol",
- "markdown",
- "html",
- "slack_mrkdwn",
- "plaintext",
- "number_markdown",
- "number_html",
- "number_slack_mrkdwn",
- "number_plaintext",
- "symbol_markdown",
- "symbol_html",
- "symbol_slack_mrkdwn",
- "symbol_plaintext",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
deleted file mode 100644
index 3c9dbb0..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
deleted file mode 100644
index 10b0b5a..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
deleted file mode 100644
index 4dec4b0..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index 6fe0ebd..c630ac9 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta24",
+ "X-Fern-SDK-Version": "0.0.1-beta25",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index fab1149..dfe0d3c 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -8,19 +8,17 @@
from .animation_prompt import AnimationPrompt
from .asr_chunk import AsrChunk
from .asr_models import AsrModels
+from .asr_output_format import AsrOutputFormat
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
from .asr_page_request import AsrPageRequest
-from .asr_page_request_output_format import AsrPageRequestOutputFormat
-from .asr_page_request_translation_model import AsrPageRequestTranslationModel
from .asr_page_status_response import AsrPageStatusResponse
from .async_api_response_model_v3 import AsyncApiResponseModelV3
from .balance_response import BalanceResponse
from .bot_broadcast_filters import BotBroadcastFilters
from .bot_broadcast_request_model import BotBroadcastRequestModel
from .bulk_eval_page_output import BulkEvalPageOutput
-from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
from .bulk_runner_page_output import BulkRunnerPageOutput
from .bulk_runner_page_request import BulkRunnerPageRequest
@@ -33,19 +31,18 @@
from .chyron_plant_page_output import ChyronPlantPageOutput
from .chyron_plant_page_request import ChyronPlantPageRequest
from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse
+from .citation_styles import CitationStyles
+from .combine_documents_chains import CombineDocumentsChains
from .compare_llm_page_output import CompareLlmPageOutput
-from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .compare_llm_page_status_response import CompareLlmPageStatusResponse
from .compare_text2img_page_output import CompareText2ImgPageOutput
-from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
-from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse
from .compare_upscaler_page_output import CompareUpscalerPageOutput
from .compare_upscaler_page_request import CompareUpscalerPageRequest
-from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse
from .console_logs import ConsoleLogs
from .console_logs_level import ConsoleLogsLevel
+from .control_net_models import ControlNetModels
from .conversation_entry import ConversationEntry
from .conversation_entry_content import ConversationEntryContent
from .conversation_entry_content_item import (
@@ -56,31 +53,21 @@
from .conversation_entry_role import ConversationEntryRole
from .conversation_start import ConversationStart
from .create_stream_request import CreateStreamRequest
-from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
-from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
-from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
-from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
from .create_stream_response import CreateStreamResponse
from .deforum_sd_page_output import DeforumSdPageOutput
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
from .doc_extract_page_request import DocExtractPageRequest
-from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .doc_extract_page_status_response import DocExtractPageStatusResponse
from .doc_search_page_output import DocSearchPageOutput
-from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
from .doc_summary_page_request import DocSummaryPageRequest
-from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
-from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
-from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
from .embedding_models import EmbeddingModels
from .embeddings_page_output import EmbeddingsPageOutput
@@ -88,7 +75,6 @@
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
from .face_inpainting_page_request import FaceInpaintingPageRequest
-from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse
from .final_response import FinalResponse
from .functions_page_output import FunctionsPageOutput
@@ -96,24 +82,22 @@
from .generic_error_response import GenericErrorResponse
from .generic_error_response_detail import GenericErrorResponseDetail
from .google_gpt_page_output import GoogleGptPageOutput
-from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
from .google_gpt_page_status_response import GoogleGptPageStatusResponse
from .google_image_gen_page_output import GoogleImageGenPageOutput
-from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel
from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse
from .http_validation_error import HttpValidationError
+from .image_segmentation_models import ImageSegmentationModels
from .image_segmentation_page_output import ImageSegmentationPageOutput
from .image_segmentation_page_request import ImageSegmentationPageRequest
-from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse
+from .image_to_image_models import ImageToImageModels
from .image_url import ImageUrl
from .image_url_detail import ImageUrlDetail
from .img2img_page_output import Img2ImgPageOutput
from .img2img_page_request import Img2ImgPageRequest
from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
-from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
-from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
from .img2img_page_status_response import Img2ImgPageStatusResponse
+from .inpainting_models import InpaintingModels
from .large_language_models import LargeLanguageModels
from .letter_writer_page_output import LetterWriterPageOutput
from .letter_writer_page_request import LetterWriterPageRequest
@@ -126,54 +110,32 @@
from .lipsync_tts_page_request import LipsyncTtsPageRequest
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
-from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
from .llm_tools import LlmTools
from .message_part import MessagePart
from .object_inpainting_page_output import ObjectInpaintingPageOutput
from .object_inpainting_page_request import ObjectInpaintingPageRequest
-from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse
-from .portrait_request_selected_model import PortraitRequestSelectedModel
-from .product_image_request_selected_model import ProductImageRequestSelectedModel
from .prompt_tree_node import PromptTreeNode
from .prompt_tree_node_prompt import PromptTreeNodePrompt
from .qr_code_generator_page_output import QrCodeGeneratorPageOutput
from .qr_code_generator_page_request import QrCodeGeneratorPageRequest
-from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
-from .qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse
-from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem
-from .qr_code_request_scheduler import QrCodeRequestScheduler
-from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem
-from .qr_code_request_selected_model import QrCodeRequestSelectedModel
from .recipe_function import RecipeFunction
from .recipe_function_trigger import RecipeFunctionTrigger
from .recipe_run_state import RecipeRunState
from .related_doc_search_response import RelatedDocSearchResponse
from .related_google_gpt_response import RelatedGoogleGptResponse
from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
-from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
from .related_qn_a_page_output import RelatedQnAPageOutput
-from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
-from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
-from .remix_image_request_selected_model import RemixImageRequestSelectedModel
-from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel
from .reply_button import ReplyButton
+from .response_format_type import ResponseFormatType
from .response_model import ResponseModel
from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery
from .response_model_final_prompt import ResponseModelFinalPrompt
@@ -182,36 +144,32 @@
from .run_start import RunStart
from .sad_talker_settings import SadTalkerSettings
from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess
+from .schedulers import Schedulers
from .search_reference import SearchReference
from .seo_summary_page_output import SeoSummaryPageOutput
-from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
from .seo_summary_page_status_response import SeoSummaryPageStatusResponse
from .serp_search_locations import SerpSearchLocations
from .serp_search_type import SerpSearchType
from .smart_gpt_page_output import SmartGptPageOutput
-from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
from .smart_gpt_page_status_response import SmartGptPageStatusResponse
from .social_lookup_email_page_output import SocialLookupEmailPageOutput
-from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
-from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
-from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .stream_error import StreamError
-from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
+from .text2audio_models import Text2AudioModels
from .text2audio_page_output import Text2AudioPageOutput
from .text2audio_page_status_response import Text2AudioPageStatusResponse
+from .text_to_image_models import TextToImageModels
from .text_to_speech_page_output import TextToSpeechPageOutput
from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
-from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse
+from .text_to_speech_providers import TextToSpeechProviders
from .training_data_model import TrainingDataModel
-from .translate_request_selected_model import TranslateRequestSelectedModel
+from .translation_models import TranslationModels
from .translation_page_output import TranslationPageOutput
from .translation_page_request import TranslationPageRequest
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
from .translation_page_status_response import TranslationPageStatusResponse
-from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
+from .upscaler_models import UpscalerModels
from .validation_error import ValidationError
from .validation_error_loc_item import ValidationErrorLocItem
from .vcard import Vcard
@@ -219,16 +177,12 @@
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
from .video_bots_page_request import VideoBotsPageRequest
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
from .video_bots_page_status_response import VideoBotsPageStatusResponse
__all__ = [
@@ -240,19 +194,17 @@
"AnimationPrompt",
"AsrChunk",
"AsrModels",
+ "AsrOutputFormat",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
"AsrPageRequest",
- "AsrPageRequestOutputFormat",
- "AsrPageRequestTranslationModel",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
"BalanceResponse",
"BotBroadcastFilters",
"BotBroadcastRequestModel",
"BulkEvalPageOutput",
- "BulkEvalPageRequestResponseFormatType",
"BulkEvalPageStatusResponse",
"BulkRunnerPageOutput",
"BulkRunnerPageRequest",
@@ -265,19 +217,18 @@
"ChyronPlantPageOutput",
"ChyronPlantPageRequest",
"ChyronPlantPageStatusResponse",
+ "CitationStyles",
+ "CombineDocumentsChains",
"CompareLlmPageOutput",
- "CompareLlmPageRequestResponseFormatType",
"CompareLlmPageStatusResponse",
"CompareText2ImgPageOutput",
- "CompareText2ImgPageRequestScheduler",
- "CompareText2ImgPageRequestSelectedModelsItem",
"CompareText2ImgPageStatusResponse",
"CompareUpscalerPageOutput",
"CompareUpscalerPageRequest",
- "CompareUpscalerPageRequestSelectedModelsItem",
"CompareUpscalerPageStatusResponse",
"ConsoleLogs",
"ConsoleLogsLevel",
+ "ControlNetModels",
"ConversationEntry",
"ConversationEntryContent",
"ConversationEntryContentItem",
@@ -286,31 +237,21 @@
"ConversationEntryRole",
"ConversationStart",
"CreateStreamRequest",
- "CreateStreamRequestCitationStyle",
"CreateStreamRequestOpenaiTtsModel",
"CreateStreamRequestOpenaiVoiceName",
- "CreateStreamRequestResponseFormatType",
- "CreateStreamRequestTranslationModel",
- "CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
"DocExtractPageRequest",
- "DocExtractPageRequestResponseFormatType",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
- "DocSearchPageRequestCitationStyle",
"DocSearchPageRequestKeywordQuery",
- "DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
"DocSummaryPageRequest",
- "DocSummaryPageRequestResponseFormatType",
"DocSummaryPageStatusResponse",
- "DocSummaryRequestResponseFormatType",
"EmailFaceInpaintingPageOutput",
- "EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
"EmbeddingModels",
"EmbeddingsPageOutput",
@@ -318,7 +259,6 @@
"EvalPrompt",
"FaceInpaintingPageOutput",
"FaceInpaintingPageRequest",
- "FaceInpaintingPageRequestSelectedModel",
"FaceInpaintingPageStatusResponse",
"FinalResponse",
"FunctionsPageOutput",
@@ -326,24 +266,22 @@
"GenericErrorResponse",
"GenericErrorResponseDetail",
"GoogleGptPageOutput",
- "GoogleGptPageRequestResponseFormatType",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
- "GoogleImageGenPageRequestSelectedModel",
"GoogleImageGenPageStatusResponse",
"HttpValidationError",
+ "ImageSegmentationModels",
"ImageSegmentationPageOutput",
"ImageSegmentationPageRequest",
- "ImageSegmentationPageRequestSelectedModel",
"ImageSegmentationPageStatusResponse",
+ "ImageToImageModels",
"ImageUrl",
"ImageUrlDetail",
"Img2ImgPageOutput",
"Img2ImgPageRequest",
"Img2ImgPageRequestSelectedControlnetModel",
- "Img2ImgPageRequestSelectedControlnetModelItem",
- "Img2ImgPageRequestSelectedModel",
"Img2ImgPageStatusResponse",
+ "InpaintingModels",
"LargeLanguageModels",
"LetterWriterPageOutput",
"LetterWriterPageRequest",
@@ -356,50 +294,32 @@
"LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
"LipsyncTtsRequestOpenaiTtsModel",
"LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
"ObjectInpaintingPageOutput",
"ObjectInpaintingPageRequest",
- "ObjectInpaintingPageRequestSelectedModel",
"ObjectInpaintingPageStatusResponse",
- "PortraitRequestSelectedModel",
- "ProductImageRequestSelectedModel",
"PromptTreeNode",
"PromptTreeNodePrompt",
"QrCodeGeneratorPageOutput",
"QrCodeGeneratorPageRequest",
- "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem",
- "QrCodeGeneratorPageRequestScheduler",
- "QrCodeGeneratorPageRequestSelectedControlnetModelItem",
- "QrCodeGeneratorPageRequestSelectedModel",
"QrCodeGeneratorPageStatusResponse",
- "QrCodeRequestImagePromptControlnetModelsItem",
- "QrCodeRequestScheduler",
- "QrCodeRequestSelectedControlnetModelItem",
- "QrCodeRequestSelectedModel",
"RecipeFunction",
"RecipeFunctionTrigger",
"RecipeRunState",
"RelatedDocSearchResponse",
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
- "RelatedQnADocPageRequestCitationStyle",
"RelatedQnADocPageRequestKeywordQuery",
- "RelatedQnADocPageRequestResponseFormatType",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
"RemixImageRequestSelectedControlnetModel",
- "RemixImageRequestSelectedControlnetModelItem",
- "RemixImageRequestSelectedModel",
- "RemoveBackgroundRequestSelectedModel",
"ReplyButton",
+ "ResponseFormatType",
"ResponseModel",
"ResponseModelFinalKeywordQuery",
"ResponseModelFinalPrompt",
@@ -408,36 +328,32 @@
"RunStart",
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
+ "Schedulers",
"SearchReference",
"SeoSummaryPageOutput",
- "SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageStatusResponse",
"SerpSearchLocations",
"SerpSearchType",
"SmartGptPageOutput",
- "SmartGptPageRequestResponseFormatType",
"SmartGptPageStatusResponse",
"SocialLookupEmailPageOutput",
- "SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
- "SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestTranslationModel",
"StreamError",
- "SynthesizeDataRequestResponseFormatType",
+ "Text2AudioModels",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
+ "TextToImageModels",
"TextToSpeechPageOutput",
"TextToSpeechPageRequestOpenaiTtsModel",
"TextToSpeechPageRequestOpenaiVoiceName",
- "TextToSpeechPageRequestTtsProvider",
"TextToSpeechPageStatusResponse",
+ "TextToSpeechProviders",
"TrainingDataModel",
- "TranslateRequestSelectedModel",
+ "TranslationModels",
"TranslationPageOutput",
"TranslationPageRequest",
- "TranslationPageRequestSelectedModel",
"TranslationPageStatusResponse",
- "UpscaleRequestSelectedModelsItem",
+ "UpscalerModels",
"ValidationError",
"ValidationErrorLocItem",
"Vcard",
@@ -445,15 +361,11 @@
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
"VideoBotsPageRequest",
- "VideoBotsPageRequestCitationStyle",
"VideoBotsPageRequestFunctionsItem",
"VideoBotsPageRequestFunctionsItemTrigger",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
- "VideoBotsPageRequestResponseFormatType",
"VideoBotsPageRequestSadtalkerSettings",
"VideoBotsPageRequestSadtalkerSettingsPreprocess",
- "VideoBotsPageRequestTranslationModel",
- "VideoBotsPageRequestTtsProvider",
"VideoBotsPageStatusResponse",
]
diff --git a/src/gooey/types/asr_output_format.py b/src/gooey/types/asr_output_format.py
new file mode 100644
index 0000000..b3b0e2d
--- /dev/null
+++ b/src/gooey/types/asr_output_format.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AsrOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
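
A minimal sketch of the request model that consumes this alias, using only fields visible in the AsrPageRequest hunk that follows; the audio URL is a placeholder:

from gooey.types import AsrPageRequest

request = AsrPageRequest(
    documents=["https://example.com/audio.wav"],  # placeholder URL
    translation_model="google",  # TranslationModels literal
    output_format="srt",         # AsrOutputFormat literal
)
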
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
index 9e3c8a5..1210679 100644
--- a/src/gooey/types/asr_page_request.py
+++ b/src/gooey/types/asr_page_request.py
@@ -5,8 +5,8 @@
from .recipe_function import RecipeFunction
import pydantic
from .asr_models import AsrModels
-from .asr_page_request_translation_model import AsrPageRequestTranslationModel
-from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .translation_models import TranslationModels
+from .asr_output_format import AsrOutputFormat
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -21,8 +21,8 @@ class AsrPageRequest(UniversalBaseModel):
documents: typing.List[str]
selected_model: typing.Optional[AsrModels] = None
language: typing.Optional[str] = None
- translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
- output_format: typing.Optional[AsrPageRequestOutputFormat] = None
+ translation_model: typing.Optional[TranslationModels] = None
+ output_format: typing.Optional[AsrOutputFormat] = None
google_translate_target: typing.Optional[str] = pydantic.Field(default=None)
"""
use `translation_model` & `translation_target` instead.
diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py
deleted file mode 100644
index 101e681..0000000
--- a/src/gooey/types/asr_page_request_output_format.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py
deleted file mode 100644
index d5dcef6..0000000
--- a/src/gooey/types/asr_page_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
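
With the two private ASR enums removed, `AsrPageRequest` now uses the shared `TranslationModels` and the new `AsrOutputFormat`. A construction sketch using only the fields visible in the hunk above; the document URL is a placeholder and the remaining fields are assumed to keep their defaults.

```python
from gooey.types.asr_page_request import AsrPageRequest

request = AsrPageRequest(
    documents=["https://example.com/meeting.wav"],  # required; placeholder URL
    translation_model="google",                     # TranslationModels: "google" | "ghana_nlp"
    output_format="srt",                            # AsrOutputFormat: "text" | "json" | "srt" | "vtt"
)
```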
diff --git a/src/gooey/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py
deleted file mode 100644
index f1c242f..0000000
--- a/src/gooey/types/bulk_eval_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/create_stream_request_citation_style.py b/src/gooey/types/citation_styles.py
similarity index 90%
rename from src/gooey/types/create_stream_request_citation_style.py
rename to src/gooey/types/citation_styles.py
index e57bab1..4d822c2 100644
--- a/src/gooey/types/create_stream_request_citation_style.py
+++ b/src/gooey/types/citation_styles.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestCitationStyle = typing.Union[
+CitationStyles = typing.Union[
typing.Literal[
"number",
"title",
diff --git a/src/gooey/types/combine_documents_chains.py b/src/gooey/types/combine_documents_chains.py
new file mode 100644
index 0000000..c457e00
--- /dev/null
+++ b/src/gooey/types/combine_documents_chains.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CombineDocumentsChains = typing.Literal["map_reduce"]
diff --git a/src/gooey/types/compare_llm_page_request_response_format_type.py b/src/gooey/types/compare_llm_page_request_response_format_type.py
deleted file mode 100644
index a846068..0000000
--- a/src/gooey/types/compare_llm_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/compare_text2img_page_request_scheduler.py b/src/gooey/types/compare_text2img_page_request_scheduler.py
deleted file mode 100644
index 29ce840..0000000
--- a/src/gooey/types/compare_text2img_page_request_scheduler.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CompareText2ImgPageRequestScheduler = typing.Union[
- typing.Literal[
- "singlestep_dpm_solver",
- "multistep_dpm_solver",
- "dpm_sde",
- "dpm_discrete",
- "dpm_discrete_ancestral",
- "unipc",
- "lms_discrete",
- "heun",
- "euler",
- "euler_ancestral",
- "pndm",
- "ddpm",
- "ddim",
- "deis",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/compare_text2img_page_request_selected_models_item.py b/src/gooey/types/compare_text2img_page_request_selected_models_item.py
deleted file mode 100644
index 4154491..0000000
--- a/src/gooey/types/compare_text2img_page_request_selected_models_item.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CompareText2ImgPageRequestSelectedModelsItem = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "dall_e_3",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- "deepfloyd_if",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py
index 8cfb4e7..849d668 100644
--- a/src/gooey/types/compare_upscaler_page_request.py
+++ b/src/gooey/types/compare_upscaler_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem
+from .upscaler_models import UpscalerModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -23,7 +23,7 @@ class CompareUpscalerPageRequest(UniversalBaseModel):
The final upsampling scale of the image
"""
- selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None
+ selected_models: typing.Optional[typing.List[UpscalerModels]] = None
selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None
settings: typing.Optional[RunSettings] = None
diff --git a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py
deleted file mode 100644
index eff4f6e..0000000
--- a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CompareUpscalerPageRequestSelectedModelsItem = typing.Union[
- typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
-]
diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/control_net_models.py
similarity index 89%
rename from src/gooey/types/qr_code_request_selected_controlnet_model_item.py
rename to src/gooey/types/control_net_models.py
index c5cdc8d..5c5f68a 100644
--- a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py
+++ b/src/gooey/types/control_net_models.py
@@ -2,7 +2,7 @@
import typing
-QrCodeRequestSelectedControlnetModelItem = typing.Union[
+ControlNetModels = typing.Union[
typing.Literal[
"sd_controlnet_canny",
"sd_controlnet_depth",
diff --git a/src/gooey/types/create_stream_request.py b/src/gooey/types/create_stream_request.py
index f412021..bf4541f 100644
--- a/src/gooey/types/create_stream_request.py
+++ b/src/gooey/types/create_stream_request.py
@@ -8,13 +8,13 @@
from .conversation_entry import ConversationEntry
from .large_language_models import LargeLanguageModels
from .embedding_models import EmbeddingModels
-from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
+from .citation_styles import CitationStyles
from .asr_models import AsrModels
-from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
+from .translation_models import TranslationModels
from .lipsync_models import LipsyncModels
from .llm_tools import LlmTools
-from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
-from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
+from .response_format_type import ResponseFormatType
+from .text_to_speech_providers import TextToSpeechProviders
from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
from .sad_talker_settings import SadTalkerSettings
@@ -92,7 +92,7 @@ class CreateStreamRequest(UniversalBaseModel):
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
"""
- citation_style: typing.Optional[CreateStreamRequestCitationStyle] = None
+ citation_style: typing.Optional[CitationStyles] = None
use_url_shortener: typing.Optional[bool] = None
asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None)
"""
@@ -104,7 +104,7 @@ class CreateStreamRequest(UniversalBaseModel):
Choose a language to transcribe incoming audio messages to text.
"""
- translation_model: typing.Optional[CreateStreamRequestTranslationModel] = None
+ translation_model: typing.Optional[TranslationModels] = None
user_language: typing.Optional[str] = pydantic.Field(default=None)
"""
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -131,8 +131,8 @@ class CreateStreamRequest(UniversalBaseModel):
quality: typing.Optional[float] = None
max_tokens: typing.Optional[int] = None
sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = None
- tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = None
+ response_format_type: typing.Optional[ResponseFormatType] = None
+ tts_provider: typing.Optional[TextToSpeechProviders] = None
uberduck_voice_name: typing.Optional[str] = None
uberduck_speaking_rate: typing.Optional[float] = None
google_voice_name: typing.Optional[str] = None
diff --git a/src/gooey/types/create_stream_request_response_format_type.py b/src/gooey/types/create_stream_request_response_format_type.py
deleted file mode 100644
index dc5024d..0000000
--- a/src/gooey/types/create_stream_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/create_stream_request_translation_model.py b/src/gooey/types/create_stream_request_translation_model.py
deleted file mode 100644
index 3876937..0000000
--- a/src/gooey/types/create_stream_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/create_stream_request_tts_provider.py b/src/gooey/types/create_stream_request_tts_provider.py
deleted file mode 100644
index cad602d..0000000
--- a/src/gooey/types/create_stream_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
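
With its private enums deleted, `CreateStreamRequest` takes the same shared aliases as the other copilot-style requests. A hedged sketch of just those keyword arguments; the stream's other required fields sit outside this diff and are deliberately omitted.

```python
# Literal values are confirmed by the type files in this diff; everything else
# needed to actually start a stream is intentionally left out.
stream_kwargs = dict(
    citation_style="number_markdown",    # CitationStyles
    translation_model="google",          # TranslationModels
    response_format_type="json_object",  # ResponseFormatType
    tts_provider="ELEVEN_LABS",          # TextToSpeechProviders
)
```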
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
index 6182439..4beeb94 100644
--- a/src/gooey/types/doc_extract_page_request.py
+++ b/src/gooey/types/doc_extract_page_request.py
@@ -6,7 +6,7 @@
import pydantic
from .asr_models import AsrModels
from .large_language_models import LargeLanguageModels
-from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
+from .response_format_type import ResponseFormatType
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -30,7 +30,7 @@ class DocExtractPageRequest(UniversalBaseModel):
quality: typing.Optional[float] = None
max_tokens: typing.Optional[int] = None
sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None
+ response_format_type: typing.Optional[ResponseFormatType] = None
settings: typing.Optional[RunSettings] = None
if IS_PYDANTIC_V2:
diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py
deleted file mode 100644
index 0ad7c14..0000000
--- a/src/gooey/types/doc_extract_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_search_page_request_citation_style.py b/src/gooey/types/doc_search_page_request_citation_style.py
deleted file mode 100644
index b47b3be..0000000
--- a/src/gooey/types/doc_search_page_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSearchPageRequestCitationStyle = typing.Union[
- typing.Literal[
- "number",
- "title",
- "url",
- "symbol",
- "markdown",
- "html",
- "slack_mrkdwn",
- "plaintext",
- "number_markdown",
- "number_html",
- "number_slack_mrkdwn",
- "number_plaintext",
- "symbol_markdown",
- "symbol_html",
- "symbol_slack_mrkdwn",
- "symbol_plaintext",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py
deleted file mode 100644
index 856b641..0000000
--- a/src/gooey/types/doc_search_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
index 9bd8770..dadd11a 100644
--- a/src/gooey/types/doc_summary_page_request.py
+++ b/src/gooey/types/doc_summary_page_request.py
@@ -5,8 +5,9 @@
from .recipe_function import RecipeFunction
import pydantic
from .large_language_models import LargeLanguageModels
+from .combine_documents_chains import CombineDocumentsChains
from .asr_models import AsrModels
-from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
+from .response_format_type import ResponseFormatType
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -22,7 +23,7 @@ class DocSummaryPageRequest(UniversalBaseModel):
task_instructions: typing.Optional[str] = None
merge_instructions: typing.Optional[str] = None
selected_model: typing.Optional[LargeLanguageModels] = None
- chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
+ chain_type: typing.Optional[CombineDocumentsChains] = None
selected_asr_model: typing.Optional[AsrModels] = None
google_translate_target: typing.Optional[str] = None
avoid_repetition: typing.Optional[bool] = None
@@ -30,7 +31,7 @@ class DocSummaryPageRequest(UniversalBaseModel):
quality: typing.Optional[float] = None
max_tokens: typing.Optional[int] = None
sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None
+ response_format_type: typing.Optional[ResponseFormatType] = None
settings: typing.Optional[RunSettings] = None
if IS_PYDANTIC_V2:
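
`chain_type` still accepts the same single value; it is now just expressed through the new `CombineDocumentsChains` alias, alongside the shared `ResponseFormatType`. A field-level sketch (document inputs omitted):

```python
from gooey.types.combine_documents_chains import CombineDocumentsChains
from gooey.types.response_format_type import ResponseFormatType

chain: CombineDocumentsChains = "map_reduce"  # only member of the Literal
fmt: ResponseFormatType = "text"
```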
diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py
deleted file mode 100644
index 318ad7f..0000000
--- a/src/gooey/types/doc_summary_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py
deleted file mode 100644
index 8fabf9b..0000000
--- a/src/gooey/types/doc_summary_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py
deleted file mode 100644
index 822b5a6..0000000
--- a/src/gooey/types/email_face_inpainting_page_request_selected_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EmailFaceInpaintingPageRequestSelectedModel = typing.Union[
- typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
-]
diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py
index a653205..8e88a4f 100644
--- a/src/gooey/types/face_inpainting_page_request.py
+++ b/src/gooey/types/face_inpainting_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel
+from .inpainting_models import InpaintingModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -21,7 +21,7 @@ class FaceInpaintingPageRequest(UniversalBaseModel):
face_scale: typing.Optional[float] = None
face_pos_x: typing.Optional[float] = None
face_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None
+ selected_model: typing.Optional[InpaintingModels] = None
negative_prompt: typing.Optional[str] = None
num_outputs: typing.Optional[int] = None
quality: typing.Optional[int] = None
diff --git a/src/gooey/types/face_inpainting_page_request_selected_model.py b/src/gooey/types/face_inpainting_page_request_selected_model.py
deleted file mode 100644
index 9b8eab6..0000000
--- a/src/gooey/types/face_inpainting_page_request_selected_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-FaceInpaintingPageRequestSelectedModel = typing.Union[
- typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
-]
diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py
deleted file mode 100644
index dd04dec..0000000
--- a/src/gooey/types/google_gpt_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/google_image_gen_page_request_selected_model.py b/src/gooey/types/google_image_gen_page_request_selected_model.py
deleted file mode 100644
index c872962..0000000
--- a/src/gooey/types/google_image_gen_page_request_selected_model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GoogleImageGenPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "instruct_pix2pix",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/image_segmentation_models.py b/src/gooey/types/image_segmentation_models.py
new file mode 100644
index 0000000..aae4fee
--- /dev/null
+++ b/src/gooey/types/image_segmentation_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ImageSegmentationModels = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py
index a2ea60d..3e1952c 100644
--- a/src/gooey/types/image_segmentation_page_request.py
+++ b/src/gooey/types/image_segmentation_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel
+from .image_segmentation_models import ImageSegmentationModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -17,7 +17,7 @@ class ImageSegmentationPageRequest(UniversalBaseModel):
"""
input_image: str
- selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None
+ selected_model: typing.Optional[ImageSegmentationModels] = None
mask_threshold: typing.Optional[float] = None
rect_persepective_transform: typing.Optional[bool] = None
reflection_opacity: typing.Optional[float] = None
diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py
deleted file mode 100644
index 9b4b8d7..0000000
--- a/src/gooey/types/image_segmentation_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/image_to_image_models.py
similarity index 89%
rename from src/gooey/types/remix_image_request_selected_model.py
rename to src/gooey/types/image_to_image_models.py
index 245d6b0..70c9201 100644
--- a/src/gooey/types/remix_image_request_selected_model.py
+++ b/src/gooey/types/image_to_image_models.py
@@ -2,7 +2,7 @@
import typing
-RemixImageRequestSelectedModel = typing.Union[
+ImageToImageModels = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py
index f3cfd2f..2b689aa 100644
--- a/src/gooey/types/img2img_page_request.py
+++ b/src/gooey/types/img2img_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel
+from .image_to_image_models import ImageToImageModels
from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -19,7 +19,7 @@ class Img2ImgPageRequest(UniversalBaseModel):
input_image: str
text_prompt: typing.Optional[str] = None
- selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None
+ selected_model: typing.Optional[ImageToImageModels] = None
selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None
negative_prompt: typing.Optional[str] = None
num_outputs: typing.Optional[int] = None
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model.py b/src/gooey/types/img2img_page_request_selected_controlnet_model.py
index df9cb36..514d737 100644
--- a/src/gooey/types/img2img_page_request_selected_controlnet_model.py
+++ b/src/gooey/types/img2img_page_request_selected_controlnet_model.py
@@ -1,19 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem
+from .control_net_models import ControlNetModels
-Img2ImgPageRequestSelectedControlnetModel = typing.Union[
- typing.List[Img2ImgPageRequestSelectedControlnetModelItem],
- typing.Literal["sd_controlnet_canny"],
- typing.Literal["sd_controlnet_depth"],
- typing.Literal["sd_controlnet_hed"],
- typing.Literal["sd_controlnet_mlsd"],
- typing.Literal["sd_controlnet_normal"],
- typing.Literal["sd_controlnet_openpose"],
- typing.Literal["sd_controlnet_scribble"],
- typing.Literal["sd_controlnet_seg"],
- typing.Literal["sd_controlnet_tile"],
- typing.Literal["sd_controlnet_brightness"],
- typing.Literal["control_v1p_sd15_qrcode_monster_v2"],
-]
+Img2ImgPageRequestSelectedControlnetModel = typing.Union[typing.List[ControlNetModels], ControlNetModels]
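
The rewritten union means `selected_controlnet_model` accepts either one `ControlNetModels` literal or a list of them, while `selected_model` moves to the shared `ImageToImageModels`. A construction sketch limited to fields visible in the `Img2ImgPageRequest` hunk; the image URL is a placeholder and the remaining fields are assumed to keep their defaults.

```python
from gooey.types.img2img_page_request import Img2ImgPageRequest

request = Img2ImgPageRequest(
    input_image="https://example.com/input.png",        # required; placeholder URL
    text_prompt="a watercolor landscape",
    selected_model="dream_shaper",                       # ImageToImageModels
    selected_controlnet_model=["sd_controlnet_canny"],   # a single literal is also accepted
)
```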
diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
deleted file mode 100644
index 1569cf5..0000000
--- a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/img2img_page_request_selected_model.py b/src/gooey/types/img2img_page_request_selected_model.py
deleted file mode 100644
index 506c2b1..0000000
--- a/src/gooey/types/img2img_page_request_selected_model.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-Img2ImgPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "instruct_pix2pix",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/inpainting_models.py b/src/gooey/types/inpainting_models.py
new file mode 100644
index 0000000..f851858
--- /dev/null
+++ b/src/gooey/types/inpainting_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+InpaintingModels = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
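
Both inpainting requests (face above, object below) now share this `InpaintingModels` alias instead of carrying identical private enums. Minimal typed usage:

```python
from gooey.types.inpainting_models import InpaintingModels

inpainting_model: InpaintingModels = "runway_ml"  # "sd_2" | "runway_ml" | "dall_e" | "jack_qiao"
```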
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
index f063d03..9c90696 100644
--- a/src/gooey/types/lipsync_tts_page_request.py
+++ b/src/gooey/types/lipsync_tts_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
+from .text_to_speech_providers import TextToSpeechProviders
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .sad_talker_settings import SadTalkerSettings
@@ -21,7 +21,7 @@ class LipsyncTtsPageRequest(UniversalBaseModel):
"""
text_prompt: str
- tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None
+ tts_provider: typing.Optional[TextToSpeechProviders] = None
uberduck_voice_name: typing.Optional[str] = None
uberduck_speaking_rate: typing.Optional[float] = None
google_voice_name: typing.Optional[str] = None
diff --git a/src/gooey/types/lipsync_tts_page_request_tts_provider.py b/src/gooey/types/lipsync_tts_page_request_tts_provider.py
deleted file mode 100644
index 7e73fda..0000000
--- a/src/gooey/types/lipsync_tts_page_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsPageRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
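
`TextToSpeechProviders` now backs the `tts_provider` field on the lipsync-TTS, text-to-speech, copilot, and stream requests that previously each had their own copy. Typed usage:

```python
from gooey.types.text_to_speech_providers import TextToSpeechProviders

provider: TextToSpeechProviders = "GOOGLE_TTS"
# Also valid: "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"
```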
diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py
index 50b5b72..54d23b5 100644
--- a/src/gooey/types/object_inpainting_page_request.py
+++ b/src/gooey/types/object_inpainting_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel
+from .inpainting_models import InpaintingModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -22,7 +22,7 @@ class ObjectInpaintingPageRequest(UniversalBaseModel):
obj_pos_x: typing.Optional[float] = None
obj_pos_y: typing.Optional[float] = None
mask_threshold: typing.Optional[float] = None
- selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None
+ selected_model: typing.Optional[InpaintingModels] = None
negative_prompt: typing.Optional[str] = None
num_outputs: typing.Optional[int] = None
quality: typing.Optional[int] = None
diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py
deleted file mode 100644
index 92f1302..0000000
--- a/src/gooey/types/object_inpainting_page_request_selected_model.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ObjectInpaintingPageRequestSelectedModel = typing.Union[
- typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any
-]
diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py
deleted file mode 100644
index 6c4a5ce..0000000
--- a/src/gooey/types/portrait_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py
deleted file mode 100644
index f1ce039..0000000
--- a/src/gooey/types/product_image_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any]
diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py
index 68f3730..60831cb 100644
--- a/src/gooey/types/qr_code_generator_page_request.py
+++ b/src/gooey/types/qr_code_generator_page_request.py
@@ -5,14 +5,9 @@
from .recipe_function import RecipeFunction
import pydantic
from .vcard import Vcard
-from .qr_code_generator_page_request_image_prompt_controlnet_models_item import (
- QrCodeGeneratorPageRequestImagePromptControlnetModelsItem,
-)
-from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel
-from .qr_code_generator_page_request_selected_controlnet_model_item import (
- QrCodeGeneratorPageRequestSelectedControlnetModelItem,
-)
-from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler
+from .control_net_models import ControlNetModels
+from .text_to_image_models import TextToImageModels
+from .schedulers import Schedulers
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -32,24 +27,20 @@ class QrCodeGeneratorPageRequest(UniversalBaseModel):
text_prompt: str
negative_prompt: typing.Optional[str] = None
image_prompt: typing.Optional[str] = None
- image_prompt_controlnet_models: typing.Optional[
- typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem]
- ] = None
+ image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None
image_prompt_strength: typing.Optional[float] = None
image_prompt_scale: typing.Optional[float] = None
image_prompt_pos_x: typing.Optional[float] = None
image_prompt_pos_y: typing.Optional[float] = None
- selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None
- selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = (
- None
- )
+ selected_model: typing.Optional[TextToImageModels] = None
+ selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None
output_width: typing.Optional[int] = None
output_height: typing.Optional[int] = None
guidance_scale: typing.Optional[float] = None
controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None
num_outputs: typing.Optional[int] = None
quality: typing.Optional[int] = None
- scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None
+ scheduler: typing.Optional[Schedulers] = None
seed: typing.Optional[int] = None
obj_scale: typing.Optional[float] = None
obj_pos_x: typing.Optional[float] = None
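
After this consolidation the QR-code request reuses three shared aliases at once. A hedged sketch of just those keyword arguments; the QR payload fields (the URL or data to encode) sit outside this hunk and are omitted, and the prompt text is illustrative only.

```python
qr_style_kwargs = dict(
    text_prompt="a QR code made of autumn leaves",              # required field shown above
    image_prompt_controlnet_models=["sd_controlnet_depth"],     # List[ControlNetModels]
    selected_model="dall_e_3",                                  # TextToImageModels
    selected_controlnet_model=["control_v1p_sd15_qrcode_monster_v2"],
    scheduler="multistep_dpm_solver",                           # Schedulers
)
```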
diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
deleted file mode 100644
index 508e7e9..0000000
--- a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_generator_page_request_scheduler.py b/src/gooey/types/qr_code_generator_page_request_scheduler.py
deleted file mode 100644
index e30308a..0000000
--- a/src/gooey/types/qr_code_generator_page_request_scheduler.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeGeneratorPageRequestScheduler = typing.Union[
- typing.Literal[
- "singlestep_dpm_solver",
- "multistep_dpm_solver",
- "dpm_sde",
- "dpm_discrete",
- "dpm_discrete_ancestral",
- "unipc",
- "lms_discrete",
- "heun",
- "euler",
- "euler_ancestral",
- "pndm",
- "ddpm",
- "ddim",
- "deis",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
deleted file mode 100644
index c6f1967..0000000
--- a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_generator_page_request_selected_model.py b/src/gooey/types/qr_code_generator_page_request_selected_model.py
deleted file mode 100644
index 97282cb..0000000
--- a/src/gooey/types/qr_code_generator_page_request_selected_model.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeGeneratorPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "dream_shaper",
- "dreamlike_2",
- "sd_2",
- "sd_1_5",
- "dall_e",
- "dall_e_3",
- "openjourney_2",
- "openjourney",
- "analog_diffusion",
- "protogen_5_3",
- "jack_qiao",
- "rodent_diffusion_1_5",
- "deepfloyd_if",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
deleted file mode 100644
index 3be2ab6..0000000
--- a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-QrCodeRequestImagePromptControlnetModelsItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py
deleted file mode 100644
index b98f002..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnADocPageRequestCitationStyle = typing.Union[
- typing.Literal[
- "number",
- "title",
- "url",
- "symbol",
- "markdown",
- "html",
- "slack_mrkdwn",
- "plaintext",
- "number_markdown",
- "number_html",
- "number_slack_mrkdwn",
- "number_plaintext",
- "symbol_markdown",
- "symbol_html",
- "symbol_slack_mrkdwn",
- "symbol_plaintext",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py
deleted file mode 100644
index c65a896..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py
deleted file mode 100644
index 7bada87..0000000
--- a/src/gooey/types/related_qn_a_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py
index eea207f..521d9c8 100644
--- a/src/gooey/types/remix_image_request_selected_controlnet_model.py
+++ b/src/gooey/types/remix_image_request_selected_controlnet_model.py
@@ -1,19 +1,6 @@
# This file was auto-generated by Fern from our API Definition.
import typing
-from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem
+from .control_net_models import ControlNetModels
-RemixImageRequestSelectedControlnetModel = typing.Union[
- typing.List[RemixImageRequestSelectedControlnetModelItem],
- typing.Literal["sd_controlnet_canny"],
- typing.Literal["sd_controlnet_depth"],
- typing.Literal["sd_controlnet_hed"],
- typing.Literal["sd_controlnet_mlsd"],
- typing.Literal["sd_controlnet_normal"],
- typing.Literal["sd_controlnet_openpose"],
- typing.Literal["sd_controlnet_scribble"],
- typing.Literal["sd_controlnet_seg"],
- typing.Literal["sd_controlnet_tile"],
- typing.Literal["sd_controlnet_brightness"],
- typing.Literal["control_v1p_sd15_qrcode_monster_v2"],
-]
+RemixImageRequestSelectedControlnetModel = typing.Union[typing.List[ControlNetModels], ControlNetModels]
diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
deleted file mode 100644
index b4f3ff0..0000000
--- a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RemixImageRequestSelectedControlnetModelItem = typing.Union[
- typing.Literal[
- "sd_controlnet_canny",
- "sd_controlnet_depth",
- "sd_controlnet_hed",
- "sd_controlnet_mlsd",
- "sd_controlnet_normal",
- "sd_controlnet_openpose",
- "sd_controlnet_scribble",
- "sd_controlnet_seg",
- "sd_controlnet_tile",
- "sd_controlnet_brightness",
- "control_v1p_sd15_qrcode_monster_v2",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py
deleted file mode 100644
index c84f0e7..0000000
--- a/src/gooey/types/remove_background_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any]
diff --git a/src/gooey/types/response_format_type.py b/src/gooey/types/response_format_type.py
new file mode 100644
index 0000000..f8216e9
--- /dev/null
+++ b/src/gooey/types/response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/schedulers.py
similarity index 91%
rename from src/gooey/types/qr_code_request_scheduler.py
rename to src/gooey/types/schedulers.py
index 890b204..d3b5398 100644
--- a/src/gooey/types/qr_code_request_scheduler.py
+++ b/src/gooey/types/schedulers.py
@@ -2,7 +2,7 @@
import typing
-QrCodeRequestScheduler = typing.Union[
+Schedulers = typing.Union[
typing.Literal[
"singlestep_dpm_solver",
"multistep_dpm_solver",
diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py
deleted file mode 100644
index 26f948b..0000000
--- a/src/gooey/types/seo_summary_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py
deleted file mode 100644
index 1eaf901..0000000
--- a/src/gooey/types/smart_gpt_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/social_lookup_email_page_request_response_format_type.py b/src/gooey/types/social_lookup_email_page_request_response_format_type.py
deleted file mode 100644
index 46c50db..0000000
--- a/src/gooey/types/social_lookup_email_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py
deleted file mode 100644
index 4d2cf2b..0000000
--- a/src/gooey/types/speech_recognition_request_output_format.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any]
diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py
deleted file mode 100644
index 886ab92..0000000
--- a/src/gooey/types/speech_recognition_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py
deleted file mode 100644
index 3ab37a9..0000000
--- a/src/gooey/types/synthesize_data_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/text2audio_models.py b/src/gooey/types/text2audio_models.py
new file mode 100644
index 0000000..b3eb9eb
--- /dev/null
+++ b/src/gooey/types/text2audio_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Text2AudioModels = typing.Literal["audio_ldm"]
diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/text_to_image_models.py
similarity index 90%
rename from src/gooey/types/qr_code_request_selected_model.py
rename to src/gooey/types/text_to_image_models.py
index 7ea963c..fd17514 100644
--- a/src/gooey/types/qr_code_request_selected_model.py
+++ b/src/gooey/types/text_to_image_models.py
@@ -2,7 +2,7 @@
import typing
-QrCodeRequestSelectedModel = typing.Union[
+TextToImageModels = typing.Union[
typing.Literal[
"dream_shaper",
"dreamlike_2",
diff --git a/src/gooey/types/text_to_speech_page_request_tts_provider.py b/src/gooey/types/text_to_speech_page_request_tts_provider.py
deleted file mode 100644
index a6b8938..0000000
--- a/src/gooey/types/text_to_speech_page_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextToSpeechPageRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/types/text_to_speech_providers.py
similarity index 80%
rename from src/gooey/types/lipsync_tts_request_tts_provider.py
rename to src/gooey/types/text_to_speech_providers.py
index 1a23fe3..f86047f 100644
--- a/src/gooey/types/lipsync_tts_request_tts_provider.py
+++ b/src/gooey/types/text_to_speech_providers.py
@@ -2,6 +2,6 @@
import typing
-LipsyncTtsRequestTtsProvider = typing.Union[
+TextToSpeechProviders = typing.Union[
typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
]
diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py
deleted file mode 100644
index b774b56..0000000
--- a/src/gooey/types/translate_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_models.py b/src/gooey/types/translation_models.py
new file mode 100644
index 0000000..136ecb8
--- /dev/null
+++ b/src/gooey/types/translation_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TranslationModels = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
index 9c033a6..6845f7f 100644
--- a/src/gooey/types/translation_page_request.py
+++ b/src/gooey/types/translation_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from .translation_models import TranslationModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -17,7 +17,7 @@ class TranslationPageRequest(UniversalBaseModel):
"""
texts: typing.Optional[typing.List[str]] = None
- selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
+ selected_model: typing.Optional[TranslationModels] = None
translation_source: typing.Optional[str] = None
translation_target: typing.Optional[str] = None
glossary_document: typing.Optional[str] = None
diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py
deleted file mode 100644
index 62ae9ab..0000000
--- a/src/gooey/types/translation_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
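
The translation request now shares `TranslationModels` with the ASR and copilot requests. A construction sketch using the fields visible in the hunk above (all optional there); the sample text and the `"en"` language-code format are assumptions, and the remaining fields keep their defaults.

```python
from gooey.types.translation_page_request import TranslationPageRequest

request = TranslationPageRequest(
    texts=["Bonjour tout le monde"],
    selected_model="google",      # TranslationModels: "google" | "ghana_nlp"
    translation_target="en",      # target language code; format assumed
)
```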
diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/upscaler_models.py
similarity index 78%
rename from src/gooey/types/upscale_request_selected_models_item.py
rename to src/gooey/types/upscaler_models.py
index 1a8362e..314c03a 100644
--- a/src/gooey/types/upscale_request_selected_models_item.py
+++ b/src/gooey/types/upscaler_models.py
@@ -2,6 +2,6 @@
import typing
-UpscaleRequestSelectedModelsItem = typing.Union[
+UpscalerModels = typing.Union[
typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
]
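
`CompareUpscalerPageRequest.selected_models` (see its hunk earlier in this diff) now takes a list of this shared alias. Typed usage:

```python
import typing
from gooey.types.upscaler_models import UpscalerModels

selected_models: typing.List[UpscalerModels] = ["real_esrgan_x2", "gfpgan_1_4"]
```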
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
index bf1cb59..d1f4e31 100644
--- a/src/gooey/types/video_bots_page_request.py
+++ b/src/gooey/types/video_bots_page_request.py
@@ -7,13 +7,13 @@
from .conversation_entry import ConversationEntry
from .large_language_models import LargeLanguageModels
from .embedding_models import EmbeddingModels
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
+from .citation_styles import CitationStyles
from .asr_models import AsrModels
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
+from .translation_models import TranslationModels
from .lipsync_models import LipsyncModels
from .llm_tools import LlmTools
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
+from .response_format_type import ResponseFormatType
+from .text_to_speech_providers import TextToSpeechProviders
from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
@@ -59,7 +59,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
"""
- citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
+ citation_style: typing.Optional[CitationStyles] = None
use_url_shortener: typing.Optional[bool] = None
asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None)
"""
@@ -71,7 +71,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
Choose a language to transcribe incoming audio messages to text.
"""
- translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
+ translation_model: typing.Optional[TranslationModels] = None
user_language: typing.Optional[str] = pydantic.Field(default=None)
"""
Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -90,8 +90,8 @@ class VideoBotsPageRequest(UniversalBaseModel):
quality: typing.Optional[float] = None
max_tokens: typing.Optional[int] = None
sampling_temperature: typing.Optional[float] = None
- response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
- tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
+ response_format_type: typing.Optional[ResponseFormatType] = None
+ tts_provider: typing.Optional[TextToSpeechProviders] = None
uberduck_voice_name: typing.Optional[str] = None
uberduck_speaking_rate: typing.Optional[float] = None
google_voice_name: typing.Optional[str] = None
diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py
deleted file mode 100644
index dc3630b..0000000
--- a/src/gooey/types/video_bots_page_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestCitationStyle = typing.Union[
- typing.Literal[
- "number",
- "title",
- "url",
- "symbol",
- "markdown",
- "html",
- "slack_mrkdwn",
- "plaintext",
- "number_markdown",
- "number_html",
- "number_slack_mrkdwn",
- "number_plaintext",
- "symbol_markdown",
- "symbol_html",
- "symbol_slack_mrkdwn",
- "symbol_plaintext",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py
deleted file mode 100644
index 25cc8f1..0000000
--- a/src/gooey/types/video_bots_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
deleted file mode 100644
index 0373c0c..0000000
--- a/src/gooey/types/video_bots_page_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py
deleted file mode 100644
index 3fc8d0a..0000000
--- a/src/gooey/types/video_bots_page_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestTtsProvider = typing.Union[
- typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
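
Taken together, the VideoBots (copilot) request drops four private enums in favour of the shared aliases. A migration sketch for code that imported the old beta24 names; the literal values are unchanged, only the type names and modules move.

```python
# beta24 name                             -> beta25 replacement (same literal values)
# VideoBotsPageRequestCitationStyle       -> CitationStyles
# VideoBotsPageRequestTranslationModel    -> TranslationModels
# VideoBotsPageRequestResponseFormatType  -> ResponseFormatType
# VideoBotsPageRequestTtsProvider         -> TextToSpeechProviders
from gooey.types.citation_styles import CitationStyles
from gooey.types.translation_models import TranslationModels
from gooey.types.response_format_type import ResponseFormatType
from gooey.types.text_to_speech_providers import TextToSpeechProviders
```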