diff --git a/pyproject.toml b/pyproject.toml
index d66b377..7b35d2e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gooeyai"
-version = "0.0.1-beta26"
+version = "0.0.1-beta27"
 description = ""
 readme = "README.md"
 authors = []
diff --git a/reference.md b/reference.md
index a067ff0..11ae1ed 100644
--- a/reference.md
+++ b/reference.md
@@ -80,7 +80,7 @@ client.animate(
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[AnimationModels]`
@@ -308,7 +308,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[ControlNetModels]]`
@@ -348,7 +348,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
+**selected_model:** `typing.Optional[TextToImageModels]`
@@ -356,7 +356,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
+**selected_controlnet_model:** `typing.Optional[typing.List[ControlNetModels]]`
@@ -412,7 +412,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
+**scheduler:** `typing.Optional[Schedulers]`
@@ -605,7 +605,7 @@ client.seo_people_also_ask(
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -666,7 +666,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -674,7 +674,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -893,7 +893,7 @@ client.seo_content(
-**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -901,7 +901,7 @@ client.seo_content(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -1086,7 +1086,7 @@ client.web_search_llm(
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -1147,7 +1147,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1155,7 +1155,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -1331,7 +1331,7 @@ client.personalize_email(
-**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1647,7 +1647,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1753,7 +1753,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[AsrModels]`
@@ -1835,7 +1835,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -1977,7 +1977,7 @@ client.llm()
-**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2073,7 +2073,7 @@ client.rag(
-**keyword_query:** `typing.Optional[DocSearchPageRequestKeywordQuery]`
+**keyword_query:** `typing.Optional[KeywordQuery]`
@@ -2121,7 +2121,7 @@ client.rag(
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -2166,7 +2166,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -2214,7 +2214,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2382,7 +2382,7 @@ client.smart_gpt(
-**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2502,7 +2502,7 @@ typing.List[core.File]` — See core.File for more documentation
-**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
+**chain_type:** `typing.Optional[CombineDocumentsChains]`
@@ -2510,7 +2510,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[AsrModels]`
@@ -2566,7 +2566,7 @@ typing.List[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -2780,7 +2780,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncModels]`
@@ -2886,7 +2886,7 @@ client.lipsync_tts(
-**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
@@ -3014,7 +3014,7 @@ client.lipsync_tts(
-**openai_voice_name:** `typing.Optional[LipsyncTtsRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[OpenAiTtsVoices]`
@@ -3022,7 +3022,7 @@ client.lipsync_tts(
-**openai_tts_model:** `typing.Optional[LipsyncTtsRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[OpenAiTtsModels]`
@@ -3080,7 +3080,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncModels]`
@@ -3176,7 +3176,7 @@ client.text_to_speech(
-**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
@@ -3304,7 +3304,7 @@ client.text_to_speech(
-**openai_voice_name:** `typing.Optional[TextToSpeechPageRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[OpenAiTtsVoices]`
@@ -3312,7 +3312,7 @@ client.text_to_speech(
-**openai_tts_model:** `typing.Optional[TextToSpeechPageRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[OpenAiTtsModels]`
@@ -3408,7 +3408,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
+**selected_model:** `typing.Optional[AsrModels]`
@@ -3424,7 +3424,7 @@ typing.List[core.File]` — See core.File for more documentation
-**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
+**translation_model:** `typing.Optional[TranslationModels]`
@@ -3432,7 +3432,7 @@ typing.List[core.File]` — See core.File for more documentation
-**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
+**output_format:** `typing.Optional[AsrOutputFormat]`
@@ -3618,7 +3618,7 @@ client.text_to_music(
-**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
+**selected_models:** `typing.Optional[typing.Sequence[Text2AudioModels]]`
@@ -3712,7 +3712,7 @@ client.translate()
-**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
+**selected_model:** `typing.Optional[TranslationModels]`
@@ -3842,7 +3842,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageToImageModels]`
@@ -3850,7 +3850,7 @@ core.File` — See core.File for more documentation
-**selected_controlnet_model:** `typing.Optional[RemixImageRequestSelectedControlnetModel]`
+**selected_controlnet_model:** `typing.Optional[SelectedControlNetModels]`
@@ -4106,7 +4106,7 @@ client.text_to_image(
-**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.Sequence[TextToImageModels]]`
@@ -4114,7 +4114,7 @@ client.text_to_image(
-**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
+**scheduler:** `typing.Optional[Schedulers]`
@@ -4268,7 +4268,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4462,7 +4462,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4663,7 +4663,7 @@ client.image_from_email(
-**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[InpaintingModels]`
@@ -4896,7 +4896,7 @@ client.image_from_web_search(
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -4912,7 +4912,7 @@ client.image_from_web_search(
-**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageToImageModels]`
@@ -5072,7 +5072,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
+**selected_model:** `typing.Optional[ImageSegmentationModels]`
@@ -5236,7 +5236,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
+**selected_models:** `typing.Optional[typing.List[UpscalerModels]]`
@@ -5340,7 +5340,7 @@ client.embed(
-**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[EmbeddingModels]`
@@ -5436,7 +5436,7 @@ client.seo_people_also_ask_doc(
-**keyword_query:** `typing.Optional[RelatedQnADocPageRequestKeywordQuery]`
+**keyword_query:** `typing.Optional[KeywordQuery]`
@@ -5484,7 +5484,7 @@ client.seo_people_also_ask_doc(
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -5529,7 +5529,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -5577,7 +5577,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -5585,7 +5585,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -5876,7 +5876,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -5897,7 +5897,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]`
+**citation_style:** `typing.Optional[CitationStyles]`
@@ -5913,7 +5913,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**asr_model:** `typing.Optional[AsrModels]` — Choose a model to transcribe incoming audio messages to text.
@@ -5929,7 +5929,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
+**translation_model:** `typing.Optional[TranslationModels]`
@@ -5965,7 +5965,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
+**lipsync_model:** `typing.Optional[LipsyncModels]`
@@ -6021,7 +6021,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
+**response_format_type:** `typing.Optional[ResponseFormatType]`
@@ -6029,7 +6029,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
+**tts_provider:** `typing.Optional[TextToSpeechProviders]`
@@ -6157,7 +6157,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**openai_voice_name:** `typing.Optional[CopilotCompletionRequestOpenaiVoiceName]`
+**openai_voice_name:** `typing.Optional[OpenAiTtsVoices]`
@@ -6165,7 +6165,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**openai_tts_model:** `typing.Optional[CopilotCompletionRequestOpenaiTtsModel]`
+**openai_tts_model:** `typing.Optional[OpenAiTtsModels]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index 86305e9..bb9a2ed 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -5,47 +5,44 @@ AggFunctionFunction, AggFunctionResult, AggFunctionResultFunction, + AnimationModels, AnimationPrompt, AsrChunk, + AsrModels, + AsrOutputFormat, AsrOutputJson, AsrPageOutput, AsrPageOutputOutputTextItem, AsrPageRequest, - AsrPageRequestOutputFormat, - AsrPageRequestSelectedModel, - AsrPageRequestTranslationModel, AsrPageStatusResponse, AsyncApiResponseModelV3, BalanceResponse, BotBroadcastFilters, BotBroadcastRequestModel, BulkEvalPageOutput, - BulkEvalPageRequestResponseFormatType, BulkEvalPageStatusResponse, BulkRunnerPageOutput, BulkRunnerPageRequest, BulkRunnerPageStatusResponse, ButtonPressed, CalledFunctionResponse, - CalledFunctionResponseTrigger, ChatCompletionContentPartImageParam, ChatCompletionContentPartTextParam, ChyronPlantPageOutput, ChyronPlantPageRequest, ChyronPlantPageStatusResponse, + CitationStyles, + CombineDocumentsChains, CompareLlmPageOutput, - CompareLlmPageRequestResponseFormatType, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, - CompareText2ImgPageRequestScheduler, - CompareText2ImgPageRequestSelectedModelsItem, CompareText2ImgPageStatusResponse, CompareUpscalerPageOutput, CompareUpscalerPageRequest, - CompareUpscalerPageRequestSelectedModelsItem, CompareUpscalerPageStatusResponse, ConsoleLogs, ConsoleLogsLevel, + ControlNetModels, ConversationEntry, ConversationEntryContent, ConversationEntryContentItem, @@ -54,134 +51,82 @@ ConversationEntryRole, ConversationStart, CreateStreamRequest, - CreateStreamRequestAsrModel, - CreateStreamRequestCitationStyle, - CreateStreamRequestEmbeddingModel, - CreateStreamRequestLipsyncModel, - CreateStreamRequestOpenaiTtsModel, - CreateStreamRequestOpenaiVoiceName, - CreateStreamRequestResponseFormatType, - CreateStreamRequestTranslationModel, - CreateStreamRequestTtsProvider, CreateStreamResponse, DeforumSdPageOutput, - DeforumSdPageRequestSelectedModel, DeforumSdPageStatusResponse, DocExtractPageOutput, DocExtractPageRequest, - DocExtractPageRequestResponseFormatType, - DocExtractPageRequestSelectedAsrModel, DocExtractPageStatusResponse, DocSearchPageOutput, - DocSearchPageRequestCitationStyle, - DocSearchPageRequestEmbeddingModel, - DocSearchPageRequestKeywordQuery, - DocSearchPageRequestResponseFormatType, DocSearchPageStatusResponse, DocSummaryPageOutput, DocSummaryPageRequest, - DocSummaryPageRequestResponseFormatType, - DocSummaryPageRequestSelectedAsrModel, DocSummaryPageStatusResponse, - DocSummaryRequestResponseFormatType, - DocSummaryRequestSelectedAsrModel, EmailFaceInpaintingPageOutput, - EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageStatusResponse, + EmbeddingModels, EmbeddingsPageOutput, - EmbeddingsPageRequestSelectedModel, EmbeddingsPageStatusResponse, EvalPrompt, FaceInpaintingPageOutput, FaceInpaintingPageRequest, - FaceInpaintingPageRequestSelectedModel, FaceInpaintingPageStatusResponse, FinalResponse, + FunctionTrigger, FunctionsPageOutput, FunctionsPageStatusResponse, GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, - GoogleGptPageRequestEmbeddingModel, - GoogleGptPageRequestResponseFormatType, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, - GoogleImageGenPageRequestSelectedModel, GoogleImageGenPageStatusResponse, HttpValidationError, + ImageSegmentationModels, ImageSegmentationPageOutput, ImageSegmentationPageRequest, - ImageSegmentationPageRequestSelectedModel, 
ImageSegmentationPageStatusResponse, + ImageToImageModels, ImageUrl, ImageUrlDetail, Img2ImgPageOutput, Img2ImgPageRequest, - Img2ImgPageRequestSelectedControlnetModel, - Img2ImgPageRequestSelectedControlnetModelItem, - Img2ImgPageRequestSelectedModel, Img2ImgPageStatusResponse, + InpaintingModels, + KeywordQuery, LargeLanguageModels, LetterWriterPageOutput, LetterWriterPageRequest, LetterWriterPageStatusResponse, + LipsyncModels, LipsyncPageOutput, LipsyncPageRequest, - LipsyncPageRequestSelectedModel, LipsyncPageStatusResponse, - LipsyncRequestSelectedModel, LipsyncTtsPageOutput, LipsyncTtsPageRequest, - LipsyncTtsPageRequestOpenaiTtsModel, - LipsyncTtsPageRequestOpenaiVoiceName, - LipsyncTtsPageRequestSelectedModel, - LipsyncTtsPageRequestTtsProvider, LipsyncTtsPageStatusResponse, - LipsyncTtsRequestOpenaiTtsModel, - LipsyncTtsRequestOpenaiVoiceName, - LipsyncTtsRequestSelectedModel, - LipsyncTtsRequestTtsProvider, LlmTools, MessagePart, ObjectInpaintingPageOutput, ObjectInpaintingPageRequest, - ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, - PortraitRequestSelectedModel, - ProductImageRequestSelectedModel, + OpenAiTtsModels, + OpenAiTtsVoices, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, QrCodeGeneratorPageRequest, - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, - QrCodeGeneratorPageRequestScheduler, - QrCodeGeneratorPageRequestSelectedControlnetModelItem, - QrCodeGeneratorPageRequestSelectedModel, QrCodeGeneratorPageStatusResponse, - QrCodeRequestImagePromptControlnetModelsItem, - QrCodeRequestScheduler, - QrCodeRequestSelectedControlnetModelItem, - QrCodeRequestSelectedModel, RecipeFunction, - RecipeFunctionTrigger, RecipeRunState, RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, - RelatedQnADocPageRequestCitationStyle, - RelatedQnADocPageRequestEmbeddingModel, - RelatedQnADocPageRequestKeywordQuery, - RelatedQnADocPageRequestResponseFormatType, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, - RelatedQnAPageRequestEmbeddingModel, - RelatedQnAPageRequestResponseFormatType, RelatedQnAPageStatusResponse, - RemixImageRequestSelectedControlnetModel, - RemixImageRequestSelectedControlnetModelItem, - RemixImageRequestSelectedModel, - RemoveBackgroundRequestSelectedModel, ReplyButton, + ResponseFormatType, ResponseModel, ResponseModelFinalKeywordQuery, ResponseModelFinalPrompt, @@ -190,38 +135,31 @@ RunStart, SadTalkerSettings, SadTalkerSettingsPreprocess, + Schedulers, SearchReference, + SelectedControlNetModels, SeoSummaryPageOutput, - SeoSummaryPageRequestResponseFormatType, SeoSummaryPageStatusResponse, - SerpSearchLocation, + SerpSearchLocations, SerpSearchType, SmartGptPageOutput, - SmartGptPageRequestResponseFormatType, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, - SocialLookupEmailPageRequestResponseFormatType, SocialLookupEmailPageStatusResponse, - SpeechRecognitionRequestOutputFormat, - SpeechRecognitionRequestSelectedModel, - SpeechRecognitionRequestTranslationModel, StreamError, - SynthesizeDataRequestResponseFormatType, - SynthesizeDataRequestSelectedAsrModel, + Text2AudioModels, Text2AudioPageOutput, Text2AudioPageStatusResponse, + TextToImageModels, TextToSpeechPageOutput, - TextToSpeechPageRequestOpenaiTtsModel, - TextToSpeechPageRequestOpenaiVoiceName, - TextToSpeechPageRequestTtsProvider, TextToSpeechPageStatusResponse, + TextToSpeechProviders, TrainingDataModel, - TranslateRequestSelectedModel, + TranslationModels, TranslationPageOutput, TranslationPageRequest, 
- TranslationPageRequestSelectedModel, TranslationPageStatusResponse, - UpscaleRequestSelectedModelsItem, + UpscalerModels, ValidationError, ValidationErrorLocItem, Vcard, @@ -229,38 +167,18 @@ VideoBotsPageOutputFinalKeywordQuery, VideoBotsPageOutputFinalPrompt, VideoBotsPageRequest, - VideoBotsPageRequestAsrModel, - VideoBotsPageRequestCitationStyle, - VideoBotsPageRequestEmbeddingModel, VideoBotsPageRequestFunctionsItem, - VideoBotsPageRequestFunctionsItemTrigger, - VideoBotsPageRequestLipsyncModel, - VideoBotsPageRequestOpenaiTtsModel, - VideoBotsPageRequestOpenaiVoiceName, - VideoBotsPageRequestResponseFormatType, VideoBotsPageRequestSadtalkerSettings, VideoBotsPageRequestSadtalkerSettingsPreprocess, - VideoBotsPageRequestTranslationModel, - VideoBotsPageRequestTtsProvider, VideoBotsPageStatusResponse, ) from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError from . import copilot from .client import AsyncGooey, Gooey from .copilot import ( - CopilotCompletionRequestAsrModel, - CopilotCompletionRequestCitationStyle, - CopilotCompletionRequestEmbeddingModel, CopilotCompletionRequestFunctionsItem, - CopilotCompletionRequestFunctionsItemTrigger, - CopilotCompletionRequestLipsyncModel, - CopilotCompletionRequestOpenaiTtsModel, - CopilotCompletionRequestOpenaiVoiceName, - CopilotCompletionRequestResponseFormatType, CopilotCompletionRequestSadtalkerSettings, CopilotCompletionRequestSadtalkerSettingsPreprocess, - CopilotCompletionRequestTranslationModel, - CopilotCompletionRequestTtsProvider, ) from .environment import GooeyEnvironment from .version import __version__ @@ -270,15 +188,15 @@ "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", + "AnimationModels", "AnimationPrompt", "AsrChunk", + "AsrModels", + "AsrOutputFormat", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", "AsrPageRequest", - "AsrPageRequestOutputFormat", - "AsrPageRequestSelectedModel", - "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", "AsyncGooey", @@ -286,32 +204,29 @@ "BotBroadcastFilters", "BotBroadcastRequestModel", "BulkEvalPageOutput", - "BulkEvalPageRequestResponseFormatType", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", "BulkRunnerPageRequest", "BulkRunnerPageStatusResponse", "ButtonPressed", "CalledFunctionResponse", - "CalledFunctionResponseTrigger", "ChatCompletionContentPartImageParam", "ChatCompletionContentPartTextParam", "ChyronPlantPageOutput", "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", + "CitationStyles", + "CombineDocumentsChains", "CompareLlmPageOutput", - "CompareLlmPageRequestResponseFormatType", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - "CompareText2ImgPageRequestScheduler", - "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", "CompareUpscalerPageRequest", - "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", + "ControlNetModels", "ConversationEntry", "ConversationEntryContent", "ConversationEntryContentItem", @@ -319,63 +234,32 @@ "ConversationEntryContentItem_Text", "ConversationEntryRole", "ConversationStart", - "CopilotCompletionRequestAsrModel", - "CopilotCompletionRequestCitationStyle", - "CopilotCompletionRequestEmbeddingModel", "CopilotCompletionRequestFunctionsItem", - "CopilotCompletionRequestFunctionsItemTrigger", - "CopilotCompletionRequestLipsyncModel", - "CopilotCompletionRequestOpenaiTtsModel", - 
"CopilotCompletionRequestOpenaiVoiceName", - "CopilotCompletionRequestResponseFormatType", "CopilotCompletionRequestSadtalkerSettings", "CopilotCompletionRequestSadtalkerSettingsPreprocess", - "CopilotCompletionRequestTranslationModel", - "CopilotCompletionRequestTtsProvider", "CreateStreamRequest", - "CreateStreamRequestAsrModel", - "CreateStreamRequestCitationStyle", - "CreateStreamRequestEmbeddingModel", - "CreateStreamRequestLipsyncModel", - "CreateStreamRequestOpenaiTtsModel", - "CreateStreamRequestOpenaiVoiceName", - "CreateStreamRequestResponseFormatType", - "CreateStreamRequestTranslationModel", - "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", "DocExtractPageRequest", - "DocExtractPageRequestResponseFormatType", - "DocExtractPageRequestSelectedAsrModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequestCitationStyle", - "DocSearchPageRequestEmbeddingModel", - "DocSearchPageRequestKeywordQuery", - "DocSearchPageRequestResponseFormatType", "DocSearchPageStatusResponse", "DocSummaryPageOutput", "DocSummaryPageRequest", - "DocSummaryPageRequestResponseFormatType", - "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageStatusResponse", - "DocSummaryRequestResponseFormatType", - "DocSummaryRequestSelectedAsrModel", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", + "EmbeddingModels", "EmbeddingsPageOutput", - "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", "FaceInpaintingPageRequest", - "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", + "FunctionTrigger", "FunctionsPageOutput", "FunctionsPageStatusResponse", "GenericErrorResponse", @@ -383,87 +267,56 @@ "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", - "GoogleGptPageRequestEmbeddingModel", - "GoogleGptPageRequestResponseFormatType", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", + "ImageSegmentationModels", "ImageSegmentationPageOutput", "ImageSegmentationPageRequest", - "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", + "ImageToImageModels", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", "Img2ImgPageRequest", - "Img2ImgPageRequestSelectedControlnetModel", - "Img2ImgPageRequestSelectedControlnetModelItem", - "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", + "InpaintingModels", + "KeywordQuery", "LargeLanguageModels", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageStatusResponse", + "LipsyncModels", "LipsyncPageOutput", "LipsyncPageRequest", - "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", - "LipsyncRequestSelectedModel", "LipsyncTtsPageOutput", "LipsyncTtsPageRequest", - "LipsyncTtsPageRequestOpenaiTtsModel", - "LipsyncTtsPageRequestOpenaiVoiceName", - "LipsyncTtsPageRequestSelectedModel", - "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", - "LipsyncTtsRequestOpenaiTtsModel", - "LipsyncTtsRequestOpenaiVoiceName", - "LipsyncTtsRequestSelectedModel", - "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", "ObjectInpaintingPageRequest", - "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", + "OpenAiTtsModels", 
+ "OpenAiTtsVoices", "PaymentRequiredError", - "PortraitRequestSelectedModel", - "ProductImageRequestSelectedModel", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", "QrCodeGeneratorPageRequest", - "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", - "QrCodeGeneratorPageRequestScheduler", - "QrCodeGeneratorPageRequestSelectedControlnetModelItem", - "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", - "QrCodeRequestImagePromptControlnetModelsItem", - "QrCodeRequestScheduler", - "QrCodeRequestSelectedControlnetModelItem", - "QrCodeRequestSelectedModel", "RecipeFunction", - "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequestCitationStyle", - "RelatedQnADocPageRequestEmbeddingModel", - "RelatedQnADocPageRequestKeywordQuery", - "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequestEmbeddingModel", - "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageStatusResponse", - "RemixImageRequestSelectedControlnetModel", - "RemixImageRequestSelectedControlnetModelItem", - "RemixImageRequestSelectedModel", - "RemoveBackgroundRequestSelectedModel", "ReplyButton", + "ResponseFormatType", "ResponseModel", "ResponseModelFinalKeywordQuery", "ResponseModelFinalPrompt", @@ -472,40 +325,33 @@ "RunStart", "SadTalkerSettings", "SadTalkerSettingsPreprocess", + "Schedulers", "SearchReference", + "SelectedControlNetModels", "SeoSummaryPageOutput", - "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageStatusResponse", - "SerpSearchLocation", + "SerpSearchLocations", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequestResponseFormatType", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageStatusResponse", - "SpeechRecognitionRequestOutputFormat", - "SpeechRecognitionRequestSelectedModel", - "SpeechRecognitionRequestTranslationModel", "StreamError", - "SynthesizeDataRequestResponseFormatType", - "SynthesizeDataRequestSelectedAsrModel", + "Text2AudioModels", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", + "TextToImageModels", "TextToSpeechPageOutput", - "TextToSpeechPageRequestOpenaiTtsModel", - "TextToSpeechPageRequestOpenaiVoiceName", - "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", + "TextToSpeechProviders", "TooManyRequestsError", "TrainingDataModel", - "TranslateRequestSelectedModel", + "TranslationModels", "TranslationPageOutput", "TranslationPageRequest", - "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "UnprocessableEntityError", - "UpscaleRequestSelectedModelsItem", + "UpscalerModels", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -513,19 +359,9 @@ "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", "VideoBotsPageRequest", - "VideoBotsPageRequestAsrModel", - "VideoBotsPageRequestCitationStyle", - "VideoBotsPageRequestEmbeddingModel", "VideoBotsPageRequestFunctionsItem", - "VideoBotsPageRequestFunctionsItemTrigger", - "VideoBotsPageRequestLipsyncModel", - "VideoBotsPageRequestOpenaiTtsModel", - "VideoBotsPageRequestOpenaiVoiceName", - "VideoBotsPageRequestResponseFormatType", "VideoBotsPageRequestSadtalkerSettings", "VideoBotsPageRequestSadtalkerSettingsPreprocess", - "VideoBotsPageRequestTranslationModel", - "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", 
"__version__", "copilot", diff --git a/src/gooey/client.py b/src/gooey/client.py index 6767f27..9f131b1 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -9,7 +9,7 @@ from .copilot.client import CopilotClient from .types.animation_prompt import AnimationPrompt from .types.recipe_function import RecipeFunction -from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel +from .types.animation_models import AnimationModels from .types.run_settings import RunSettings from .core.request_options import RequestOptions from .types.deforum_sd_page_output import DeforumSdPageOutput @@ -22,88 +22,61 @@ from json.decoder import JSONDecodeError from . import core from .types.vcard import Vcard -from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem -from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel -from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem -from .types.qr_code_request_scheduler import QrCodeRequestScheduler +from .types.control_net_models import ControlNetModels +from .types.text_to_image_models import TextToImageModels +from .types.schedulers import Schedulers from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput from .types.large_language_models import LargeLanguageModels -from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel -from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType -from .types.serp_search_location import SerpSearchLocation +from .types.embedding_models import EmbeddingModels +from .types.response_format_type import ResponseFormatType +from .types.serp_search_locations import SerpSearchLocations from .types.serp_search_type import SerpSearchType from .types.related_qn_a_page_output import RelatedQnAPageOutput -from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .types.seo_summary_page_output import SeoSummaryPageOutput -from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .types.google_gpt_page_output import GoogleGptPageOutput -from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput from .types.bulk_runner_page_output import BulkRunnerPageOutput from .types.eval_prompt import EvalPrompt from .types.agg_function import AggFunction -from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType from .types.bulk_eval_page_output import BulkEvalPageOutput -from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel -from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType +from .types.asr_models import AsrModels from .types.doc_extract_page_output import DocExtractPageOutput -from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .types.compare_llm_page_output import CompareLlmPageOutput -from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .types.doc_search_page_request_embedding_model import 
DocSearchPageRequestEmbeddingModel -from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType +from .types.keyword_query import KeywordQuery +from .types.citation_styles import CitationStyles from .types.doc_search_page_output import DocSearchPageOutput -from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType from .types.smart_gpt_page_output import SmartGptPageOutput -from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel -from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType +from .types.combine_documents_chains import CombineDocumentsChains from .types.doc_summary_page_output import DocSummaryPageOutput from .types.functions_page_output import FunctionsPageOutput from .types.sad_talker_settings import SadTalkerSettings -from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel +from .types.lipsync_models import LipsyncModels from .types.lipsync_page_output import LipsyncPageOutput -from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider -from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName -from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel -from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel +from .types.text_to_speech_providers import TextToSpeechProviders +from .types.open_ai_tts_voices import OpenAiTtsVoices +from .types.open_ai_tts_models import OpenAiTtsModels from .types.lipsync_tts_page_output import LipsyncTtsPageOutput -from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider -from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel from .types.text_to_speech_page_output import TextToSpeechPageOutput -from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel -from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel -from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat +from .types.translation_models import TranslationModels +from .types.asr_output_format import AsrOutputFormat from .types.asr_page_output import AsrPageOutput +from .types.text2audio_models import Text2AudioModels from .types.text2audio_page_output import Text2AudioPageOutput -from .types.translate_request_selected_model import TranslateRequestSelectedModel from .types.translation_page_output import TranslationPageOutput -from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel -from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel +from .types.image_to_image_models import ImageToImageModels +from .types.selected_control_net_models import SelectedControlNetModels from .types.img2img_page_output import Img2ImgPageOutput -from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem -from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler from .types.compare_text2img_page_output import CompareText2ImgPageOutput -from 
.types.product_image_request_selected_model import ProductImageRequestSelectedModel +from .types.inpainting_models import InpaintingModels from .types.object_inpainting_page_output import ObjectInpaintingPageOutput -from .types.portrait_request_selected_model import PortraitRequestSelectedModel from .types.face_inpainting_page_output import FaceInpaintingPageOutput -from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput -from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .types.google_image_gen_page_output import GoogleImageGenPageOutput -from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel +from .types.image_segmentation_models import ImageSegmentationModels from .types.image_segmentation_page_output import ImageSegmentationPageOutput -from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem +from .types.upscaler_models import UpscalerModels from .types.compare_upscaler_page_output import CompareUpscalerPageOutput -from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .types.embeddings_page_output import EmbeddingsPageOutput -from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .types.balance_response import BalanceResponse from .core.client_wrapper import AsyncClientWrapper @@ -183,7 +156,7 @@ def animate( functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[AnimationModels] = OMIT, animation_mode: typing.Optional[str] = OMIT, zoom: typing.Optional[str] = OMIT, translation_x: typing.Optional[str] = OMIT, @@ -210,7 +183,7 @@ def animate( max_frames : typing.Optional[int] - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + selected_model : typing.Optional[AnimationModels] animation_mode : typing.Optional[str] @@ -340,22 +313,20 @@ def qr_code( use_url_shortener: typing.Optional[bool] = None, negative_prompt: typing.Optional[str] = None, image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = None, + image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None, image_prompt_strength: typing.Optional[float] = None, image_prompt_scale: typing.Optional[float] = None, image_prompt_pos_x: typing.Optional[float] = None, image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + selected_model: typing.Optional[TextToImageModels] = None, + selected_controlnet_model: 
typing.Optional[typing.List[ControlNetModels]] = None, output_width: typing.Optional[int] = None, output_height: typing.Optional[int] = None, guidance_scale: typing.Optional[float] = None, controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, - scheduler: typing.Optional[QrCodeRequestScheduler] = None, + scheduler: typing.Optional[Schedulers] = None, seed: typing.Optional[int] = None, obj_scale: typing.Optional[float] = None, obj_pos_x: typing.Optional[float] = None, @@ -391,7 +362,7 @@ def qr_code( image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]] image_prompt_strength : typing.Optional[float] @@ -401,9 +372,9 @@ def qr_code( image_prompt_pos_y : typing.Optional[float] - selected_model : typing.Optional[QrCodeRequestSelectedModel] + selected_model : typing.Optional[TextToImageModels] - selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]] output_width : typing.Optional[int] @@ -417,7 +388,7 @@ def qr_code( quality : typing.Optional[int] - scheduler : typing.Optional[QrCodeRequestScheduler] + scheduler : typing.Optional[Schedulers] seed : typing.Optional[int] @@ -549,15 +520,15 @@ def seo_people_also_ask( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -592,7 +563,7 @@ def seo_people_also_ask( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -610,9 +581,9 @@ def seo_people_also_ask( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -742,8 +713,8 @@ def seo_content( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: 
typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -785,9 +756,9 @@ def seo_content( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -912,15 +883,15 @@ def web_search_llm( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -955,7 +926,7 @@ def web_search_llm( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -973,9 +944,9 @@ def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -1100,7 +1071,7 @@ def personalize_email( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SocialLookupEmailPageOutput: @@ -1130,7 +1101,7 @@ def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -1371,7 +1342,7 @@ def eval( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: 
typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageOutput: @@ -1415,7 +1386,7 @@ def eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -1514,7 +1485,7 @@ def synthesize_data( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, sheet_url: typing.Optional[core.File] = None, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + selected_asr_model: typing.Optional[AsrModels] = None, google_translate_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, task_instructions: typing.Optional[str] = None, @@ -1524,7 +1495,7 @@ def synthesize_data( quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocExtractPageOutput: @@ -1544,7 +1515,7 @@ def synthesize_data( sheet_url : typing.Optional[core.File] See core.File for more documentation - selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + selected_asr_model : typing.Optional[AsrModels] google_translate_target : typing.Optional[str] @@ -1565,7 +1536,7 @@ def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -1672,7 +1643,7 @@ def llm( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CompareLlmPageOutput: @@ -1700,7 +1671,7 @@ def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -1794,24 +1765,24 @@ def rag( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + keyword_query: typing.Optional[KeywordQuery] = OMIT, documents: typing.Optional[typing.Sequence[str]] = OMIT, max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: 
typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + citation_style: typing.Optional[CitationStyles] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DocSearchPageOutput: @@ -1827,7 +1798,7 @@ def rag( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + keyword_query : typing.Optional[KeywordQuery] documents : typing.Optional[typing.Sequence[str]] @@ -1839,7 +1810,7 @@ def rag( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -1853,7 +1824,7 @@ def rag( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] avoid_repetition : typing.Optional[bool] @@ -1865,7 +1836,7 @@ def rag( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -1981,7 +1952,7 @@ def smart_gpt( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageOutput: @@ -2015,7 +1986,7 @@ def smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -2117,15 +2088,15 @@ def doc_summary( task_instructions: typing.Optional[str] = None, merge_instructions: typing.Optional[str] = None, selected_model: typing.Optional[LargeLanguageModels] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + chain_type: typing.Optional[CombineDocumentsChains] = None, + selected_asr_model: typing.Optional[AsrModels] = None, google_translate_target: typing.Optional[str] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, settings: 
typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocSummaryPageOutput: @@ -2148,9 +2119,9 @@ def doc_summary( selected_model : typing.Optional[LargeLanguageModels] - chain_type : typing.Optional[typing.Literal["map_reduce"]] + chain_type : typing.Optional[CombineDocumentsChains] - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + selected_asr_model : typing.Optional[AsrModels] google_translate_target : typing.Optional[str] @@ -2164,7 +2135,7 @@ def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -2367,7 +2338,7 @@ def lipsync( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, + selected_model: typing.Optional[LipsyncModels] = None, input_audio: typing.Optional[core.File] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -2395,7 +2366,7 @@ def lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncRequestSelectedModel] + selected_model : typing.Optional[LipsyncModels] input_audio : typing.Optional[core.File] See core.File for more documentation @@ -2494,7 +2465,7 @@ def lipsync_tts( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + tts_provider: typing.Optional[TextToSpeechProviders] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -2510,15 +2481,15 @@ def lipsync_tts( elevenlabs_style: typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None, + openai_tts_model: typing.Optional[OpenAiTtsModels] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + selected_model: typing.Optional[LipsyncModels] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncTtsPageOutput: @@ -2534,7 +2505,7 @@ def lipsync_tts( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -2567,9 +2538,9 @@ def lipsync_tts( azure_voice_name : typing.Optional[str] - openai_voice_name : 
typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] input_face : typing.Optional[core.File] See core.File for more documentation @@ -2584,7 +2555,7 @@ def lipsync_tts( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + selected_model : typing.Optional[LipsyncModels] settings : typing.Optional[RunSettings] @@ -2700,7 +2671,7 @@ def text_to_speech( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + tts_provider: typing.Optional[TextToSpeechProviders] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, google_voice_name: typing.Optional[str] = OMIT, @@ -2716,8 +2687,8 @@ def text_to_speech( elevenlabs_style: typing.Optional[float] = OMIT, elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = OMIT, + openai_tts_model: typing.Optional[OpenAiTtsModels] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> TextToSpeechPageOutput: @@ -2733,7 +2704,7 @@ def text_to_speech( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -2766,9 +2737,9 @@ def text_to_speech( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] settings : typing.Optional[RunSettings] @@ -2875,10 +2846,10 @@ def speech_recognition( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + selected_model: typing.Optional[AsrModels] = None, language: typing.Optional[str] = None, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + translation_model: typing.Optional[TranslationModels] = None, + output_format: typing.Optional[AsrOutputFormat] = None, google_translate_target: typing.Optional[str] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, @@ -2899,13 +2870,13 @@ def speech_recognition( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : 
typing.Optional[SpeechRecognitionRequestSelectedModel] + selected_model : typing.Optional[AsrModels] language : typing.Optional[str] - translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + translation_model : typing.Optional[TranslationModels] - output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + output_format : typing.Optional[AsrOutputFormat] google_translate_target : typing.Optional[str] use `translation_model` & `translation_target` instead. @@ -3019,7 +2990,7 @@ def text_to_music( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Text2AudioPageOutput: @@ -3049,7 +3020,7 @@ def text_to_music( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + selected_models : typing.Optional[typing.Sequence[Text2AudioModels]] settings : typing.Optional[RunSettings] @@ -3146,7 +3117,7 @@ def translate( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + selected_model: typing.Optional[TranslationModels] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, @@ -3165,7 +3136,7 @@ def translate( texts : typing.Optional[typing.List[str]] - selected_model : typing.Optional[TranslateRequestSelectedModel] + selected_model : typing.Optional[TranslationModels] translation_source : typing.Optional[str] @@ -3266,8 +3237,8 @@ def remix_image( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + selected_model: typing.Optional[ImageToImageModels] = None, + selected_controlnet_model: typing.Optional[SelectedControlNetModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -3296,9 +3267,9 @@ def remix_image( text_prompt : typing.Optional[str] - selected_model : typing.Optional[RemixImageRequestSelectedModel] + selected_model : typing.Optional[ImageToImageModels] - selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + selected_controlnet_model : typing.Optional[SelectedControlNetModels] negative_prompt : typing.Optional[str] @@ -3430,8 +3401,8 @@ def text_to_image( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT, + scheduler: typing.Optional[Schedulers] = OMIT, edit_instruction: 
typing.Optional[str] = OMIT, image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -3469,9 +3440,9 @@ def text_to_image( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.Sequence[TextToImageModels]] - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] + scheduler : typing.Optional[Schedulers] edit_instruction : typing.Optional[str] @@ -3583,7 +3554,7 @@ def product_image( obj_pos_x: typing.Optional[float] = None, obj_pos_y: typing.Optional[float] = None, mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + selected_model: typing.Optional[InpaintingModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -3618,7 +3589,7 @@ def product_image( mask_threshold : typing.Optional[float] - selected_model : typing.Optional[ProductImageRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -3743,7 +3714,7 @@ def portrait( face_scale: typing.Optional[float] = None, face_pos_x: typing.Optional[float] = None, face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + selected_model: typing.Optional[InpaintingModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -3776,7 +3747,7 @@ def portrait( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[PortraitRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -3901,7 +3872,7 @@ def image_from_email( face_scale: typing.Optional[float] = OMIT, face_pos_x: typing.Optional[float] = OMIT, face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[InpaintingModels] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -3943,7 +3914,7 @@ def image_from_email( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -4088,9 +4059,9 @@ def image_from_web_search( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[ImageToImageModels] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -4116,12 +4087,12 @@ def image_from_web_search( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location 
: typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + selected_model : typing.Optional[ImageToImageModels] negative_prompt : typing.Optional[str] @@ -4239,7 +4210,7 @@ def remove_background( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + selected_model: typing.Optional[ImageSegmentationModels] = None, mask_threshold: typing.Optional[float] = None, rect_persepective_transform: typing.Optional[bool] = None, reflection_opacity: typing.Optional[float] = None, @@ -4262,7 +4233,7 @@ def remove_background( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + selected_model : typing.Optional[ImageSegmentationModels] mask_threshold : typing.Optional[float] @@ -4372,7 +4343,7 @@ def upscale( variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_image: typing.Optional[core.File] = None, input_video: typing.Optional[core.File] = None, - selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_models: typing.Optional[typing.List[UpscalerModels]] = None, selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -4396,7 +4367,7 @@ def upscale( input_video : typing.Optional[core.File] See core.File for more documentation - selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.List[UpscalerModels]] selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] @@ -4493,7 +4464,7 @@ def embed( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[EmbeddingModels] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> EmbeddingsPageOutput: @@ -4509,7 +4480,7 @@ def embed( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + selected_model : typing.Optional[EmbeddingModels] settings : typing.Optional[RunSettings] @@ -4599,25 +4570,25 @@ def seo_people_also_ask_doc( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + keyword_query: typing.Optional[KeywordQuery] = OMIT, documents: typing.Optional[typing.Sequence[str]] = OMIT, max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: 
typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + citation_style: typing.Optional[CitationStyles] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -4636,7 +4607,7 @@ def seo_people_also_ask_doc( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + keyword_query : typing.Optional[KeywordQuery] documents : typing.Optional[typing.Sequence[str]] @@ -4648,7 +4619,7 @@ def seo_people_also_ask_doc( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -4662,7 +4633,7 @@ def seo_people_also_ask_doc( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] avoid_repetition : typing.Optional[bool] @@ -4674,9 +4645,9 @@ def seo_people_also_ask_doc( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -4899,7 +4870,7 @@ async def animate( functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[AnimationModels] = OMIT, animation_mode: typing.Optional[str] = OMIT, zoom: typing.Optional[str] = OMIT, translation_x: typing.Optional[str] = OMIT, @@ -4926,7 +4897,7 @@ async def animate( max_frames : typing.Optional[int] - selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] + selected_model : typing.Optional[AnimationModels] animation_mode : typing.Optional[str] @@ -5064,22 +5035,20 @@ async def qr_code( use_url_shortener: typing.Optional[bool] = None, negative_prompt: typing.Optional[str] = None, image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[ - 
typing.List[QrCodeRequestImagePromptControlnetModelsItem] - ] = None, + image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None, image_prompt_strength: typing.Optional[float] = None, image_prompt_scale: typing.Optional[float] = None, image_prompt_pos_x: typing.Optional[float] = None, image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, + selected_model: typing.Optional[TextToImageModels] = None, + selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None, output_width: typing.Optional[int] = None, output_height: typing.Optional[int] = None, guidance_scale: typing.Optional[float] = None, controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, - scheduler: typing.Optional[QrCodeRequestScheduler] = None, + scheduler: typing.Optional[Schedulers] = None, seed: typing.Optional[int] = None, obj_scale: typing.Optional[float] = None, obj_pos_x: typing.Optional[float] = None, @@ -5115,7 +5084,7 @@ async def qr_code( image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] + image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]] image_prompt_strength : typing.Optional[float] @@ -5125,9 +5094,9 @@ async def qr_code( image_prompt_pos_y : typing.Optional[float] - selected_model : typing.Optional[QrCodeRequestSelectedModel] + selected_model : typing.Optional[TextToImageModels] - selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] + selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]] output_width : typing.Optional[int] @@ -5141,7 +5110,7 @@ async def qr_code( quality : typing.Optional[int] - scheduler : typing.Optional[QrCodeRequestScheduler] + scheduler : typing.Optional[Schedulers] seed : typing.Optional[int] @@ -5281,15 +5250,15 @@ async def seo_people_also_ask( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5324,7 +5293,7 @@ async def seo_people_also_ask( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -5342,9 +5311,9 @@ async def seo_people_also_ask( sampling_temperature : 
typing.Optional[float] - response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5482,8 +5451,8 @@ async def seo_content( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5525,9 +5494,9 @@ async def seo_content( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5660,15 +5629,15 @@ async def web_search_llm( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5703,7 +5672,7 @@ async def web_search_llm( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -5721,9 +5690,9 @@ async def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5856,7 +5825,7 @@ async def personalize_email( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: 
typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SocialLookupEmailPageOutput: @@ -5886,7 +5855,7 @@ async def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6143,7 +6112,7 @@ async def eval( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageOutput: @@ -6187,7 +6156,7 @@ async def eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6294,7 +6263,7 @@ async def synthesize_data( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, sheet_url: typing.Optional[core.File] = None, - selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, + selected_asr_model: typing.Optional[AsrModels] = None, google_translate_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, task_instructions: typing.Optional[str] = None, @@ -6304,7 +6273,7 @@ async def synthesize_data( quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocExtractPageOutput: @@ -6324,7 +6293,7 @@ async def synthesize_data( sheet_url : typing.Optional[core.File] See core.File for more documentation - selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] + selected_asr_model : typing.Optional[AsrModels] google_translate_target : typing.Optional[str] @@ -6345,7 +6314,7 @@ async def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6460,7 +6429,7 @@ async def llm( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CompareLlmPageOutput: @@ -6488,7 +6457,7 @@ async def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] + response_format_type : 
typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6590,24 +6559,24 @@ async def rag( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[DocSearchPageRequestKeywordQuery] = OMIT, + keyword_query: typing.Optional[KeywordQuery] = OMIT, documents: typing.Optional[typing.Sequence[str]] = OMIT, max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, + citation_style: typing.Optional[CitationStyles] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DocSearchPageOutput: @@ -6623,7 +6592,7 @@ async def rag( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[DocSearchPageRequestKeywordQuery] + keyword_query : typing.Optional[KeywordQuery] documents : typing.Optional[typing.Sequence[str]] @@ -6635,7 +6604,7 @@ async def rag( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -6649,7 +6618,7 @@ async def rag( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[DocSearchPageRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] avoid_repetition : typing.Optional[bool] @@ -6661,7 +6630,7 @@ async def rag( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6785,7 +6754,7 @@ async def smart_gpt( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageOutput: @@ -6819,7 +6788,7 @@ async def smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -6929,15 +6898,15 @@ async def doc_summary( 
task_instructions: typing.Optional[str] = None, merge_instructions: typing.Optional[str] = None, selected_model: typing.Optional[LargeLanguageModels] = None, - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, - selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, + chain_type: typing.Optional[CombineDocumentsChains] = None, + selected_asr_model: typing.Optional[AsrModels] = None, google_translate_target: typing.Optional[str] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocSummaryPageOutput: @@ -6960,9 +6929,9 @@ async def doc_summary( selected_model : typing.Optional[LargeLanguageModels] - chain_type : typing.Optional[typing.Literal["map_reduce"]] + chain_type : typing.Optional[CombineDocumentsChains] - selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] + selected_asr_model : typing.Optional[AsrModels] google_translate_target : typing.Optional[str] @@ -6976,7 +6945,7 @@ async def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] settings : typing.Optional[RunSettings] @@ -7195,7 +7164,7 @@ async def lipsync( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, + selected_model: typing.Optional[LipsyncModels] = None, input_audio: typing.Optional[core.File] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -7223,7 +7192,7 @@ async def lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncRequestSelectedModel] + selected_model : typing.Optional[LipsyncModels] input_audio : typing.Optional[core.File] See core.File for more documentation @@ -7330,7 +7299,7 @@ async def lipsync_tts( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, + tts_provider: typing.Optional[TextToSpeechProviders] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -7346,15 +7315,15 @@ async def lipsync_tts( elevenlabs_style: typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[LipsyncTtsRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[LipsyncTtsRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None, + openai_tts_model: typing.Optional[OpenAiTtsModels] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, 
face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, + selected_model: typing.Optional[LipsyncModels] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncTtsPageOutput: @@ -7370,7 +7339,7 @@ async def lipsync_tts( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -7403,9 +7372,9 @@ async def lipsync_tts( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[LipsyncTtsRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[LipsyncTtsRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] input_face : typing.Optional[core.File] See core.File for more documentation @@ -7420,7 +7389,7 @@ async def lipsync_tts( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] + selected_model : typing.Optional[LipsyncModels] settings : typing.Optional[RunSettings] @@ -7544,7 +7513,7 @@ async def text_to_speech( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, + tts_provider: typing.Optional[TextToSpeechProviders] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, google_voice_name: typing.Optional[str] = OMIT, @@ -7560,8 +7529,8 @@ async def text_to_speech( elevenlabs_style: typing.Optional[float] = OMIT, elevenlabs_speaker_boost: typing.Optional[bool] = OMIT, azure_voice_name: typing.Optional[str] = OMIT, - openai_voice_name: typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] = OMIT, - openai_tts_model: typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] = OMIT, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = OMIT, + openai_tts_model: typing.Optional[OpenAiTtsModels] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> TextToSpeechPageOutput: @@ -7577,7 +7546,7 @@ async def text_to_speech( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -7610,9 +7579,9 @@ async def text_to_speech( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[TextToSpeechPageRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[TextToSpeechPageRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] settings : typing.Optional[RunSettings] @@ -7727,10 +7696,10 @@ async def speech_recognition( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: 
typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, + selected_model: typing.Optional[AsrModels] = None, language: typing.Optional[str] = None, - translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, - output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, + translation_model: typing.Optional[TranslationModels] = None, + output_format: typing.Optional[AsrOutputFormat] = None, google_translate_target: typing.Optional[str] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, @@ -7751,13 +7720,13 @@ async def speech_recognition( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] + selected_model : typing.Optional[AsrModels] language : typing.Optional[str] - translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] + translation_model : typing.Optional[TranslationModels] - output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] + output_format : typing.Optional[AsrOutputFormat] google_translate_target : typing.Optional[str] use `translation_model` & `translation_target` instead. @@ -7879,7 +7848,7 @@ async def text_to_music( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, + selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Text2AudioPageOutput: @@ -7909,7 +7878,7 @@ async def text_to_music( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] + selected_models : typing.Optional[typing.Sequence[Text2AudioModels]] settings : typing.Optional[RunSettings] @@ -8014,7 +7983,7 @@ async def translate( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslateRequestSelectedModel] = None, + selected_model: typing.Optional[TranslationModels] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, @@ -8033,7 +8002,7 @@ async def translate( texts : typing.Optional[typing.List[str]] - selected_model : typing.Optional[TranslateRequestSelectedModel] + selected_model : typing.Optional[TranslationModels] translation_source : typing.Optional[str] @@ -8142,8 +8111,8 @@ async def remix_image( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, - selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, + selected_model: typing.Optional[ImageToImageModels] = None, + selected_controlnet_model: typing.Optional[SelectedControlNetModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, 
quality: typing.Optional[int] = None, @@ -8172,9 +8141,9 @@ async def remix_image( text_prompt : typing.Optional[str] - selected_model : typing.Optional[RemixImageRequestSelectedModel] + selected_model : typing.Optional[ImageToImageModels] - selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] + selected_controlnet_model : typing.Optional[SelectedControlNetModels] negative_prompt : typing.Optional[str] @@ -8314,8 +8283,8 @@ async def text_to_image( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, - scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, + selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT, + scheduler: typing.Optional[Schedulers] = OMIT, edit_instruction: typing.Optional[str] = OMIT, image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -8353,9 +8322,9 @@ async def text_to_image( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.Sequence[TextToImageModels]] - scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] + scheduler : typing.Optional[Schedulers] edit_instruction : typing.Optional[str] @@ -8475,7 +8444,7 @@ async def product_image( obj_pos_x: typing.Optional[float] = None, obj_pos_y: typing.Optional[float] = None, mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, + selected_model: typing.Optional[InpaintingModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -8510,7 +8479,7 @@ async def product_image( mask_threshold : typing.Optional[float] - selected_model : typing.Optional[ProductImageRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -8643,7 +8612,7 @@ async def portrait( face_scale: typing.Optional[float] = None, face_pos_x: typing.Optional[float] = None, face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[PortraitRequestSelectedModel] = None, + selected_model: typing.Optional[InpaintingModels] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -8676,7 +8645,7 @@ async def portrait( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[PortraitRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -8809,7 +8778,7 @@ async def image_from_email( face_scale: typing.Optional[float] = OMIT, face_pos_x: typing.Optional[float] = OMIT, face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[InpaintingModels] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -8851,7 +8820,7 @@ async def image_from_email( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] + selected_model : typing.Optional[InpaintingModels] negative_prompt : typing.Optional[str] @@ -9004,9 +8973,9 @@ 
async def image_from_web_search( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[ImageToImageModels] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -9032,12 +9001,12 @@ async def image_from_web_search( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead - selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] + selected_model : typing.Optional[ImageToImageModels] negative_prompt : typing.Optional[str] @@ -9163,7 +9132,7 @@ async def remove_background( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, + selected_model: typing.Optional[ImageSegmentationModels] = None, mask_threshold: typing.Optional[float] = None, rect_persepective_transform: typing.Optional[bool] = None, reflection_opacity: typing.Optional[float] = None, @@ -9186,7 +9155,7 @@ async def remove_background( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] + selected_model : typing.Optional[ImageSegmentationModels] mask_threshold : typing.Optional[float] @@ -9304,7 +9273,7 @@ async def upscale( variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_image: typing.Optional[core.File] = None, input_video: typing.Optional[core.File] = None, - selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, + selected_models: typing.Optional[typing.List[UpscalerModels]] = None, selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -9328,7 +9297,7 @@ async def upscale( input_video : typing.Optional[core.File] See core.File for more documentation - selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] + selected_models : typing.Optional[typing.List[UpscalerModels]] selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] @@ -9433,7 +9402,7 @@ async def embed( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, + selected_model: typing.Optional[EmbeddingModels] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: 
typing.Optional[RequestOptions] = None, ) -> EmbeddingsPageOutput: @@ -9449,7 +9418,7 @@ async def embed( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] + selected_model : typing.Optional[EmbeddingModels] settings : typing.Optional[RunSettings] @@ -9547,25 +9516,25 @@ async def seo_people_also_ask_doc( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - keyword_query: typing.Optional[RelatedQnADocPageRequestKeywordQuery] = OMIT, + keyword_query: typing.Optional[KeywordQuery] = OMIT, documents: typing.Optional[typing.Sequence[str]] = OMIT, max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, + embedding_model: typing.Optional[EmbeddingModels] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, + citation_style: typing.Optional[CitationStyles] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, + response_format_type: typing.Optional[ResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -9584,7 +9553,7 @@ async def seo_people_also_ask_doc( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - keyword_query : typing.Optional[RelatedQnADocPageRequestKeywordQuery] + keyword_query : typing.Optional[KeywordQuery] documents : typing.Optional[typing.Sequence[str]] @@ -9596,7 +9565,7 @@ async def seo_people_also_ask_doc( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -9610,7 +9579,7 @@ async def seo_people_also_ask_doc( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] avoid_repetition : typing.Optional[bool] @@ -9622,9 +9591,9 @@ async def seo_people_also_ask_doc( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocation] + serp_search_location : typing.Optional[SerpSearchLocations] scaleserp_locations : 
typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py index 3234b31..63aedba 100644 --- a/src/gooey/copilot/__init__.py +++ b/src/gooey/copilot/__init__.py @@ -1,33 +1,13 @@ # This file was auto-generated by Fern from our API Definition. from .types import ( - CopilotCompletionRequestAsrModel, - CopilotCompletionRequestCitationStyle, - CopilotCompletionRequestEmbeddingModel, CopilotCompletionRequestFunctionsItem, - CopilotCompletionRequestFunctionsItemTrigger, - CopilotCompletionRequestLipsyncModel, - CopilotCompletionRequestOpenaiTtsModel, - CopilotCompletionRequestOpenaiVoiceName, - CopilotCompletionRequestResponseFormatType, CopilotCompletionRequestSadtalkerSettings, CopilotCompletionRequestSadtalkerSettingsPreprocess, - CopilotCompletionRequestTranslationModel, - CopilotCompletionRequestTtsProvider, ) __all__ = [ - "CopilotCompletionRequestAsrModel", - "CopilotCompletionRequestCitationStyle", - "CopilotCompletionRequestEmbeddingModel", "CopilotCompletionRequestFunctionsItem", - "CopilotCompletionRequestFunctionsItemTrigger", - "CopilotCompletionRequestLipsyncModel", - "CopilotCompletionRequestOpenaiTtsModel", - "CopilotCompletionRequestOpenaiVoiceName", - "CopilotCompletionRequestResponseFormatType", "CopilotCompletionRequestSadtalkerSettings", "CopilotCompletionRequestSadtalkerSettingsPreprocess", - "CopilotCompletionRequestTranslationModel", - "CopilotCompletionRequestTtsProvider", ] diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py index 9dcc465..92e30b9 100644 --- a/src/gooey/copilot/client.py +++ b/src/gooey/copilot/client.py @@ -6,16 +6,16 @@ from .. import core from ..types.conversation_entry import ConversationEntry from ..types.large_language_models import LargeLanguageModels -from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel -from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle -from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel -from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel -from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel +from ..types.embedding_models import EmbeddingModels +from ..types.citation_styles import CitationStyles +from ..types.asr_models import AsrModels +from ..types.translation_models import TranslationModels +from ..types.lipsync_models import LipsyncModels from ..types.llm_tools import LlmTools -from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType -from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider -from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName -from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel +from ..types.response_format_type import ResponseFormatType +from ..types.text_to_speech_providers import TextToSpeechProviders +from ..types.open_ai_tts_voices import OpenAiTtsVoices +from ..types.open_ai_tts_models import OpenAiTtsModels from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings from ..types.run_settings import RunSettings from ..core.request_options import RequestOptions @@ -60,25 +60,25 @@ def completion( max_references: 
typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, + embedding_model: typing.Optional[EmbeddingModels] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, + citation_style: typing.Optional[CitationStyles] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None, + asr_model: typing.Optional[AsrModels] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, + translation_model: typing.Optional[TranslationModels] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, + lipsync_model: typing.Optional[LipsyncModels] = None, tools: typing.Optional[typing.List[LlmTools]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, - tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, + tts_provider: typing.Optional[TextToSpeechProviders] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -94,8 +94,8 @@ def completion( elevenlabs_style: typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None, + openai_tts_model: typing.Optional[OpenAiTtsModels] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, @@ -152,7 +152,7 @@ def completion( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -160,17 +160,17 @@ def completion( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[CopilotCompletionRequestAsrModel] + asr_model : typing.Optional[AsrModels] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. 
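For orientation, the `copilot/client.py` hunks around this point swap every `CopilotCompletionRequest*` enum for the shared types already used by the other endpoints (`EmbeddingModels`, `CitationStyles`, `AsrModels`, `TranslationModels`, `ResponseFormatType`, `TextToSpeechProviders`, `OpenAiTtsVoices`, `OpenAiTtsModels`). A minimal, hypothetical call sketch follows; only the keyword names and their new shared types come from the `completion()` signature in these hunks, while the `Gooey(...)` client class, the `client.copilot.completion` access path, and every literal value are assumptions for illustration.

```python
# Hypothetical usage sketch, not taken from the diff.
# Keyword names and their new shared types match the completion() signature
# shown in these hunks; the Gooey(...) client class, the access path, and the
# literal string values below are assumptions, since neither the enum members
# nor the client class appear in this diff.
from gooey import Gooey  # assumed top-level client class

client = Gooey(api_key="YOUR_API_KEY")  # assumed constructor/credentials

result = client.copilot.completion(  # assumed access path to copilot/client.py
    citation_style="number",          # now typed as CitationStyles (placeholder value)
    asr_model="whisper_large_v2",     # now typed as AsrModels (placeholder value)
    translation_model="google",       # now typed as TranslationModels (placeholder value)
    response_format_type="text",      # now typed as ResponseFormatType (placeholder value)
    max_tokens=512,
)
print(result)
```

The async `completion` variant further down in this file receives the identical substitutions, so the same call shape should carry over unchanged.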
- translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] + translation_model : typing.Optional[TranslationModels] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -181,7 +181,7 @@ def completion( output_glossary_document : typing.Optional[core.File] See core.File for more documentation - lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel] + lipsync_model : typing.Optional[LipsyncModels] tools : typing.Optional[typing.List[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). @@ -196,9 +196,9 @@ def completion( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -231,9 +231,9 @@ def completion( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] input_face : typing.Optional[core.File] See core.File for more documentation @@ -412,25 +412,25 @@ async def completion( max_references: typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, + embedding_model: typing.Optional[EmbeddingModels] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, + citation_style: typing.Optional[CitationStyles] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None, + asr_model: typing.Optional[AsrModels] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, + translation_model: typing.Optional[TranslationModels] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, + lipsync_model: typing.Optional[LipsyncModels] = None, tools: typing.Optional[typing.List[LlmTools]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, - tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, + response_format_type: typing.Optional[ResponseFormatType] = None, + tts_provider: typing.Optional[TextToSpeechProviders] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -446,8 +446,8 @@ async def completion( elevenlabs_style: 
typing.Optional[float] = None, elevenlabs_speaker_boost: typing.Optional[bool] = None, azure_voice_name: typing.Optional[str] = None, - openai_voice_name: typing.Optional[CopilotCompletionRequestOpenaiVoiceName] = None, - openai_tts_model: typing.Optional[CopilotCompletionRequestOpenaiTtsModel] = None, + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None, + openai_tts_model: typing.Optional[OpenAiTtsModels] = None, input_face: typing.Optional[core.File] = None, face_padding_top: typing.Optional[int] = None, face_padding_bottom: typing.Optional[int] = None, @@ -504,7 +504,7 @@ async def completion( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] + embedding_model : typing.Optional[EmbeddingModels] dense_weight : typing.Optional[float] @@ -512,17 +512,17 @@ async def completion( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] + citation_style : typing.Optional[CitationStyles] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[CopilotCompletionRequestAsrModel] + asr_model : typing.Optional[AsrModels] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] + translation_model : typing.Optional[TranslationModels] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -533,7 +533,7 @@ async def completion( output_glossary_document : typing.Optional[core.File] See core.File for more documentation - lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel] + lipsync_model : typing.Optional[LipsyncModels] tools : typing.Optional[typing.List[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). @@ -548,9 +548,9 @@ async def completion( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType] + response_format_type : typing.Optional[ResponseFormatType] - tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider] + tts_provider : typing.Optional[TextToSpeechProviders] uberduck_voice_name : typing.Optional[str] @@ -583,9 +583,9 @@ async def completion( azure_voice_name : typing.Optional[str] - openai_voice_name : typing.Optional[CopilotCompletionRequestOpenaiVoiceName] + openai_voice_name : typing.Optional[OpenAiTtsVoices] - openai_tts_model : typing.Optional[CopilotCompletionRequestOpenaiTtsModel] + openai_tts_model : typing.Optional[OpenAiTtsModels] input_face : typing.Optional[core.File] See core.File for more documentation diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py index 1cdf619..42cacaa 100644 --- a/src/gooey/copilot/types/__init__.py +++ b/src/gooey/copilot/types/__init__.py @@ -1,33 +1,13 @@ # This file was auto-generated by Fern from our API Definition. 
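With the copilot-scoped request enums replaced by the shared aliases, a call to the completion endpoint might look as follows. This is an illustrative sketch: the `Gooey` client class, the `client.copilot.completion(...)` access path, and any required request fields are assumptions; the string values are taken from the literal unions shown in this diff.

```python
from gooey import Gooey  # assumed top-level client class

client = Gooey(api_key="sk-...")  # hypothetical credentials

# Only the parameters whose types changed in this release are shown; the shared
# aliases are unions of string literals, so plain strings type-check.
result = client.copilot.completion(
    embedding_model="openai_3_large",   # EmbeddingModels
    citation_style="number",            # CitationStyles
    asr_model="whisper_large_v2",       # AsrModels
    translation_model="google",         # TranslationModels
    lipsync_model="Wav2Lip",            # LipsyncModels
    response_format_type="text",        # ResponseFormatType
    tts_provider="OPEN_AI",             # TextToSpeechProviders
    openai_voice_name="alloy",          # OpenAiTtsVoices
    openai_tts_model="tts_1",           # OpenAiTtsModels
)
```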
-from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel -from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle -from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem -from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger -from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel -from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel -from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName -from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings from .copilot_completion_request_sadtalker_settings_preprocess import ( CopilotCompletionRequestSadtalkerSettingsPreprocess, ) -from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel -from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider __all__ = [ - "CopilotCompletionRequestAsrModel", - "CopilotCompletionRequestCitationStyle", - "CopilotCompletionRequestEmbeddingModel", "CopilotCompletionRequestFunctionsItem", - "CopilotCompletionRequestFunctionsItemTrigger", - "CopilotCompletionRequestLipsyncModel", - "CopilotCompletionRequestOpenaiTtsModel", - "CopilotCompletionRequestOpenaiVoiceName", - "CopilotCompletionRequestResponseFormatType", "CopilotCompletionRequestSadtalkerSettings", "CopilotCompletionRequestSadtalkerSettingsPreprocess", - "CopilotCompletionRequestTranslationModel", - "CopilotCompletionRequestTtsProvider", ] diff --git a/src/gooey/copilot/types/copilot_completion_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py deleted file mode 100644 index 65ae0f5..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/copilot/types/copilot_completion_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py deleted file mode 100644 index 1bb273a..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -CopilotCompletionRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py deleted file mode 100644 index 4655801..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item.py b/src/gooey/copilot/types/copilot_completion_request_functions_item.py index c9654f1..d5cc53e 100644 --- a/src/gooey/copilot/types/copilot_completion_request_functions_item.py +++ b/src/gooey/copilot/types/copilot_completion_request_functions_item.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ...core.pydantic_utilities import UniversalBaseModel -from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger +from ...types.function_trigger import FunctionTrigger import pydantic from ...core.pydantic_utilities import IS_PYDANTIC_V2 import typing @@ -9,7 +9,7 @@ class CopilotCompletionRequestFunctionsItem(UniversalBaseModel): url: str - trigger: CopilotCompletionRequestFunctionsItemTrigger = pydantic.Field() + trigger: FunctionTrigger = pydantic.Field() """ When to run this function. `pre` runs before the recipe, `post` runs after the recipe. """ diff --git a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py b/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py deleted file mode 100644 index cf3e214..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_functions_item_trigger.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py deleted file mode 100644 index 865bc4b..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py b/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py deleted file mode 100644 index 4f4a35b..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -CopilotCompletionRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py b/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py deleted file mode 100644 index f60a6b3..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py deleted file mode 100644 index 3c9dbb0..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py deleted file mode 100644 index 10b0b5a..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py deleted file mode 100644 index 4dec4b0..0000000 --- a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
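For user code that imported the copilot-scoped aliases directly, the equivalent names now live in `gooey.types`. A hedged before/after sketch, assuming the old aliases were used as annotations:

```python
# beta26 (removed in this release):
#   from gooey.copilot import CopilotCompletionRequestAsrModel, CopilotCompletionRequestCitationStyle
# beta27 — the same literal values, under shared names:
from gooey.types import AsrModels, CitationStyles, ResponseFormatType

asr: AsrModels = "deepgram"          # any literal from the union, e.g. "whisper_large_v3"
style: CitationStyles = "markdown"
fmt: ResponseFormatType = "json_object"
```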
- -import typing - -CopilotCompletionRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -] diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py index 9c28e89..12788ee 100644 --- a/src/gooey/core/client_wrapper.py +++ b/src/gooey/core/client_wrapper.py @@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]: headers: typing.Dict[str, str] = { "X-Fern-Language": "Python", "X-Fern-SDK-Name": "gooeyai", - "X-Fern-SDK-Version": "0.0.1-beta26", + "X-Fern-SDK-Version": "0.0.1-beta27", } headers["Authorization"] = f"Bearer {self._get_api_key()}" return headers diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py index 9087b38..283f770 100644 --- a/src/gooey/types/__init__.py +++ b/src/gooey/types/__init__.py @@ -4,47 +4,44 @@ from .agg_function_function import AggFunctionFunction from .agg_function_result import AggFunctionResult from .agg_function_result_function import AggFunctionResultFunction +from .animation_models import AnimationModels from .animation_prompt import AnimationPrompt from .asr_chunk import AsrChunk +from .asr_models import AsrModels +from .asr_output_format import AsrOutputFormat from .asr_output_json import AsrOutputJson from .asr_page_output import AsrPageOutput from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem from .asr_page_request import AsrPageRequest -from .asr_page_request_output_format import AsrPageRequestOutputFormat -from .asr_page_request_selected_model import AsrPageRequestSelectedModel -from .asr_page_request_translation_model import AsrPageRequestTranslationModel from .asr_page_status_response import AsrPageStatusResponse from .async_api_response_model_v3 import AsyncApiResponseModelV3 from .balance_response import BalanceResponse from .bot_broadcast_filters import BotBroadcastFilters from .bot_broadcast_request_model import BotBroadcastRequestModel from .bulk_eval_page_output import BulkEvalPageOutput -from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType from .bulk_eval_page_status_response import BulkEvalPageStatusResponse from .bulk_runner_page_output import BulkRunnerPageOutput from .bulk_runner_page_request import BulkRunnerPageRequest from .bulk_runner_page_status_response import BulkRunnerPageStatusResponse from .button_pressed import ButtonPressed from .called_function_response import CalledFunctionResponse -from .called_function_response_trigger import CalledFunctionResponseTrigger from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam from .chyron_plant_page_output import ChyronPlantPageOutput from .chyron_plant_page_request import ChyronPlantPageRequest from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse +from .citation_styles import CitationStyles +from .combine_documents_chains import CombineDocumentsChains from .compare_llm_page_output import CompareLlmPageOutput -from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .compare_llm_page_status_response import CompareLlmPageStatusResponse from .compare_text2img_page_output import CompareText2ImgPageOutput -from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler -from .compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem 
from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse from .compare_upscaler_page_output import CompareUpscalerPageOutput from .compare_upscaler_page_request import CompareUpscalerPageRequest -from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse from .console_logs import ConsoleLogs from .console_logs_level import ConsoleLogsLevel +from .control_net_models import ControlNetModels from .conversation_entry import ConversationEntry from .conversation_entry_content import ConversationEntryContent from .conversation_entry_content_item import ( @@ -55,138 +52,82 @@ from .conversation_entry_role import ConversationEntryRole from .conversation_start import ConversationStart from .create_stream_request import CreateStreamRequest -from .create_stream_request_asr_model import CreateStreamRequestAsrModel -from .create_stream_request_citation_style import CreateStreamRequestCitationStyle -from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel -from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel -from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel -from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName -from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType -from .create_stream_request_translation_model import CreateStreamRequestTranslationModel -from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput -from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel from .deforum_sd_page_status_response import DeforumSdPageStatusResponse from .doc_extract_page_output import DocExtractPageOutput from .doc_extract_page_request import DocExtractPageRequest -from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType -from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput -from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle -from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel -from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType from .doc_search_page_status_response import DocSearchPageStatusResponse from .doc_summary_page_output import DocSummaryPageOutput from .doc_summary_page_request import DocSummaryPageRequest -from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType -from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel from .doc_summary_page_status_response import DocSummaryPageStatusResponse -from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType -from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput -from .email_face_inpainting_page_request_selected_model import 
EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse +from .embedding_models import EmbeddingModels from .embeddings_page_output import EmbeddingsPageOutput -from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .embeddings_page_status_response import EmbeddingsPageStatusResponse from .eval_prompt import EvalPrompt from .face_inpainting_page_output import FaceInpaintingPageOutput from .face_inpainting_page_request import FaceInpaintingPageRequest -from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse from .final_response import FinalResponse +from .function_trigger import FunctionTrigger from .functions_page_output import FunctionsPageOutput from .functions_page_status_response import FunctionsPageStatusResponse from .generic_error_response import GenericErrorResponse from .generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput -from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel -from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput -from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse from .http_validation_error import HttpValidationError +from .image_segmentation_models import ImageSegmentationModels from .image_segmentation_page_output import ImageSegmentationPageOutput from .image_segmentation_page_request import ImageSegmentationPageRequest -from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse +from .image_to_image_models import ImageToImageModels from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .img2img_page_output import Img2ImgPageOutput from .img2img_page_request import Img2ImgPageRequest -from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel -from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem -from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel from .img2img_page_status_response import Img2ImgPageStatusResponse +from .inpainting_models import InpaintingModels +from .keyword_query import KeywordQuery from .large_language_models import LargeLanguageModels from .letter_writer_page_output import LetterWriterPageOutput from .letter_writer_page_request import LetterWriterPageRequest from .letter_writer_page_status_response import LetterWriterPageStatusResponse +from .lipsync_models import LipsyncModels from .lipsync_page_output import LipsyncPageOutput from .lipsync_page_request import LipsyncPageRequest -from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from .lipsync_page_status_response import LipsyncPageStatusResponse -from .lipsync_request_selected_model import LipsyncRequestSelectedModel from .lipsync_tts_page_output import LipsyncTtsPageOutput from .lipsync_tts_page_request import 
LipsyncTtsPageRequest -from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel -from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName -from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel -from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse -from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel -from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName -from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel -from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider from .llm_tools import LlmTools from .message_part import MessagePart from .object_inpainting_page_output import ObjectInpaintingPageOutput from .object_inpainting_page_request import ObjectInpaintingPageRequest -from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse -from .portrait_request_selected_model import PortraitRequestSelectedModel -from .product_image_request_selected_model import ProductImageRequestSelectedModel +from .open_ai_tts_models import OpenAiTtsModels +from .open_ai_tts_voices import OpenAiTtsVoices from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput from .qr_code_generator_page_request import QrCodeGeneratorPageRequest -from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, -) -from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler -from .qr_code_generator_page_request_selected_controlnet_model_item import ( - QrCodeGeneratorPageRequestSelectedControlnetModelItem, -) -from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse -from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem -from .qr_code_request_scheduler import QrCodeRequestScheduler -from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem -from .qr_code_request_selected_model import QrCodeRequestSelectedModel from .recipe_function import RecipeFunction -from .recipe_function_trigger import RecipeFunctionTrigger from .recipe_run_state import RecipeRunState from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput -from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle -from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel -from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery -from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput -from .related_qn_a_page_request_embedding_model import 
RelatedQnAPageRequestEmbeddingModel -from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse -from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel -from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem -from .remix_image_request_selected_model import RemixImageRequestSelectedModel -from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel from .reply_button import ReplyButton +from .response_format_type import ResponseFormatType from .response_model import ResponseModel from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery from .response_model_final_prompt import ResponseModelFinalPrompt @@ -195,38 +136,31 @@ from .run_start import RunStart from .sad_talker_settings import SadTalkerSettings from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess +from .schedulers import Schedulers from .search_reference import SearchReference +from .selected_control_net_models import SelectedControlNetModels from .seo_summary_page_output import SeoSummaryPageOutput -from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .seo_summary_page_status_response import SeoSummaryPageStatusResponse -from .serp_search_location import SerpSearchLocation +from .serp_search_locations import SerpSearchLocations from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput -from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput -from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse -from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat -from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel -from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel from .stream_error import StreamError -from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType -from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel +from .text2audio_models import Text2AudioModels from .text2audio_page_output import Text2AudioPageOutput from .text2audio_page_status_response import Text2AudioPageStatusResponse +from .text_to_image_models import TextToImageModels from .text_to_speech_page_output import TextToSpeechPageOutput -from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel -from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName -from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse +from .text_to_speech_providers import TextToSpeechProviders from .training_data_model import TrainingDataModel -from .translate_request_selected_model import TranslateRequestSelectedModel +from .translation_models import TranslationModels from .translation_page_output import 
TranslationPageOutput from .translation_page_request import TranslationPageRequest -from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_status_response import TranslationPageStatusResponse -from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem +from .upscaler_models import UpscalerModels from .validation_error import ValidationError from .validation_error_loc_item import ValidationErrorLocItem from .vcard import Vcard @@ -234,19 +168,9 @@ from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt from .video_bots_page_request import VideoBotsPageRequest -from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel -from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle -from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem -from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger -from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel -from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel -from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName -from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess -from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel -from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from .video_bots_page_status_response import VideoBotsPageStatusResponse __all__ = [ @@ -254,47 +178,44 @@ "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", + "AnimationModels", "AnimationPrompt", "AsrChunk", + "AsrModels", + "AsrOutputFormat", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", "AsrPageRequest", - "AsrPageRequestOutputFormat", - "AsrPageRequestSelectedModel", - "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", "BalanceResponse", "BotBroadcastFilters", "BotBroadcastRequestModel", "BulkEvalPageOutput", - "BulkEvalPageRequestResponseFormatType", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", "BulkRunnerPageRequest", "BulkRunnerPageStatusResponse", "ButtonPressed", "CalledFunctionResponse", - "CalledFunctionResponseTrigger", "ChatCompletionContentPartImageParam", "ChatCompletionContentPartTextParam", "ChyronPlantPageOutput", "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", + "CitationStyles", + "CombineDocumentsChains", "CompareLlmPageOutput", - "CompareLlmPageRequestResponseFormatType", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", - "CompareText2ImgPageRequestScheduler", - "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", "CompareUpscalerPageRequest", - "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", + "ControlNetModels", "ConversationEntry", "ConversationEntryContent", "ConversationEntryContentItem", @@ -303,134 +224,82 
@@ "ConversationEntryRole", "ConversationStart", "CreateStreamRequest", - "CreateStreamRequestAsrModel", - "CreateStreamRequestCitationStyle", - "CreateStreamRequestEmbeddingModel", - "CreateStreamRequestLipsyncModel", - "CreateStreamRequestOpenaiTtsModel", - "CreateStreamRequestOpenaiVoiceName", - "CreateStreamRequestResponseFormatType", - "CreateStreamRequestTranslationModel", - "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", - "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", "DocExtractPageRequest", - "DocExtractPageRequestResponseFormatType", - "DocExtractPageRequestSelectedAsrModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", - "DocSearchPageRequestCitationStyle", - "DocSearchPageRequestEmbeddingModel", - "DocSearchPageRequestKeywordQuery", - "DocSearchPageRequestResponseFormatType", "DocSearchPageStatusResponse", "DocSummaryPageOutput", "DocSummaryPageRequest", - "DocSummaryPageRequestResponseFormatType", - "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageStatusResponse", - "DocSummaryRequestResponseFormatType", - "DocSummaryRequestSelectedAsrModel", "EmailFaceInpaintingPageOutput", - "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", + "EmbeddingModels", "EmbeddingsPageOutput", - "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", "FaceInpaintingPageRequest", - "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", + "FunctionTrigger", "FunctionsPageOutput", "FunctionsPageStatusResponse", "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", - "GoogleGptPageRequestEmbeddingModel", - "GoogleGptPageRequestResponseFormatType", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", - "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", + "ImageSegmentationModels", "ImageSegmentationPageOutput", "ImageSegmentationPageRequest", - "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", + "ImageToImageModels", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", "Img2ImgPageRequest", - "Img2ImgPageRequestSelectedControlnetModel", - "Img2ImgPageRequestSelectedControlnetModelItem", - "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", + "InpaintingModels", + "KeywordQuery", "LargeLanguageModels", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageStatusResponse", + "LipsyncModels", "LipsyncPageOutput", "LipsyncPageRequest", - "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", - "LipsyncRequestSelectedModel", "LipsyncTtsPageOutput", "LipsyncTtsPageRequest", - "LipsyncTtsPageRequestOpenaiTtsModel", - "LipsyncTtsPageRequestOpenaiVoiceName", - "LipsyncTtsPageRequestSelectedModel", - "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", - "LipsyncTtsRequestOpenaiTtsModel", - "LipsyncTtsRequestOpenaiVoiceName", - "LipsyncTtsRequestSelectedModel", - "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", "ObjectInpaintingPageRequest", - "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", - "PortraitRequestSelectedModel", - "ProductImageRequestSelectedModel", + "OpenAiTtsModels", + "OpenAiTtsVoices", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", "QrCodeGeneratorPageRequest", - 
"QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", - "QrCodeGeneratorPageRequestScheduler", - "QrCodeGeneratorPageRequestSelectedControlnetModelItem", - "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", - "QrCodeRequestImagePromptControlnetModelsItem", - "QrCodeRequestScheduler", - "QrCodeRequestSelectedControlnetModelItem", - "QrCodeRequestSelectedModel", "RecipeFunction", - "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", - "RelatedQnADocPageRequestCitationStyle", - "RelatedQnADocPageRequestEmbeddingModel", - "RelatedQnADocPageRequestKeywordQuery", - "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", - "RelatedQnAPageRequestEmbeddingModel", - "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageStatusResponse", - "RemixImageRequestSelectedControlnetModel", - "RemixImageRequestSelectedControlnetModelItem", - "RemixImageRequestSelectedModel", - "RemoveBackgroundRequestSelectedModel", "ReplyButton", + "ResponseFormatType", "ResponseModel", "ResponseModelFinalKeywordQuery", "ResponseModelFinalPrompt", @@ -439,38 +308,31 @@ "RunStart", "SadTalkerSettings", "SadTalkerSettingsPreprocess", + "Schedulers", "SearchReference", + "SelectedControlNetModels", "SeoSummaryPageOutput", - "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageStatusResponse", - "SerpSearchLocation", + "SerpSearchLocations", "SerpSearchType", "SmartGptPageOutput", - "SmartGptPageRequestResponseFormatType", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", - "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageStatusResponse", - "SpeechRecognitionRequestOutputFormat", - "SpeechRecognitionRequestSelectedModel", - "SpeechRecognitionRequestTranslationModel", "StreamError", - "SynthesizeDataRequestResponseFormatType", - "SynthesizeDataRequestSelectedAsrModel", + "Text2AudioModels", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", + "TextToImageModels", "TextToSpeechPageOutput", - "TextToSpeechPageRequestOpenaiTtsModel", - "TextToSpeechPageRequestOpenaiVoiceName", - "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", + "TextToSpeechProviders", "TrainingDataModel", - "TranslateRequestSelectedModel", + "TranslationModels", "TranslationPageOutput", "TranslationPageRequest", - "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", - "UpscaleRequestSelectedModelsItem", + "UpscalerModels", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -478,18 +340,8 @@ "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", "VideoBotsPageRequest", - "VideoBotsPageRequestAsrModel", - "VideoBotsPageRequestCitationStyle", - "VideoBotsPageRequestEmbeddingModel", "VideoBotsPageRequestFunctionsItem", - "VideoBotsPageRequestFunctionsItemTrigger", - "VideoBotsPageRequestLipsyncModel", - "VideoBotsPageRequestOpenaiTtsModel", - "VideoBotsPageRequestOpenaiVoiceName", - "VideoBotsPageRequestResponseFormatType", "VideoBotsPageRequestSadtalkerSettings", "VideoBotsPageRequestSadtalkerSettingsPreprocess", - "VideoBotsPageRequestTranslationModel", - "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", ] diff --git a/src/gooey/types/animation_models.py b/src/gooey/types/animation_models.py new file mode 100644 index 0000000..8ad7a84 --- /dev/null +++ b/src/gooey/types/animation_models.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +AnimationModels = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/create_stream_request_asr_model.py b/src/gooey/types/asr_models.py similarity index 91% rename from src/gooey/types/create_stream_request_asr_model.py rename to src/gooey/types/asr_models.py index af166fa..16c222a 100644 --- a/src/gooey/types/create_stream_request_asr_model.py +++ b/src/gooey/types/asr_models.py @@ -2,7 +2,7 @@ import typing -CreateStreamRequestAsrModel = typing.Union[ +AsrModels = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/types/asr_output_format.py b/src/gooey/types/asr_output_format.py new file mode 100644 index 0000000..b3b0e2d --- /dev/null +++ b/src/gooey/types/asr_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py index 1d35181..1210679 100644 --- a/src/gooey/types/asr_page_request.py +++ b/src/gooey/types/asr_page_request.py @@ -4,9 +4,9 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .asr_page_request_selected_model import AsrPageRequestSelectedModel -from .asr_page_request_translation_model import AsrPageRequestTranslationModel -from .asr_page_request_output_format import AsrPageRequestOutputFormat +from .asr_models import AsrModels +from .translation_models import TranslationModels +from .asr_output_format import AsrOutputFormat from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -19,10 +19,10 @@ class AsrPageRequest(UniversalBaseModel): """ documents: typing.List[str] - selected_model: typing.Optional[AsrPageRequestSelectedModel] = None + selected_model: typing.Optional[AsrModels] = None language: typing.Optional[str] = None - translation_model: typing.Optional[AsrPageRequestTranslationModel] = None - output_format: typing.Optional[AsrPageRequestOutputFormat] = None + translation_model: typing.Optional[TranslationModels] = None + output_format: typing.Optional[AsrOutputFormat] = None google_translate_target: typing.Optional[str] = pydantic.Field(default=None) """ use `translation_model` & `translation_target` instead. diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py deleted file mode 100644 index 101e681..0000000 --- a/src/gooey/types/asr_page_request_output_format.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/asr_page_request_selected_model.py b/src/gooey/types/asr_page_request_selected_model.py deleted file mode 100644 index 4e80d3c..0000000 --- a/src/gooey/types/asr_page_request_selected_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
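The `AsrPageRequest` model above now takes the shared aliases directly. A minimal construction sketch — the values are drawn from the literal unions in this diff, and only fields visible here are set:

```python
from gooey.types import AsrPageRequest

req = AsrPageRequest(
    documents=["https://example.com/call-recording.mp3"],  # example URL
    selected_model="whisper_large_v3",   # AsrModels
    translation_model="google",          # TranslationModels
    output_format="srt",                 # AsrOutputFormat: "text" | "json" | "srt" | "vtt"
)
```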
- -import typing - -AsrPageRequestSelectedModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py deleted file mode 100644 index d5dcef6..0000000 --- a/src/gooey/types/asr_page_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py deleted file mode 100644 index f1c242f..0000000 --- a/src/gooey/types/bulk_eval_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/called_function_response.py b/src/gooey/types/called_function_response.py index 9076f2d..7b421c4 100644 --- a/src/gooey/types/called_function_response.py +++ b/src/gooey/types/called_function_response.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ..core.pydantic_utilities import UniversalBaseModel -from .called_function_response_trigger import CalledFunctionResponseTrigger +from .function_trigger import FunctionTrigger import typing from ..core.pydantic_utilities import IS_PYDANTIC_V2 import pydantic @@ -9,7 +9,7 @@ class CalledFunctionResponse(UniversalBaseModel): url: str - trigger: CalledFunctionResponseTrigger + trigger: FunctionTrigger return_value: typing.Optional[typing.Optional[typing.Any]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/called_function_response_trigger.py b/src/gooey/types/called_function_response_trigger.py deleted file mode 100644 index d232a4f..0000000 --- a/src/gooey/types/called_function_response_trigger.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -CalledFunctionResponseTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/create_stream_request_citation_style.py b/src/gooey/types/citation_styles.py similarity index 90% rename from src/gooey/types/create_stream_request_citation_style.py rename to src/gooey/types/citation_styles.py index e57bab1..4d822c2 100644 --- a/src/gooey/types/create_stream_request_citation_style.py +++ b/src/gooey/types/citation_styles.py @@ -2,7 +2,7 @@ import typing -CreateStreamRequestCitationStyle = typing.Union[ +CitationStyles = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/doc_search_page_request_keyword_query.py b/src/gooey/types/combine_documents_chains.py similarity index 52% rename from src/gooey/types/doc_search_page_request_keyword_query.py rename to src/gooey/types/combine_documents_chains.py index 8083b3d..c457e00 100644 --- a/src/gooey/types/doc_search_page_request_keyword_query.py +++ b/src/gooey/types/combine_documents_chains.py @@ -2,4 +2,4 @@ import typing -DocSearchPageRequestKeywordQuery = typing.Union[str, typing.List[str]] +CombineDocumentsChains = typing.Literal["map_reduce"] diff --git a/src/gooey/types/compare_llm_page_request_response_format_type.py b/src/gooey/types/compare_llm_page_request_response_format_type.py deleted file mode 100644 index a846068..0000000 --- a/src/gooey/types/compare_llm_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/compare_text2img_page_request_scheduler.py b/src/gooey/types/compare_text2img_page_request_scheduler.py deleted file mode 100644 index 29ce840..0000000 --- a/src/gooey/types/compare_text2img_page_request_scheduler.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CompareText2ImgPageRequestScheduler = typing.Union[ - typing.Literal[ - "singlestep_dpm_solver", - "multistep_dpm_solver", - "dpm_sde", - "dpm_discrete", - "dpm_discrete_ancestral", - "unipc", - "lms_discrete", - "heun", - "euler", - "euler_ancestral", - "pndm", - "ddpm", - "ddim", - "deis", - ], - typing.Any, -] diff --git a/src/gooey/types/compare_text2img_page_request_selected_models_item.py b/src/gooey/types/compare_text2img_page_request_selected_models_item.py deleted file mode 100644 index 4154491..0000000 --- a/src/gooey/types/compare_text2img_page_request_selected_models_item.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
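Most of the shared aliases are plain `typing.Union[typing.Literal[...], typing.Any]` definitions, so the allowed strings can be recovered at runtime, for example to validate input or populate a dropdown. An illustrative sketch (not SDK functionality), using `AnimationModels` from above:

```python
import typing
from gooey.types import AnimationModels

literal_member = typing.get_args(AnimationModels)[0]   # the Literal[...] arm of the Union
ALLOWED = set(typing.get_args(literal_member))          # {"protogen_2_2", "epicdream"}

def validate_animation_model(value: str) -> str:
    # Hypothetical helper: reject strings outside the published literal set.
    if value not in ALLOWED:
        raise ValueError(f"unknown animation model: {value!r}")
    return value
```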
- -import typing - -CompareText2ImgPageRequestSelectedModelsItem = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "dall_e_3", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - "deepfloyd_if", - ], - typing.Any, -] diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py index 8cfb4e7..849d668 100644 --- a/src/gooey/types/compare_upscaler_page_request.py +++ b/src/gooey/types/compare_upscaler_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem +from .upscaler_models import UpscalerModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -23,7 +23,7 @@ class CompareUpscalerPageRequest(UniversalBaseModel): The final upsampling scale of the image """ - selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None + selected_models: typing.Optional[typing.List[UpscalerModels]] = None selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None settings: typing.Optional[RunSettings] = None diff --git a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py deleted file mode 100644 index eff4f6e..0000000 --- a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CompareUpscalerPageRequestSelectedModelsItem = typing.Union[ - typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any -] diff --git a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py b/src/gooey/types/control_net_models.py similarity index 89% rename from src/gooey/types/qr_code_request_selected_controlnet_model_item.py rename to src/gooey/types/control_net_models.py index c5cdc8d..5c5f68a 100644 --- a/src/gooey/types/qr_code_request_selected_controlnet_model_item.py +++ b/src/gooey/types/control_net_models.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestSelectedControlnetModelItem = typing.Union[ +ControlNetModels = typing.Union[ typing.Literal[ "sd_controlnet_canny", "sd_controlnet_depth", diff --git a/src/gooey/types/create_stream_request.py b/src/gooey/types/create_stream_request.py index 2d4745b..315e069 100644 --- a/src/gooey/types/create_stream_request.py +++ b/src/gooey/types/create_stream_request.py @@ -7,16 +7,16 @@ from .recipe_function import RecipeFunction from .conversation_entry import ConversationEntry from .large_language_models import LargeLanguageModels -from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel -from .create_stream_request_citation_style import CreateStreamRequestCitationStyle -from .create_stream_request_asr_model import CreateStreamRequestAsrModel -from .create_stream_request_translation_model import CreateStreamRequestTranslationModel -from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel +from .embedding_models import EmbeddingModels +from .citation_styles import CitationStyles +from .asr_models import AsrModels +from .translation_models import TranslationModels +from .lipsync_models import LipsyncModels from .llm_tools import LlmTools -from 
.create_stream_request_response_format_type import CreateStreamRequestResponseFormatType -from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider -from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName -from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel +from .response_format_type import ResponseFormatType +from .text_to_speech_providers import TextToSpeechProviders +from .open_ai_tts_voices import OpenAiTtsVoices +from .open_ai_tts_models import OpenAiTtsModels from .sad_talker_settings import SadTalkerSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -85,16 +85,16 @@ class CreateStreamRequest(UniversalBaseModel): max_references: typing.Optional[int] = None max_context_words: typing.Optional[int] = None scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = None + embedding_model: typing.Optional[EmbeddingModels] = None dense_weight: typing.Optional[float] = pydantic.Field(default=None) """ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. """ - citation_style: typing.Optional[CreateStreamRequestCitationStyle] = None + citation_style: typing.Optional[CitationStyles] = None use_url_shortener: typing.Optional[bool] = None - asr_model: typing.Optional[CreateStreamRequestAsrModel] = pydantic.Field(default=None) + asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None) """ Choose a model to transcribe incoming audio messages to text. """ @@ -104,7 +104,7 @@ class CreateStreamRequest(UniversalBaseModel): Choose a language to transcribe incoming audio messages to text. """ - translation_model: typing.Optional[CreateStreamRequestTranslationModel] = None + translation_model: typing.Optional[TranslationModels] = None user_language: typing.Optional[str] = pydantic.Field(default=None) """ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -120,7 +120,7 @@ class CreateStreamRequest(UniversalBaseModel): Translation Glossary for LLM Language (English) -> User Langauge """ - lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = None + lipsync_model: typing.Optional[LipsyncModels] = None tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None) """ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
@@ -131,8 +131,8 @@ class CreateStreamRequest(UniversalBaseModel): quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = None - tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = None + response_format_type: typing.Optional[ResponseFormatType] = None + tts_provider: typing.Optional[TextToSpeechProviders] = None uberduck_voice_name: typing.Optional[str] = None uberduck_speaking_rate: typing.Optional[float] = None google_voice_name: typing.Optional[str] = None @@ -152,8 +152,8 @@ class CreateStreamRequest(UniversalBaseModel): elevenlabs_style: typing.Optional[float] = None elevenlabs_speaker_boost: typing.Optional[bool] = None azure_voice_name: typing.Optional[str] = None - openai_voice_name: typing.Optional[CreateStreamRequestOpenaiVoiceName] = None - openai_tts_model: typing.Optional[CreateStreamRequestOpenaiTtsModel] = None + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None + openai_tts_model: typing.Optional[OpenAiTtsModels] = None input_face: typing.Optional[str] = None face_padding_top: typing.Optional[int] = None face_padding_bottom: typing.Optional[int] = None diff --git a/src/gooey/types/create_stream_request_lipsync_model.py b/src/gooey/types/create_stream_request_lipsync_model.py deleted file mode 100644 index c207d45..0000000 --- a/src/gooey/types/create_stream_request_lipsync_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/create_stream_request_openai_tts_model.py b/src/gooey/types/create_stream_request_openai_tts_model.py deleted file mode 100644 index 475ca67..0000000 --- a/src/gooey/types/create_stream_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateStreamRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/create_stream_request_openai_voice_name.py b/src/gooey/types/create_stream_request_openai_voice_name.py deleted file mode 100644 index 4f3dd7a..0000000 --- a/src/gooey/types/create_stream_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateStreamRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/create_stream_request_response_format_type.py b/src/gooey/types/create_stream_request_response_format_type.py deleted file mode 100644 index dc5024d..0000000 --- a/src/gooey/types/create_stream_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/create_stream_request_translation_model.py b/src/gooey/types/create_stream_request_translation_model.py deleted file mode 100644 index 3876937..0000000 --- a/src/gooey/types/create_stream_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/create_stream_request_tts_provider.py b/src/gooey/types/create_stream_request_tts_provider.py deleted file mode 100644 index cad602d..0000000 --- a/src/gooey/types/create_stream_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CreateStreamRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -] diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py deleted file mode 100644 index 3af657a..0000000 --- a/src/gooey/types/deforum_sd_page_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py index 9690c6c..4beeb94 100644 --- a/src/gooey/types/doc_extract_page_request.py +++ b/src/gooey/types/doc_extract_page_request.py @@ -4,9 +4,9 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel +from .asr_models import AsrModels from .large_language_models import LargeLanguageModels -from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .response_format_type import ResponseFormatType from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -20,7 +20,7 @@ class DocExtractPageRequest(UniversalBaseModel): documents: typing.List[str] sheet_url: typing.Optional[str] = None - selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None + selected_asr_model: typing.Optional[AsrModels] = None google_translate_target: typing.Optional[str] = None glossary_document: typing.Optional[str] = None task_instructions: typing.Optional[str] = None @@ -30,7 +30,7 @@ class DocExtractPageRequest(UniversalBaseModel): quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None + response_format_type: typing.Optional[ResponseFormatType] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py deleted file mode 100644 index 0ad7c14..0000000 --- a/src/gooey/types/doc_extract_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py deleted file mode 100644 index a358400..0000000 --- a/src/gooey/types/doc_extract_page_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -DocExtractPageRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/doc_search_page_request_citation_style.py b/src/gooey/types/doc_search_page_request_citation_style.py deleted file mode 100644 index b47b3be..0000000 --- a/src/gooey/types/doc_search_page_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSearchPageRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/types/doc_search_page_request_embedding_model.py b/src/gooey/types/doc_search_page_request_embedding_model.py deleted file mode 100644 index fb35612..0000000 --- a/src/gooey/types/doc_search_page_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSearchPageRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py deleted file mode 100644 index 856b641..0000000 --- a/src/gooey/types/doc_search_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py index 466ddc1..dadd11a 100644 --- a/src/gooey/types/doc_summary_page_request.py +++ b/src/gooey/types/doc_summary_page_request.py @@ -5,8 +5,9 @@ from .recipe_function import RecipeFunction import pydantic from .large_language_models import LargeLanguageModels -from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel -from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from .combine_documents_chains import CombineDocumentsChains +from .asr_models import AsrModels +from .response_format_type import ResponseFormatType from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -22,15 +23,15 @@ class DocSummaryPageRequest(UniversalBaseModel): task_instructions: typing.Optional[str] = None merge_instructions: typing.Optional[str] = None selected_model: typing.Optional[LargeLanguageModels] = None - chain_type: typing.Optional[typing.Literal["map_reduce"]] = None - selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None + chain_type: typing.Optional[CombineDocumentsChains] = None + selected_asr_model: typing.Optional[AsrModels] = None google_translate_target: typing.Optional[str] = None avoid_repetition: typing.Optional[bool] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None + response_format_type: typing.Optional[ResponseFormatType] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py deleted file mode 100644 index 318ad7f..0000000 --- a/src/gooey/types/doc_summary_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py deleted file mode 100644 index c04cc7a..0000000 --- a/src/gooey/types/doc_summary_page_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSummaryPageRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py deleted file mode 100644 index 8fabf9b..0000000 --- a/src/gooey/types/doc_summary_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py deleted file mode 100644 index 8b8a338..0000000 --- a/src/gooey/types/doc_summary_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -DocSummaryRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py deleted file mode 100644 index 822b5a6..0000000 --- a/src/gooey/types/email_face_inpainting_page_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -EmailFaceInpaintingPageRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/create_stream_request_embedding_model.py b/src/gooey/types/embedding_models.py similarity index 87% rename from src/gooey/types/create_stream_request_embedding_model.py rename to src/gooey/types/embedding_models.py index cef26bf..8007d2d 100644 --- a/src/gooey/types/create_stream_request_embedding_model.py +++ b/src/gooey/types/embedding_models.py @@ -2,7 +2,7 @@ import typing -CreateStreamRequestEmbeddingModel = typing.Union[ +EmbeddingModels = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/embeddings_page_request_selected_model.py b/src/gooey/types/embeddings_page_request_selected_model.py deleted file mode 100644 index a03ecc8..0000000 --- a/src/gooey/types/embeddings_page_request_selected_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -EmbeddingsPageRequestSelectedModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py index a653205..8e88a4f 100644 --- a/src/gooey/types/face_inpainting_page_request.py +++ b/src/gooey/types/face_inpainting_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel +from .inpainting_models import InpaintingModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -21,7 +21,7 @@ class FaceInpaintingPageRequest(UniversalBaseModel): face_scale: typing.Optional[float] = None face_pos_x: typing.Optional[float] = None face_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None + selected_model: typing.Optional[InpaintingModels] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None diff --git a/src/gooey/types/face_inpainting_page_request_selected_model.py b/src/gooey/types/face_inpainting_page_request_selected_model.py deleted file mode 100644 index 9b8eab6..0000000 --- a/src/gooey/types/face_inpainting_page_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -FaceInpaintingPageRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py b/src/gooey/types/function_trigger.py similarity index 51% rename from src/gooey/types/related_qn_a_doc_page_request_keyword_query.py rename to src/gooey/types/function_trigger.py index 4f35322..6986a3e 100644 --- a/src/gooey/types/related_qn_a_doc_page_request_keyword_query.py +++ b/src/gooey/types/function_trigger.py @@ -2,4 +2,4 @@ import typing -RelatedQnADocPageRequestKeywordQuery = typing.Union[str, typing.List[str]] +FunctionTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/google_gpt_page_request_embedding_model.py deleted file mode 100644 index 66f060f..0000000 --- a/src/gooey/types/google_gpt_page_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -GoogleGptPageRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py deleted file mode 100644 index dd04dec..0000000 --- a/src/gooey/types/google_gpt_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/google_image_gen_page_request_selected_model.py b/src/gooey/types/google_image_gen_page_request_selected_model.py deleted file mode 100644 index c872962..0000000 --- a/src/gooey/types/google_image_gen_page_request_selected_model.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -GoogleImageGenPageRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "instruct_pix2pix", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - ], - typing.Any, -] diff --git a/src/gooey/types/image_segmentation_models.py b/src/gooey/types/image_segmentation_models.py new file mode 100644 index 0000000..aae4fee --- /dev/null +++ b/src/gooey/types/image_segmentation_models.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ImageSegmentationModels = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py index a2ea60d..3e1952c 100644 --- a/src/gooey/types/image_segmentation_page_request.py +++ b/src/gooey/types/image_segmentation_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel +from .image_segmentation_models import ImageSegmentationModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,7 +17,7 @@ class ImageSegmentationPageRequest(UniversalBaseModel): """ input_image: str - selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None + selected_model: typing.Optional[ImageSegmentationModels] = None mask_threshold: typing.Optional[float] = None rect_persepective_transform: typing.Optional[bool] = None reflection_opacity: typing.Optional[float] = None diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py deleted file mode 100644 index 9b4b8d7..0000000 --- a/src/gooey/types/image_segmentation_page_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/remix_image_request_selected_model.py b/src/gooey/types/image_to_image_models.py similarity index 89% rename from src/gooey/types/remix_image_request_selected_model.py rename to src/gooey/types/image_to_image_models.py index 245d6b0..70c9201 100644 --- a/src/gooey/types/remix_image_request_selected_model.py +++ b/src/gooey/types/image_to_image_models.py @@ -2,7 +2,7 @@ import typing -RemixImageRequestSelectedModel = typing.Union[ +ImageToImageModels = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py index f3cfd2f..91e080f 100644 --- a/src/gooey/types/img2img_page_request.py +++ b/src/gooey/types/img2img_page_request.py @@ -4,8 +4,8 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel -from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .image_to_image_models import ImageToImageModels +from .selected_control_net_models import SelectedControlNetModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -19,8 +19,8 @@ class Img2ImgPageRequest(UniversalBaseModel): input_image: str text_prompt: typing.Optional[str] = None - selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None - selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None + selected_model: typing.Optional[ImageToImageModels] = None + selected_controlnet_model: typing.Optional[SelectedControlNetModels] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model.py b/src/gooey/types/img2img_page_request_selected_controlnet_model.py deleted file mode 100644 index df9cb36..0000000 --- a/src/gooey/types/img2img_page_request_selected_controlnet_model.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem - -Img2ImgPageRequestSelectedControlnetModel = typing.Union[ - typing.List[Img2ImgPageRequestSelectedControlnetModelItem], - typing.Literal["sd_controlnet_canny"], - typing.Literal["sd_controlnet_depth"], - typing.Literal["sd_controlnet_hed"], - typing.Literal["sd_controlnet_mlsd"], - typing.Literal["sd_controlnet_normal"], - typing.Literal["sd_controlnet_openpose"], - typing.Literal["sd_controlnet_scribble"], - typing.Literal["sd_controlnet_seg"], - typing.Literal["sd_controlnet_tile"], - typing.Literal["sd_controlnet_brightness"], - typing.Literal["control_v1p_sd15_qrcode_monster_v2"], -] diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py deleted file mode 100644 index 1569cf5..0000000 --- a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/img2img_page_request_selected_model.py b/src/gooey/types/img2img_page_request_selected_model.py deleted file mode 100644 index 506c2b1..0000000 --- a/src/gooey/types/img2img_page_request_selected_model.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -Img2ImgPageRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "instruct_pix2pix", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - ], - typing.Any, -] diff --git a/src/gooey/types/inpainting_models.py b/src/gooey/types/inpainting_models.py new file mode 100644 index 0000000..f851858 --- /dev/null +++ b/src/gooey/types/inpainting_models.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +InpaintingModels = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/keyword_query.py b/src/gooey/types/keyword_query.py new file mode 100644 index 0000000..42b3fed --- /dev/null +++ b/src/gooey/types/keyword_query.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +KeywordQuery = typing.Union[str, typing.List[str]] diff --git a/src/gooey/types/large_language_models.py b/src/gooey/types/large_language_models.py index 0e66b04..a3a475b 100644 --- a/src/gooey/types/large_language_models.py +++ b/src/gooey/types/large_language_models.py @@ -33,8 +33,10 @@ "claude_3_opus", "claude_3_sonnet", "claude_3_haiku", + "afrollama_v1", "sea_lion_7b_instruct", "llama3_8b_cpt_sea_lion_v2_instruct", + "llama3_8b_cpt_sea_lion_v2_1_instruct", "sarvam_2b", "text_davinci_003", "text_davinci_002", diff --git a/src/gooey/types/lipsync_models.py b/src/gooey/types/lipsync_models.py new file mode 100644 index 0000000..0ee41ee --- /dev/null +++ b/src/gooey/types/lipsync_models.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LipsyncModels = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_page_output.py b/src/gooey/types/lipsync_page_output.py index e3b08c3..2b8e359 100644 --- a/src/gooey/types/lipsync_page_output.py +++ b/src/gooey/types/lipsync_page_output.py @@ -9,6 +9,7 @@ class LipsyncPageOutput(UniversalBaseModel): output_video: str + duration_sec: typing.Optional[float] = None called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py index 2914a1e..5ea1d3f 100644 --- a/src/gooey/types/lipsync_page_request.py +++ b/src/gooey/types/lipsync_page_request.py @@ -5,7 +5,7 @@ from .recipe_function import RecipeFunction import pydantic from .sad_talker_settings import SadTalkerSettings -from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel +from .lipsync_models import LipsyncModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -23,7 +23,7 @@ class LipsyncPageRequest(UniversalBaseModel): face_padding_left: typing.Optional[int] = None face_padding_right: typing.Optional[int] = None sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None + selected_model: typing.Optional[LipsyncModels] = None input_audio: typing.Optional[str] = None settings: typing.Optional[RunSettings] = None diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py deleted file mode 100644 index da68ef8..0000000 --- a/src/gooey/types/lipsync_page_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py deleted file mode 100644 index c5614b4..0000000 --- a/src/gooey/types/lipsync_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_output.py b/src/gooey/types/lipsync_tts_page_output.py index e687416..ea1a0c9 100644 --- a/src/gooey/types/lipsync_tts_page_output.py +++ b/src/gooey/types/lipsync_tts_page_output.py @@ -10,6 +10,7 @@ class LipsyncTtsPageOutput(UniversalBaseModel): audio_url: typing.Optional[str] = None output_video: str + duration_sec: typing.Optional[float] = None called_functions: typing.Optional[typing.List[CalledFunctionResponse]] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py index f4f5293..e6d2f19 100644 --- a/src/gooey/types/lipsync_tts_page_request.py +++ b/src/gooey/types/lipsync_tts_page_request.py @@ -4,11 +4,11 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider -from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName -from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel +from .text_to_speech_providers import TextToSpeechProviders +from .open_ai_tts_voices import OpenAiTtsVoices +from .open_ai_tts_models import OpenAiTtsModels from .sad_talker_settings import SadTalkerSettings -from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .lipsync_models import LipsyncModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -21,7 +21,7 @@ class LipsyncTtsPageRequest(UniversalBaseModel): """ text_prompt: str - tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None + tts_provider: typing.Optional[TextToSpeechProviders] = None uberduck_voice_name: typing.Optional[str] = None uberduck_speaking_rate: typing.Optional[float] = None google_voice_name: typing.Optional[str] = None @@ -41,15 +41,15 @@ class LipsyncTtsPageRequest(UniversalBaseModel): elevenlabs_style: typing.Optional[float] = None elevenlabs_speaker_boost: typing.Optional[bool] = None azure_voice_name: typing.Optional[str] = None - openai_voice_name: typing.Optional[LipsyncTtsPageRequestOpenaiVoiceName] = None - openai_tts_model: typing.Optional[LipsyncTtsPageRequestOpenaiTtsModel] = None + openai_voice_name: typing.Optional[OpenAiTtsVoices] = None + openai_tts_model: typing.Optional[OpenAiTtsModels] = None input_face: typing.Optional[str] = None face_padding_top: typing.Optional[int] = None face_padding_bottom: typing.Optional[int] = None face_padding_left: typing.Optional[int] = None face_padding_right: typing.Optional[int] = None sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None + selected_model: typing.Optional[LipsyncModels] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py deleted file mode 100644 index 453ab4a..0000000 --- a/src/gooey/types/lipsync_tts_page_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -LipsyncTtsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py deleted file mode 100644 index 4873924..0000000 --- a/src/gooey/types/lipsync_tts_page_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsPageRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py deleted file mode 100644 index 538058b..0000000 --- a/src/gooey/types/lipsync_tts_page_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_request_tts_provider.py b/src/gooey/types/lipsync_tts_page_request_tts_provider.py deleted file mode 100644 index 7e73fda..0000000 --- a/src/gooey/types/lipsync_tts_page_request_tts_provider.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsPageRequestTtsProvider = typing.Union[ - typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any -] diff --git a/src/gooey/types/lipsync_tts_request_openai_tts_model.py b/src/gooey/types/lipsync_tts_request_openai_tts_model.py deleted file mode 100644 index 510dcfb..0000000 --- a/src/gooey/types/lipsync_tts_request_openai_tts_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_request_openai_voice_name.py b/src/gooey/types/lipsync_tts_request_openai_voice_name.py deleted file mode 100644 index 7ea601b..0000000 --- a/src/gooey/types/lipsync_tts_request_openai_voice_name.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -LipsyncTtsRequestOpenaiVoiceName = typing.Union[ - typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any -] diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py deleted file mode 100644 index 9ece5a9..0000000 --- a/src/gooey/types/lipsync_tts_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py index 50b5b72..54d23b5 100644 --- a/src/gooey/types/object_inpainting_page_request.py +++ b/src/gooey/types/object_inpainting_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel +from .inpainting_models import InpaintingModels from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -22,7 +22,7 @@ class ObjectInpaintingPageRequest(UniversalBaseModel): obj_pos_x: typing.Optional[float] = None obj_pos_y: typing.Optional[float] = None mask_threshold: typing.Optional[float] = None - selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None + selected_model: typing.Optional[InpaintingModels] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py deleted file mode 100644 index 92f1302..0000000 --- a/src/gooey/types/object_inpainting_page_request_selected_model.py +++ /dev/null @@ -1,7 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ObjectInpaintingPageRequestSelectedModel = typing.Union[ - typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any -] diff --git a/src/gooey/types/recipe_function_trigger.py b/src/gooey/types/open_ai_tts_models.py similarity index 60% rename from src/gooey/types/recipe_function_trigger.py rename to src/gooey/types/open_ai_tts_models.py index 7c7dead..f04ce37 100644 --- a/src/gooey/types/recipe_function_trigger.py +++ b/src/gooey/types/open_ai_tts_models.py @@ -2,4 +2,4 @@ import typing -RecipeFunctionTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any] +OpenAiTtsModels = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any] diff --git a/src/gooey/types/open_ai_tts_voices.py b/src/gooey/types/open_ai_tts_voices.py new file mode 100644 index 0000000..26d538c --- /dev/null +++ b/src/gooey/types/open_ai_tts_voices.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +OpenAiTtsVoices = typing.Union[typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any] diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py deleted file mode 100644 index 6c4a5ce..0000000 --- a/src/gooey/types/portrait_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py deleted file mode 100644 index f1ce039..0000000 --- a/src/gooey/types/product_image_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py index 68f3730..60831cb 100644 --- a/src/gooey/types/qr_code_generator_page_request.py +++ b/src/gooey/types/qr_code_generator_page_request.py @@ -5,14 +5,9 @@ from .recipe_function import RecipeFunction import pydantic from .vcard import Vcard -from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( - QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, -) -from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel -from .qr_code_generator_page_request_selected_controlnet_model_item import ( - QrCodeGeneratorPageRequestSelectedControlnetModelItem, -) -from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .control_net_models import ControlNetModels +from .text_to_image_models import TextToImageModels +from .schedulers import Schedulers from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -32,24 +27,20 @@ class QrCodeGeneratorPageRequest(UniversalBaseModel): text_prompt: str negative_prompt: typing.Optional[str] = None image_prompt: typing.Optional[str] = None - image_prompt_controlnet_models: typing.Optional[ - typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] - ] = None + image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None image_prompt_strength: typing.Optional[float] = None image_prompt_scale: typing.Optional[float] = None image_prompt_pos_x: typing.Optional[float] = None image_prompt_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None - selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = ( - None - ) + selected_model: typing.Optional[TextToImageModels] = None + selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None output_width: typing.Optional[int] = None output_height: typing.Optional[int] = None guidance_scale: typing.Optional[float] = None controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None - scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None + scheduler: typing.Optional[Schedulers] = None seed: typing.Optional[int] = None obj_scale: typing.Optional[float] = None obj_pos_x: typing.Optional[float] = None diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py deleted file mode 100644 index 508e7e9..0000000 --- a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/qr_code_generator_page_request_scheduler.py b/src/gooey/types/qr_code_generator_page_request_scheduler.py deleted file mode 100644 index e30308a..0000000 --- a/src/gooey/types/qr_code_generator_page_request_scheduler.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -QrCodeGeneratorPageRequestScheduler = typing.Union[ - typing.Literal[ - "singlestep_dpm_solver", - "multistep_dpm_solver", - "dpm_sde", - "dpm_discrete", - "dpm_discrete_ancestral", - "unipc", - "lms_discrete", - "heun", - "euler", - "euler_ancestral", - "pndm", - "ddpm", - "ddim", - "deis", - ], - typing.Any, -] diff --git a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py deleted file mode 100644 index c6f1967..0000000 --- a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/qr_code_generator_page_request_selected_model.py b/src/gooey/types/qr_code_generator_page_request_selected_model.py deleted file mode 100644 index 97282cb..0000000 --- a/src/gooey/types/qr_code_generator_page_request_selected_model.py +++ /dev/null @@ -1,22 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -QrCodeGeneratorPageRequestSelectedModel = typing.Union[ - typing.Literal[ - "dream_shaper", - "dreamlike_2", - "sd_2", - "sd_1_5", - "dall_e", - "dall_e_3", - "openjourney_2", - "openjourney", - "analog_diffusion", - "protogen_5_3", - "jack_qiao", - "rodent_diffusion_1_5", - "deepfloyd_if", - ], - typing.Any, -] diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py deleted file mode 100644 index 3be2ab6..0000000 --- a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -QrCodeRequestImagePromptControlnetModelsItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/recipe_function.py b/src/gooey/types/recipe_function.py index 08bea99..d5e1244 100644 --- a/src/gooey/types/recipe_function.py +++ b/src/gooey/types/recipe_function.py @@ -1,7 +1,7 @@ # This file was auto-generated by Fern from our API Definition. from ..core.pydantic_utilities import UniversalBaseModel -from .recipe_function_trigger import RecipeFunctionTrigger +from .function_trigger import FunctionTrigger import pydantic from ..core.pydantic_utilities import IS_PYDANTIC_V2 import typing @@ -9,7 +9,7 @@ class RecipeFunction(UniversalBaseModel): url: str - trigger: RecipeFunctionTrigger = pydantic.Field() + trigger: FunctionTrigger = pydantic.Field() """ When to run this function. `pre` runs before the recipe, `post` runs after the recipe. """ diff --git a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py deleted file mode 100644 index b98f002..0000000 --- a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py +++ /dev/null @@ -1,25 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RelatedQnADocPageRequestCitationStyle = typing.Union[ - typing.Literal[ - "number", - "title", - "url", - "symbol", - "markdown", - "html", - "slack_mrkdwn", - "plaintext", - "number_markdown", - "number_html", - "number_slack_mrkdwn", - "number_plaintext", - "symbol_markdown", - "symbol_html", - "symbol_slack_mrkdwn", - "symbol_plaintext", - ], - typing.Any, -] diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py deleted file mode 100644 index 680bbb5..0000000 --- a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RelatedQnADocPageRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py deleted file mode 100644 index c65a896..0000000 --- a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py deleted file mode 100644 index a591920..0000000 --- a/src/gooey/types/related_qn_a_page_request_embedding_model.py +++ /dev/null @@ -1,18 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -RelatedQnAPageRequestEmbeddingModel = typing.Union[ - typing.Literal[ - "openai_3_large", - "openai_3_small", - "openai_ada_2", - "e5_large_v2", - "e5_base_v2", - "multilingual_e5_base", - "multilingual_e5_large", - "gte_large", - "gte_base", - ], - typing.Any, -] diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py deleted file mode 100644 index 7bada87..0000000 --- a/src/gooey/types/related_qn_a_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py deleted file mode 100644 index eea207f..0000000 --- a/src/gooey/types/remix_image_request_selected_controlnet_model.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing -from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem - -RemixImageRequestSelectedControlnetModel = typing.Union[ - typing.List[RemixImageRequestSelectedControlnetModelItem], - typing.Literal["sd_controlnet_canny"], - typing.Literal["sd_controlnet_depth"], - typing.Literal["sd_controlnet_hed"], - typing.Literal["sd_controlnet_mlsd"], - typing.Literal["sd_controlnet_normal"], - typing.Literal["sd_controlnet_openpose"], - typing.Literal["sd_controlnet_scribble"], - typing.Literal["sd_controlnet_seg"], - typing.Literal["sd_controlnet_tile"], - typing.Literal["sd_controlnet_brightness"], - typing.Literal["control_v1p_sd15_qrcode_monster_v2"], -] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py deleted file mode 100644 index b4f3ff0..0000000 --- a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py +++ /dev/null @@ -1,20 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RemixImageRequestSelectedControlnetModelItem = typing.Union[ - typing.Literal[ - "sd_controlnet_canny", - "sd_controlnet_depth", - "sd_controlnet_hed", - "sd_controlnet_mlsd", - "sd_controlnet_normal", - "sd_controlnet_openpose", - "sd_controlnet_scribble", - "sd_controlnet_seg", - "sd_controlnet_tile", - "sd_controlnet_brightness", - "control_v1p_sd15_qrcode_monster_v2", - ], - typing.Any, -] diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py deleted file mode 100644 index c84f0e7..0000000 --- a/src/gooey/types/remove_background_request_selected_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/response_format_type.py b/src/gooey/types/response_format_type.py new file mode 100644 index 0000000..f8216e9 --- /dev/null +++ b/src/gooey/types/response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/qr_code_request_scheduler.py b/src/gooey/types/schedulers.py similarity index 91% rename from src/gooey/types/qr_code_request_scheduler.py rename to src/gooey/types/schedulers.py index 890b204..d3b5398 100644 --- a/src/gooey/types/qr_code_request_scheduler.py +++ b/src/gooey/types/schedulers.py @@ -2,7 +2,7 @@ import typing -QrCodeRequestScheduler = typing.Union[ +Schedulers = typing.Union[ typing.Literal[ "singlestep_dpm_solver", "multistep_dpm_solver", diff --git a/src/gooey/types/selected_control_net_models.py b/src/gooey/types/selected_control_net_models.py new file mode 100644 index 0000000..26f9b23 --- /dev/null +++ b/src/gooey/types/selected_control_net_models.py @@ -0,0 +1,6 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing +from .control_net_models import ControlNetModels + +SelectedControlNetModels = typing.Union[typing.List[ControlNetModels], ControlNetModels] diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py deleted file mode 100644 index 26f948b..0000000 --- a/src/gooey/types/seo_summary_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/serp_search_location.py b/src/gooey/types/serp_search_locations.py similarity index 98% rename from src/gooey/types/serp_search_location.py rename to src/gooey/types/serp_search_locations.py index 9b64ad9..2d5144d 100644 --- a/src/gooey/types/serp_search_location.py +++ b/src/gooey/types/serp_search_locations.py @@ -2,7 +2,7 @@ import typing -SerpSearchLocation = typing.Union[ +SerpSearchLocations = typing.Union[ typing.Literal[ "af", "al", diff --git a/src/gooey/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py deleted file mode 100644 index 1eaf901..0000000 --- a/src/gooey/types/smart_gpt_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/social_lookup_email_page_request_response_format_type.py b/src/gooey/types/social_lookup_email_page_request_response_format_type.py deleted file mode 100644 index 46c50db..0000000 --- a/src/gooey/types/social_lookup_email_page_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py deleted file mode 100644 index 4d2cf2b..0000000 --- a/src/gooey/types/speech_recognition_request_output_format.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py deleted file mode 100644 index 9d2d28f..0000000 --- a/src/gooey/types/speech_recognition_request_selected_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SpeechRecognitionRequestSelectedModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py deleted file mode 100644 index 886ab92..0000000 --- a/src/gooey/types/speech_recognition_request_translation_model.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py deleted file mode 100644 index 3ab37a9..0000000 --- a/src/gooey/types/synthesize_data_request_response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py deleted file mode 100644 index 6c1bc21..0000000 --- a/src/gooey/types/synthesize_data_request_selected_asr_model.py +++ /dev/null @@ -1,23 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -SynthesizeDataRequestSelectedAsrModel = typing.Union[ - typing.Literal[ - "whisper_large_v2", - "whisper_large_v3", - "whisper_hindi_large_v2", - "whisper_telugu_large_v2", - "nemo_english", - "nemo_hindi", - "vakyansh_bhojpuri", - "gcp_v1", - "usm", - "deepgram", - "azure", - "seamless_m4t_v2", - "mms_1b_all", - "seamless_m4t", - ], - typing.Any, -] diff --git a/src/gooey/types/text2audio_models.py b/src/gooey/types/text2audio_models.py new file mode 100644 index 0000000..b3eb9eb --- /dev/null +++ b/src/gooey/types/text2audio_models.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
diff --git a/src/gooey/types/text2audio_models.py b/src/gooey/types/text2audio_models.py
new file mode 100644
index 0000000..b3eb9eb
--- /dev/null
+++ b/src/gooey/types/text2audio_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+Text2AudioModels = typing.Literal["audio_ldm"]
diff --git a/src/gooey/types/qr_code_request_selected_model.py b/src/gooey/types/text_to_image_models.py
similarity index 90%
rename from src/gooey/types/qr_code_request_selected_model.py
rename to src/gooey/types/text_to_image_models.py
index 7ea963c..fd17514 100644
--- a/src/gooey/types/qr_code_request_selected_model.py
+++ b/src/gooey/types/text_to_image_models.py
@@ -2,7 +2,7 @@
 
 import typing
 
-QrCodeRequestSelectedModel = typing.Union[
+TextToImageModels = typing.Union[
     typing.Literal[
         "dream_shaper",
         "dreamlike_2",
diff --git a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py b/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
deleted file mode 100644
index 685dfff..0000000
--- a/src/gooey/types/text_to_speech_page_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextToSpeechPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/text_to_speech_page_request_openai_voice_name.py b/src/gooey/types/text_to_speech_page_request_openai_voice_name.py
deleted file mode 100644
index efd862f..0000000
--- a/src/gooey/types/text_to_speech_page_request_openai_voice_name.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextToSpeechPageRequestOpenaiVoiceName = typing.Union[
-    typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
-]
diff --git a/src/gooey/types/text_to_speech_page_request_tts_provider.py b/src/gooey/types/text_to_speech_page_request_tts_provider.py
deleted file mode 100644
index a6b8938..0000000
--- a/src/gooey/types/text_to_speech_page_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TextToSpeechPageRequestTtsProvider = typing.Union[
-    typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
diff --git a/src/gooey/types/lipsync_tts_request_tts_provider.py b/src/gooey/types/text_to_speech_providers.py
similarity index 80%
rename from src/gooey/types/lipsync_tts_request_tts_provider.py
rename to src/gooey/types/text_to_speech_providers.py
index 1a23fe3..f86047f 100644
--- a/src/gooey/types/lipsync_tts_request_tts_provider.py
+++ b/src/gooey/types/text_to_speech_providers.py
@@ -2,6 +2,6 @@
 
 import typing
 
-LipsyncTtsRequestTtsProvider = typing.Union[
+TextToSpeechProviders = typing.Union[
     typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
 ]
diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py
deleted file mode 100644
index b774b56..0000000
--- a/src/gooey/types/translate_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
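
The per-endpoint TTS provider enums removed above now resolve to a single TextToSpeechProviders alias (renamed from LipsyncTtsRequestTtsProvider). A minimal sketch, assuming the same provider literals shown in the rename:

    from gooey.types.text_to_speech_providers import TextToSpeechProviders

    # One alias now covers what TextToSpeechPageRequestTtsProvider and
    # LipsyncTtsRequestTtsProvider used to describe separately.
    provider: TextToSpeechProviders = "ELEVEN_LABS"
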
diff --git a/src/gooey/types/translation_models.py b/src/gooey/types/translation_models.py
new file mode 100644
index 0000000..136ecb8
--- /dev/null
+++ b/src/gooey/types/translation_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+TranslationModels = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py
index 9c033a6..6845f7f 100644
--- a/src/gooey/types/translation_page_request.py
+++ b/src/gooey/types/translation_page_request.py
@@ -4,7 +4,7 @@
 import typing
 from .recipe_function import RecipeFunction
 import pydantic
-from .translation_page_request_selected_model import TranslationPageRequestSelectedModel
+from .translation_models import TranslationModels
 from .run_settings import RunSettings
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 
@@ -17,7 +17,7 @@ class TranslationPageRequest(UniversalBaseModel):
     """
 
     texts: typing.Optional[typing.List[str]] = None
-    selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None
+    selected_model: typing.Optional[TranslationModels] = None
     translation_source: typing.Optional[str] = None
     translation_target: typing.Optional[str] = None
     glossary_document: typing.Optional[str] = None
diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py
deleted file mode 100644
index 62ae9ab..0000000
--- a/src/gooey/types/translation_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/upscale_request_selected_models_item.py b/src/gooey/types/upscaler_models.py
similarity index 78%
rename from src/gooey/types/upscale_request_selected_models_item.py
rename to src/gooey/types/upscaler_models.py
index 1a8362e..314c03a 100644
--- a/src/gooey/types/upscale_request_selected_models_item.py
+++ b/src/gooey/types/upscaler_models.py
@@ -2,6 +2,6 @@
 
 import typing
 
-UpscaleRequestSelectedModelsItem = typing.Union[
+UpscalerModels = typing.Union[
     typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any
 ]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
index 6fb8b5e..3489fad 100644
--- a/src/gooey/types/video_bots_page_request.py
+++ b/src/gooey/types/video_bots_page_request.py
@@ -6,16 +6,16 @@
 import pydantic
 from .conversation_entry import ConversationEntry
 from .large_language_models import LargeLanguageModels
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
-from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
-from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .embedding_models import EmbeddingModels
+from .citation_styles import CitationStyles
+from .asr_models import AsrModels
+from .translation_models import TranslationModels
+from .lipsync_models import LipsyncModels
 from .llm_tools import LlmTools
-from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
-from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
-from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
-from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
+from .response_format_type import ResponseFormatType
+from .text_to_speech_providers import TextToSpeechProviders
+from .open_ai_tts_voices import OpenAiTtsVoices
+from .open_ai_tts_models import OpenAiTtsModels
 from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings
 from .run_settings import RunSettings
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -52,16 +52,16 @@ class VideoBotsPageRequest(UniversalBaseModel):
     max_references: typing.Optional[int] = None
     max_context_words: typing.Optional[int] = None
     scroll_jump: typing.Optional[int] = None
-    embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
+    embedding_model: typing.Optional[EmbeddingModels] = None
     dense_weight: typing.Optional[float] = pydantic.Field(default=None)
     """
     Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
     Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches.
     """
-    citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
+    citation_style: typing.Optional[CitationStyles] = None
     use_url_shortener: typing.Optional[bool] = None
-    asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
+    asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None)
     """
     Choose a model to transcribe incoming audio messages to text.
     """
@@ -71,7 +71,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
     Choose a language to transcribe incoming audio messages to text.
     """
-    translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None
+    translation_model: typing.Optional[TranslationModels] = None
     user_language: typing.Optional[str] = pydantic.Field(default=None)
     """
     Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages.
@@ -79,7 +79,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
     input_glossary_document: typing.Optional[str] = None
     output_glossary_document: typing.Optional[str] = None
-    lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
+    lipsync_model: typing.Optional[LipsyncModels] = None
     tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
     """
     Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -90,8 +90,8 @@ class VideoBotsPageRequest(UniversalBaseModel):
     quality: typing.Optional[float] = None
     max_tokens: typing.Optional[int] = None
     sampling_temperature: typing.Optional[float] = None
-    response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None
-    tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None
+    response_format_type: typing.Optional[ResponseFormatType] = None
+    tts_provider: typing.Optional[TextToSpeechProviders] = None
     uberduck_voice_name: typing.Optional[str] = None
     uberduck_speaking_rate: typing.Optional[float] = None
     google_voice_name: typing.Optional[str] = None
@@ -111,8 +111,8 @@ class VideoBotsPageRequest(UniversalBaseModel):
     elevenlabs_style: typing.Optional[float] = None
     elevenlabs_speaker_boost: typing.Optional[bool] = None
     azure_voice_name: typing.Optional[str] = None
-    openai_voice_name: typing.Optional[VideoBotsPageRequestOpenaiVoiceName] = None
-    openai_tts_model: typing.Optional[VideoBotsPageRequestOpenaiTtsModel] = None
+    openai_voice_name: typing.Optional[OpenAiTtsVoices] = None
+    openai_tts_model: typing.Optional[OpenAiTtsModels] = None
     input_face: typing.Optional[str] = None
     face_padding_top: typing.Optional[int] = None
    face_padding_bottom: typing.Optional[int] = None
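
With VideoBotsPageRequest now annotated with the shared aliases, the field values themselves do not change. A minimal construction sketch, assuming the remaining VideoBotsPageRequest fields are optional like the ones shown in the hunks above:

    from gooey.types.video_bots_page_request import VideoBotsPageRequest

    # The literal values below come from type definitions elsewhere in
    # this diff ("text", "OPEN_AI", "alloy", "tts_1", "google").
    request = VideoBotsPageRequest(
        response_format_type="text",
        tts_provider="OPEN_AI",
        openai_voice_name="alloy",
        openai_tts_model="tts_1",
        translation_model="google",
    )
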
diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py
deleted file mode 100644
index 7db13bc..0000000
--- a/src/gooey/types/video_bots_page_request_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestAsrModel = typing.Union[
-    typing.Literal[
-        "whisper_large_v2",
-        "whisper_large_v3",
-        "whisper_hindi_large_v2",
-        "whisper_telugu_large_v2",
-        "nemo_english",
-        "nemo_hindi",
-        "vakyansh_bhojpuri",
-        "gcp_v1",
-        "usm",
-        "deepgram",
-        "azure",
-        "seamless_m4t_v2",
-        "mms_1b_all",
-        "seamless_m4t",
-    ],
-    typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py
deleted file mode 100644
index dc3630b..0000000
--- a/src/gooey/types/video_bots_page_request_citation_style.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestCitationStyle = typing.Union[
-    typing.Literal[
-        "number",
-        "title",
-        "url",
-        "symbol",
-        "markdown",
-        "html",
-        "slack_mrkdwn",
-        "plaintext",
-        "number_markdown",
-        "number_html",
-        "number_slack_mrkdwn",
-        "number_plaintext",
-        "symbol_markdown",
-        "symbol_html",
-        "symbol_slack_mrkdwn",
-        "symbol_plaintext",
-    ],
-    typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
deleted file mode 100644
index 19c8972..0000000
--- a/src/gooey/types/video_bots_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestEmbeddingModel = typing.Union[
-    typing.Literal[
-        "openai_3_large",
-        "openai_3_small",
-        "openai_ada_2",
-        "e5_large_v2",
-        "e5_base_v2",
-        "multilingual_e5_base",
-        "multilingual_e5_large",
-        "gte_large",
-        "gte_base",
-    ],
-    typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_functions_item.py b/src/gooey/types/video_bots_page_request_functions_item.py
index 5803c05..40e7e4d 100644
--- a/src/gooey/types/video_bots_page_request_functions_item.py
+++ b/src/gooey/types/video_bots_page_request_functions_item.py
@@ -1,7 +1,7 @@
 # This file was auto-generated by Fern from our API Definition.
 
 from ..core.pydantic_utilities import UniversalBaseModel
-from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
+from .function_trigger import FunctionTrigger
 import pydantic
 from ..core.pydantic_utilities import IS_PYDANTIC_V2
 import typing
@@ -9,7 +9,7 @@
 
 class VideoBotsPageRequestFunctionsItem(UniversalBaseModel):
     url: str
-    trigger: VideoBotsPageRequestFunctionsItemTrigger = pydantic.Field()
+    trigger: FunctionTrigger = pydantic.Field()
     """
     When to run this function. `pre` runs before the recipe, `post` runs after the recipe.
     """
diff --git a/src/gooey/types/video_bots_page_request_functions_item_trigger.py b/src/gooey/types/video_bots_page_request_functions_item_trigger.py
deleted file mode 100644
index b3c2078..0000000
--- a/src/gooey/types/video_bots_page_request_functions_item_trigger.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestFunctionsItemTrigger = typing.Union[typing.Literal["pre", "post"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
deleted file mode 100644
index 3bb98e0..0000000
--- a/src/gooey/types/video_bots_page_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_tts_model.py b/src/gooey/types/video_bots_page_request_openai_tts_model.py
deleted file mode 100644
index 1df5de0..0000000
--- a/src/gooey/types/video_bots_page_request_openai_tts_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestOpenaiTtsModel = typing.Union[typing.Literal["tts_1", "tts_1_hd"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_openai_voice_name.py b/src/gooey/types/video_bots_page_request_openai_voice_name.py
deleted file mode 100644
index a08f96c..0000000
--- a/src/gooey/types/video_bots_page_request_openai_voice_name.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestOpenaiVoiceName = typing.Union[
-    typing.Literal["alloy", "echo", "fable", "onyx", "nova", "shimmer"], typing.Any
-]
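
The functions-item change above swaps the per-copilot trigger enum for the shared FunctionTrigger alias while keeping the "pre" / "post" literals. A minimal sketch (the URL is a hypothetical placeholder, not from the diff):

    from gooey.types.video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem

    # trigger still validates against the same "pre" / "post" literals,
    # now typed as the shared FunctionTrigger alias.
    fn = VideoBotsPageRequestFunctionsItem(
        url="https://example.com/my-function",  # hypothetical
        trigger="pre",
    )
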
diff --git a/src/gooey/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py
deleted file mode 100644
index 25cc8f1..0000000
--- a/src/gooey/types/video_bots_page_request_response_format_type.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py
deleted file mode 100644
index 0373c0c..0000000
--- a/src/gooey/types/video_bots_page_request_translation_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py
deleted file mode 100644
index 3fc8d0a..0000000
--- a/src/gooey/types/video_bots_page_request_tts_provider.py
+++ /dev/null
@@ -1,7 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestTtsProvider = typing.Union[
-    typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
-]
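
Taken together, these renames and deletions replace the per-endpoint enum aliases with shared ones. A minimal migration sketch for code that imported a removed name, assuming the shared alias keeps the same literal values:

    # Before (removed in this change):
    # from gooey.types.video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel

    # After (shared alias with the same "google" / "ghana_nlp" literals):
    from gooey.types.translation_models import TranslationModels

    selected_model: TranslationModels = "google"
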