diff --git a/pyproject.toml b/pyproject.toml
index 318af9f..d66b377 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gooeyai"
-version = "0.0.1-beta25"
+version = "0.0.1-beta26"
 description = ""
 readme = "README.md"
 authors = []
diff --git a/reference.md b/reference.md
index ad2abad..a067ff0 100644
--- a/reference.md
+++ b/reference.md
@@ -80,7 +80,7 @@ client.animate(
-**selected_model:** `typing.Optional[AnimationModels]`
+**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
@@ -308,7 +308,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**image_prompt_controlnet_models:** `typing.Optional[typing.List[ControlNetModels]]`
+**image_prompt_controlnet_models:** `typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]]`
@@ -348,7 +348,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[TextToImageModels]`
+**selected_model:** `typing.Optional[QrCodeRequestSelectedModel]`
@@ -356,7 +356,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_controlnet_model:** `typing.Optional[typing.List[ControlNetModels]]`
+**selected_controlnet_model:** `typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]]`
@@ -412,7 +412,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**scheduler:** `typing.Optional[Schedulers]`
+**scheduler:** `typing.Optional[QrCodeRequestScheduler]`
@@ -605,7 +605,7 @@ client.seo_people_also_ask(
-**embedding_model:** `typing.Optional[EmbeddingModels]`
+**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
@@ -666,7 +666,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[RelatedQnAPageRequestResponseFormatType]`
@@ -674,7 +674,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocations]`
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -893,7 +893,7 @@ client.seo_content(
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[SeoSummaryPageRequestResponseFormatType]`
@@ -901,7 +901,7 @@ client.seo_content(
-**serp_search_location:** `typing.Optional[SerpSearchLocations]`
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -1086,7 +1086,7 @@ client.web_search_llm(
-**embedding_model:** `typing.Optional[EmbeddingModels]`
+**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
@@ -1147,7 +1147,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[GoogleGptPageRequestResponseFormatType]`
@@ -1155,7 +1155,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocations]`
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -1331,7 +1331,7 @@ client.personalize_email(
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[SocialLookupEmailPageRequestResponseFormatType]`
@@ -1647,7 +1647,7 @@ Aggregate using one or more operations. Uses [pandas](https://pandas.pydata.org/
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[BulkEvalPageRequestResponseFormatType]`
@@ -1753,7 +1753,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_asr_model:** `typing.Optional[AsrModels]`
+**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
@@ -1835,7 +1835,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[SynthesizeDataRequestResponseFormatType]`
@@ -1977,7 +1977,7 @@ client.llm()
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[CompareLlmPageRequestResponseFormatType]`
@@ -2121,7 +2121,7 @@ client.rag(
-**embedding_model:** `typing.Optional[EmbeddingModels]`
+**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
@@ -2166,7 +2166,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[CitationStyles]`
+**citation_style:** `typing.Optional[DocSearchPageRequestCitationStyle]`
@@ -2214,7 +2214,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[DocSearchPageRequestResponseFormatType]`
@@ -2382,7 +2382,7 @@ client.smart_gpt(
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[SmartGptPageRequestResponseFormatType]`
@@ -2502,7 +2502,7 @@ typing.List[core.File]` — See core.File for more documentation
-**chain_type:** `typing.Optional[CombineDocumentsChains]`
+**chain_type:** `typing.Optional[typing.Literal["map_reduce"]]`
@@ -2510,7 +2510,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_asr_model:** `typing.Optional[AsrModels]`
+**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
@@ -2566,7 +2566,7 @@ typing.List[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[DocSummaryRequestResponseFormatType]`
@@ -2780,7 +2780,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[LipsyncModels]`
+**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
@@ -2886,7 +2886,7 @@ client.lipsync_tts(
-**tts_provider:** `typing.Optional[TextToSpeechProviders]`
+**tts_provider:** `typing.Optional[LipsyncTtsRequestTtsProvider]`
@@ -3080,7 +3080,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[LipsyncModels]`
+**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
@@ -3176,7 +3176,7 @@ client.text_to_speech(
-**tts_provider:** `typing.Optional[TextToSpeechProviders]`
+**tts_provider:** `typing.Optional[TextToSpeechPageRequestTtsProvider]`
@@ -3408,7 +3408,7 @@ typing.List[core.File]` — See core.File for more documentation
-**selected_model:** `typing.Optional[AsrModels]`
+**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
@@ -3424,7 +3424,7 @@ typing.List[core.File]` — See core.File for more documentation
-**translation_model:** `typing.Optional[TranslationModels]`
+**translation_model:** `typing.Optional[SpeechRecognitionRequestTranslationModel]`
@@ -3432,7 +3432,7 @@ typing.List[core.File]` — See core.File for more documentation
-**output_format:** `typing.Optional[AsrOutputFormat]`
+**output_format:** `typing.Optional[SpeechRecognitionRequestOutputFormat]`
@@ -3618,7 +3618,7 @@ client.text_to_music(
-**selected_models:** `typing.Optional[typing.Sequence[Text2AudioModels]]`
+**selected_models:** `typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]]`
@@ -3712,7 +3712,7 @@ client.translate()
-**selected_model:** `typing.Optional[TranslationModels]`
+**selected_model:** `typing.Optional[TranslateRequestSelectedModel]`
@@ -3842,7 +3842,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[ImageToImageModels]`
+**selected_model:** `typing.Optional[RemixImageRequestSelectedModel]`
@@ -4106,7 +4106,7 @@ client.text_to_image(
-**selected_models:** `typing.Optional[typing.Sequence[TextToImageModels]]`
+**selected_models:** `typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]]`
@@ -4114,7 +4114,7 @@ client.text_to_image(
-**scheduler:** `typing.Optional[Schedulers]`
+**scheduler:** `typing.Optional[CompareText2ImgPageRequestScheduler]`
@@ -4268,7 +4268,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[InpaintingModels]`
+**selected_model:** `typing.Optional[ProductImageRequestSelectedModel]`
@@ -4462,7 +4462,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[InpaintingModels]`
+**selected_model:** `typing.Optional[PortraitRequestSelectedModel]`
@@ -4663,7 +4663,7 @@ client.image_from_email(
-**selected_model:** `typing.Optional[InpaintingModels]`
+**selected_model:** `typing.Optional[EmailFaceInpaintingPageRequestSelectedModel]`
@@ -4896,7 +4896,7 @@ client.image_from_web_search(
-**serp_search_location:** `typing.Optional[SerpSearchLocations]`
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -4912,7 +4912,7 @@ client.image_from_web_search(
-**selected_model:** `typing.Optional[ImageToImageModels]`
+**selected_model:** `typing.Optional[GoogleImageGenPageRequestSelectedModel]`
@@ -5072,7 +5072,7 @@ core.File` — See core.File for more documentation
-**selected_model:** `typing.Optional[ImageSegmentationModels]`
+**selected_model:** `typing.Optional[RemoveBackgroundRequestSelectedModel]`
@@ -5236,7 +5236,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**selected_models:** `typing.Optional[typing.List[UpscalerModels]]`
+**selected_models:** `typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]]`
@@ -5340,7 +5340,7 @@ client.embed(
-**selected_model:** `typing.Optional[EmbeddingModels]`
+**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
@@ -5484,7 +5484,7 @@ client.seo_people_also_ask_doc(
-**embedding_model:** `typing.Optional[EmbeddingModels]`
+**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
@@ -5529,7 +5529,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[CitationStyles]`
+**citation_style:** `typing.Optional[RelatedQnADocPageRequestCitationStyle]`
@@ -5577,7 +5577,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[RelatedQnADocPageRequestResponseFormatType]`
@@ -5585,7 +5585,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**serp_search_location:** `typing.Optional[SerpSearchLocations]`
+**serp_search_location:** `typing.Optional[SerpSearchLocation]`
@@ -5876,7 +5876,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-**embedding_model:** `typing.Optional[EmbeddingModels]`
+**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]`
@@ -5897,7 +5897,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**citation_style:** `typing.Optional[CitationStyles]`
+**citation_style:** `typing.Optional[CopilotCompletionRequestCitationStyle]`
@@ -5913,7 +5913,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**asr_model:** `typing.Optional[AsrModels]` — Choose a model to transcribe incoming audio messages to text.
+**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
@@ -5929,7 +5929,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-**translation_model:** `typing.Optional[TranslationModels]`
+**translation_model:** `typing.Optional[CopilotCompletionRequestTranslationModel]`
@@ -5965,7 +5965,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**lipsync_model:** `typing.Optional[LipsyncModels]`
+**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
@@ -6021,7 +6021,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**response_format_type:** `typing.Optional[ResponseFormatType]`
+**response_format_type:** `typing.Optional[CopilotCompletionRequestResponseFormatType]`
@@ -6029,7 +6029,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-**tts_provider:** `typing.Optional[TextToSpeechProviders]`
+**tts_provider:** `typing.Optional[CopilotCompletionRequestTtsProvider]`
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py index b9e8980..86305e9 100644 --- a/src/gooey/__init__.py +++ b/src/gooey/__init__.py @@ -5,21 +5,22 @@ AggFunctionFunction, AggFunctionResult, AggFunctionResultFunction, - AnimationModels, AnimationPrompt, AsrChunk, - AsrModels, - AsrOutputFormat, AsrOutputJson, AsrPageOutput, AsrPageOutputOutputTextItem, AsrPageRequest, + AsrPageRequestOutputFormat, + AsrPageRequestSelectedModel, + AsrPageRequestTranslationModel, AsrPageStatusResponse, AsyncApiResponseModelV3, BalanceResponse, BotBroadcastFilters, BotBroadcastRequestModel, BulkEvalPageOutput, + BulkEvalPageRequestResponseFormatType, BulkEvalPageStatusResponse, BulkRunnerPageOutput, BulkRunnerPageRequest, @@ -32,18 +33,19 @@ ChyronPlantPageOutput, ChyronPlantPageRequest, ChyronPlantPageStatusResponse, - CitationStyles, - CombineDocumentsChains, CompareLlmPageOutput, + CompareLlmPageRequestResponseFormatType, CompareLlmPageStatusResponse, CompareText2ImgPageOutput, + CompareText2ImgPageRequestScheduler, + CompareText2ImgPageRequestSelectedModelsItem, CompareText2ImgPageStatusResponse, CompareUpscalerPageOutput, CompareUpscalerPageRequest, + CompareUpscalerPageRequestSelectedModelsItem, CompareUpscalerPageStatusResponse, ConsoleLogs, ConsoleLogsLevel, - ControlNetModels, ConversationEntry, ConversationEntryContent, ConversationEntryContentItem, @@ -52,28 +54,47 @@ ConversationEntryRole, ConversationStart, CreateStreamRequest, + CreateStreamRequestAsrModel, + CreateStreamRequestCitationStyle, + CreateStreamRequestEmbeddingModel, + CreateStreamRequestLipsyncModel, CreateStreamRequestOpenaiTtsModel, CreateStreamRequestOpenaiVoiceName, + CreateStreamRequestResponseFormatType, + CreateStreamRequestTranslationModel, + CreateStreamRequestTtsProvider, CreateStreamResponse, DeforumSdPageOutput, + DeforumSdPageRequestSelectedModel, DeforumSdPageStatusResponse, DocExtractPageOutput, DocExtractPageRequest, + DocExtractPageRequestResponseFormatType, + DocExtractPageRequestSelectedAsrModel, DocExtractPageStatusResponse, DocSearchPageOutput, + DocSearchPageRequestCitationStyle, + DocSearchPageRequestEmbeddingModel, DocSearchPageRequestKeywordQuery, + DocSearchPageRequestResponseFormatType, DocSearchPageStatusResponse, DocSummaryPageOutput, DocSummaryPageRequest, + DocSummaryPageRequestResponseFormatType, + DocSummaryPageRequestSelectedAsrModel, DocSummaryPageStatusResponse, + DocSummaryRequestResponseFormatType, + DocSummaryRequestSelectedAsrModel, EmailFaceInpaintingPageOutput, + EmailFaceInpaintingPageRequestSelectedModel, EmailFaceInpaintingPageStatusResponse, - EmbeddingModels, EmbeddingsPageOutput, + EmbeddingsPageRequestSelectedModel, EmbeddingsPageStatusResponse, EvalPrompt, FaceInpaintingPageOutput, FaceInpaintingPageRequest, + FaceInpaintingPageRequestSelectedModel, FaceInpaintingPageStatusResponse, FinalResponse, FunctionsPageOutput, @@ -81,60 +102,86 @@ GenericErrorResponse, GenericErrorResponseDetail, GoogleGptPageOutput, + GoogleGptPageRequestEmbeddingModel, + GoogleGptPageRequestResponseFormatType, GoogleGptPageStatusResponse, GoogleImageGenPageOutput, + GoogleImageGenPageRequestSelectedModel, GoogleImageGenPageStatusResponse, HttpValidationError, - ImageSegmentationModels, ImageSegmentationPageOutput, ImageSegmentationPageRequest, + ImageSegmentationPageRequestSelectedModel, ImageSegmentationPageStatusResponse, - ImageToImageModels, ImageUrl, ImageUrlDetail, Img2ImgPageOutput, Img2ImgPageRequest, Img2ImgPageRequestSelectedControlnetModel, + 
Img2ImgPageRequestSelectedControlnetModelItem, + Img2ImgPageRequestSelectedModel, Img2ImgPageStatusResponse, - InpaintingModels, LargeLanguageModels, LetterWriterPageOutput, LetterWriterPageRequest, LetterWriterPageStatusResponse, - LipsyncModels, LipsyncPageOutput, LipsyncPageRequest, + LipsyncPageRequestSelectedModel, LipsyncPageStatusResponse, + LipsyncRequestSelectedModel, LipsyncTtsPageOutput, LipsyncTtsPageRequest, LipsyncTtsPageRequestOpenaiTtsModel, LipsyncTtsPageRequestOpenaiVoiceName, + LipsyncTtsPageRequestSelectedModel, + LipsyncTtsPageRequestTtsProvider, LipsyncTtsPageStatusResponse, LipsyncTtsRequestOpenaiTtsModel, LipsyncTtsRequestOpenaiVoiceName, + LipsyncTtsRequestSelectedModel, + LipsyncTtsRequestTtsProvider, LlmTools, MessagePart, ObjectInpaintingPageOutput, ObjectInpaintingPageRequest, + ObjectInpaintingPageRequestSelectedModel, ObjectInpaintingPageStatusResponse, + PortraitRequestSelectedModel, + ProductImageRequestSelectedModel, PromptTreeNode, PromptTreeNodePrompt, QrCodeGeneratorPageOutput, QrCodeGeneratorPageRequest, + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, + QrCodeGeneratorPageRequestScheduler, + QrCodeGeneratorPageRequestSelectedControlnetModelItem, + QrCodeGeneratorPageRequestSelectedModel, QrCodeGeneratorPageStatusResponse, + QrCodeRequestImagePromptControlnetModelsItem, + QrCodeRequestScheduler, + QrCodeRequestSelectedControlnetModelItem, + QrCodeRequestSelectedModel, RecipeFunction, RecipeFunctionTrigger, RecipeRunState, RelatedDocSearchResponse, RelatedGoogleGptResponse, RelatedQnADocPageOutput, + RelatedQnADocPageRequestCitationStyle, + RelatedQnADocPageRequestEmbeddingModel, RelatedQnADocPageRequestKeywordQuery, + RelatedQnADocPageRequestResponseFormatType, RelatedQnADocPageStatusResponse, RelatedQnAPageOutput, + RelatedQnAPageRequestEmbeddingModel, + RelatedQnAPageRequestResponseFormatType, RelatedQnAPageStatusResponse, RemixImageRequestSelectedControlnetModel, + RemixImageRequestSelectedControlnetModelItem, + RemixImageRequestSelectedModel, + RemoveBackgroundRequestSelectedModel, ReplyButton, - ResponseFormatType, ResponseModel, ResponseModelFinalKeywordQuery, ResponseModelFinalPrompt, @@ -143,32 +190,38 @@ RunStart, SadTalkerSettings, SadTalkerSettingsPreprocess, - Schedulers, SearchReference, SeoSummaryPageOutput, + SeoSummaryPageRequestResponseFormatType, SeoSummaryPageStatusResponse, - SerpSearchLocations, + SerpSearchLocation, SerpSearchType, SmartGptPageOutput, + SmartGptPageRequestResponseFormatType, SmartGptPageStatusResponse, SocialLookupEmailPageOutput, + SocialLookupEmailPageRequestResponseFormatType, SocialLookupEmailPageStatusResponse, + SpeechRecognitionRequestOutputFormat, + SpeechRecognitionRequestSelectedModel, + SpeechRecognitionRequestTranslationModel, StreamError, - Text2AudioModels, + SynthesizeDataRequestResponseFormatType, + SynthesizeDataRequestSelectedAsrModel, Text2AudioPageOutput, Text2AudioPageStatusResponse, - TextToImageModels, TextToSpeechPageOutput, TextToSpeechPageRequestOpenaiTtsModel, TextToSpeechPageRequestOpenaiVoiceName, + TextToSpeechPageRequestTtsProvider, TextToSpeechPageStatusResponse, - TextToSpeechProviders, TrainingDataModel, - TranslationModels, + TranslateRequestSelectedModel, TranslationPageOutput, TranslationPageRequest, + TranslationPageRequestSelectedModel, TranslationPageStatusResponse, - UpscalerModels, + UpscaleRequestSelectedModelsItem, ValidationError, ValidationErrorLocItem, Vcard, @@ -176,24 +229,38 @@ VideoBotsPageOutputFinalKeywordQuery, VideoBotsPageOutputFinalPrompt, 
VideoBotsPageRequest, + VideoBotsPageRequestAsrModel, + VideoBotsPageRequestCitationStyle, + VideoBotsPageRequestEmbeddingModel, VideoBotsPageRequestFunctionsItem, VideoBotsPageRequestFunctionsItemTrigger, + VideoBotsPageRequestLipsyncModel, VideoBotsPageRequestOpenaiTtsModel, VideoBotsPageRequestOpenaiVoiceName, + VideoBotsPageRequestResponseFormatType, VideoBotsPageRequestSadtalkerSettings, VideoBotsPageRequestSadtalkerSettingsPreprocess, + VideoBotsPageRequestTranslationModel, + VideoBotsPageRequestTtsProvider, VideoBotsPageStatusResponse, ) from .errors import PaymentRequiredError, TooManyRequestsError, UnprocessableEntityError from . import copilot from .client import AsyncGooey, Gooey from .copilot import ( + CopilotCompletionRequestAsrModel, + CopilotCompletionRequestCitationStyle, + CopilotCompletionRequestEmbeddingModel, CopilotCompletionRequestFunctionsItem, CopilotCompletionRequestFunctionsItemTrigger, + CopilotCompletionRequestLipsyncModel, CopilotCompletionRequestOpenaiTtsModel, CopilotCompletionRequestOpenaiVoiceName, + CopilotCompletionRequestResponseFormatType, CopilotCompletionRequestSadtalkerSettings, CopilotCompletionRequestSadtalkerSettingsPreprocess, + CopilotCompletionRequestTranslationModel, + CopilotCompletionRequestTtsProvider, ) from .environment import GooeyEnvironment from .version import __version__ @@ -203,15 +270,15 @@ "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", - "AnimationModels", "AnimationPrompt", "AsrChunk", - "AsrModels", - "AsrOutputFormat", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", "AsrPageRequest", + "AsrPageRequestOutputFormat", + "AsrPageRequestSelectedModel", + "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", "AsyncGooey", @@ -219,6 +286,7 @@ "BotBroadcastFilters", "BotBroadcastRequestModel", "BulkEvalPageOutput", + "BulkEvalPageRequestResponseFormatType", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", "BulkRunnerPageRequest", @@ -231,18 +299,19 @@ "ChyronPlantPageOutput", "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", - "CitationStyles", - "CombineDocumentsChains", "CompareLlmPageOutput", + "CompareLlmPageRequestResponseFormatType", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + "CompareText2ImgPageRequestScheduler", + "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", "CompareUpscalerPageRequest", + "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", - "ControlNetModels", "ConversationEntry", "ConversationEntryContent", "ConversationEntryContentItem", @@ -250,35 +319,61 @@ "ConversationEntryContentItem_Text", "ConversationEntryRole", "ConversationStart", + "CopilotCompletionRequestAsrModel", + "CopilotCompletionRequestCitationStyle", + "CopilotCompletionRequestEmbeddingModel", "CopilotCompletionRequestFunctionsItem", "CopilotCompletionRequestFunctionsItemTrigger", + "CopilotCompletionRequestLipsyncModel", "CopilotCompletionRequestOpenaiTtsModel", "CopilotCompletionRequestOpenaiVoiceName", + "CopilotCompletionRequestResponseFormatType", "CopilotCompletionRequestSadtalkerSettings", "CopilotCompletionRequestSadtalkerSettingsPreprocess", + "CopilotCompletionRequestTranslationModel", + "CopilotCompletionRequestTtsProvider", "CreateStreamRequest", + "CreateStreamRequestAsrModel", + "CreateStreamRequestCitationStyle", + "CreateStreamRequestEmbeddingModel", + "CreateStreamRequestLipsyncModel", 
"CreateStreamRequestOpenaiTtsModel", "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", + "CreateStreamRequestTranslationModel", + "CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", "DocExtractPageRequest", + "DocExtractPageRequestResponseFormatType", + "DocExtractPageRequestSelectedAsrModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequestCitationStyle", + "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", "DocSearchPageStatusResponse", "DocSummaryPageOutput", "DocSummaryPageRequest", + "DocSummaryPageRequestResponseFormatType", + "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageStatusResponse", + "DocSummaryRequestResponseFormatType", + "DocSummaryRequestSelectedAsrModel", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", - "EmbeddingModels", "EmbeddingsPageOutput", + "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", "FaceInpaintingPageRequest", + "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", @@ -288,61 +383,87 @@ "Gooey", "GooeyEnvironment", "GoogleGptPageOutput", + "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", - "ImageSegmentationModels", "ImageSegmentationPageOutput", "ImageSegmentationPageRequest", + "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", - "ImageToImageModels", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", "Img2ImgPageRequest", "Img2ImgPageRequestSelectedControlnetModel", + "Img2ImgPageRequestSelectedControlnetModelItem", + "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", - "InpaintingModels", "LargeLanguageModels", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageStatusResponse", - "LipsyncModels", "LipsyncPageOutput", "LipsyncPageRequest", + "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", + "LipsyncRequestSelectedModel", "LipsyncTtsPageOutput", "LipsyncTtsPageRequest", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSelectedModel", + "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", "LipsyncTtsRequestOpenaiTtsModel", "LipsyncTtsRequestOpenaiVoiceName", + "LipsyncTtsRequestSelectedModel", + "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", "ObjectInpaintingPageRequest", + "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", "PaymentRequiredError", + "PortraitRequestSelectedModel", + "ProductImageRequestSelectedModel", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", "QrCodeGeneratorPageRequest", + "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + "QrCodeGeneratorPageRequestScheduler", + "QrCodeGeneratorPageRequestSelectedControlnetModelItem", + "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", + "QrCodeRequestImagePromptControlnetModelsItem", + "QrCodeRequestScheduler", + "QrCodeRequestSelectedControlnetModelItem", + 
"QrCodeRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", "RelatedQnADocPageOutput", + "RelatedQnADocPageRequestCitationStyle", + "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageStatusResponse", "RemixImageRequestSelectedControlnetModel", + "RemixImageRequestSelectedControlnetModelItem", + "RemixImageRequestSelectedModel", + "RemoveBackgroundRequestSelectedModel", "ReplyButton", - "ResponseFormatType", "ResponseModel", "ResponseModelFinalKeywordQuery", "ResponseModelFinalPrompt", @@ -351,34 +472,40 @@ "RunStart", "SadTalkerSettings", "SadTalkerSettingsPreprocess", - "Schedulers", "SearchReference", "SeoSummaryPageOutput", + "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageStatusResponse", - "SerpSearchLocations", + "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", + "SmartGptPageRequestResponseFormatType", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageStatusResponse", + "SpeechRecognitionRequestOutputFormat", + "SpeechRecognitionRequestSelectedModel", + "SpeechRecognitionRequestTranslationModel", "StreamError", - "Text2AudioModels", + "SynthesizeDataRequestResponseFormatType", + "SynthesizeDataRequestSelectedAsrModel", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", - "TextToImageModels", "TextToSpeechPageOutput", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", + "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", - "TextToSpeechProviders", "TooManyRequestsError", "TrainingDataModel", - "TranslationModels", + "TranslateRequestSelectedModel", "TranslationPageOutput", "TranslationPageRequest", + "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", "UnprocessableEntityError", - "UpscalerModels", + "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -386,12 +513,19 @@ "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", "VideoBotsPageRequest", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", "VideoBotsPageRequestFunctionsItem", "VideoBotsPageRequestFunctionsItemTrigger", + "VideoBotsPageRequestLipsyncModel", "VideoBotsPageRequestOpenaiTtsModel", "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", "VideoBotsPageRequestSadtalkerSettings", "VideoBotsPageRequestSadtalkerSettingsPreprocess", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", "__version__", "copilot", diff --git a/src/gooey/client.py b/src/gooey/client.py index 10c5a84..6767f27 100644 --- a/src/gooey/client.py +++ b/src/gooey/client.py @@ -9,7 +9,7 @@ from .copilot.client import CopilotClient from .types.animation_prompt import AnimationPrompt from .types.recipe_function import RecipeFunction -from .types.animation_models import AnimationModels +from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel from .types.run_settings import RunSettings from .core.request_options import RequestOptions from .types.deforum_sd_page_output import DeforumSdPageOutput @@ -22,64 +22,88 @@ 
from json.decoder import JSONDecodeError from . import core from .types.vcard import Vcard -from .types.control_net_models import ControlNetModels -from .types.text_to_image_models import TextToImageModels -from .types.schedulers import Schedulers +from .types.qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem +from .types.qr_code_request_selected_model import QrCodeRequestSelectedModel +from .types.qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem +from .types.qr_code_request_scheduler import QrCodeRequestScheduler from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput from .types.large_language_models import LargeLanguageModels -from .types.embedding_models import EmbeddingModels -from .types.response_format_type import ResponseFormatType -from .types.serp_search_locations import SerpSearchLocations +from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType +from .types.serp_search_location import SerpSearchLocation from .types.serp_search_type import SerpSearchType from .types.related_qn_a_page_output import RelatedQnAPageOutput +from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .types.seo_summary_page_output import SeoSummaryPageOutput +from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .types.google_gpt_page_output import GoogleGptPageOutput +from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .types.social_lookup_email_page_output import SocialLookupEmailPageOutput from .types.bulk_runner_page_output import BulkRunnerPageOutput from .types.eval_prompt import EvalPrompt from .types.agg_function import AggFunction +from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType from .types.bulk_eval_page_output import BulkEvalPageOutput -from .types.asr_models import AsrModels +from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel +from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType from .types.doc_extract_page_output import DocExtractPageOutput +from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType from .types.compare_llm_page_output import CompareLlmPageOutput from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery -from .types.citation_styles import CitationStyles +from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel +from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType from .types.doc_search_page_output import DocSearchPageOutput +from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType from .types.smart_gpt_page_output import SmartGptPageOutput -from .types.combine_documents_chains import CombineDocumentsChains +from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel 
+from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType from .types.doc_summary_page_output import DocSummaryPageOutput from .types.functions_page_output import FunctionsPageOutput from .types.sad_talker_settings import SadTalkerSettings -from .types.lipsync_models import LipsyncModels +from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel from .types.lipsync_page_output import LipsyncPageOutput -from .types.text_to_speech_providers import TextToSpeechProviders +from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel +from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel from .types.lipsync_tts_page_output import LipsyncTtsPageOutput +from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel from .types.text_to_speech_page_output import TextToSpeechPageOutput -from .types.translation_models import TranslationModels -from .types.asr_output_format import AsrOutputFormat +from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel +from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel +from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat from .types.asr_page_output import AsrPageOutput -from .types.text2audio_models import Text2AudioModels from .types.text2audio_page_output import Text2AudioPageOutput +from .types.translate_request_selected_model import TranslateRequestSelectedModel from .types.translation_page_output import TranslationPageOutput -from .types.image_to_image_models import ImageToImageModels +from .types.remix_image_request_selected_model import RemixImageRequestSelectedModel from .types.remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel from .types.img2img_page_output import Img2ImgPageOutput +from .types.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem +from .types.compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler from .types.compare_text2img_page_output import CompareText2ImgPageOutput -from .types.inpainting_models import InpaintingModels +from .types.product_image_request_selected_model import ProductImageRequestSelectedModel from .types.object_inpainting_page_output import ObjectInpaintingPageOutput +from .types.portrait_request_selected_model import PortraitRequestSelectedModel from .types.face_inpainting_page_output import FaceInpaintingPageOutput +from .types.email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .types.email_face_inpainting_page_output import EmailFaceInpaintingPageOutput +from .types.google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .types.google_image_gen_page_output import GoogleImageGenPageOutput -from .types.image_segmentation_models import ImageSegmentationModels +from .types.remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel 
from .types.image_segmentation_page_output import ImageSegmentationPageOutput -from .types.upscaler_models import UpscalerModels +from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem from .types.compare_upscaler_page_output import CompareUpscalerPageOutput +from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .types.embeddings_page_output import EmbeddingsPageOutput from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel +from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput from .types.balance_response import BalanceResponse from .core.client_wrapper import AsyncClientWrapper @@ -159,7 +183,7 @@ def animate( functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[AnimationModels] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, animation_mode: typing.Optional[str] = OMIT, zoom: typing.Optional[str] = OMIT, translation_x: typing.Optional[str] = OMIT, @@ -186,7 +210,7 @@ def animate( max_frames : typing.Optional[int] - selected_model : typing.Optional[AnimationModels] + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] animation_mode : typing.Optional[str] @@ -316,20 +340,22 @@ def qr_code( use_url_shortener: typing.Optional[bool] = None, negative_prompt: typing.Optional[str] = None, image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeRequestImagePromptControlnetModelsItem] + ] = None, image_prompt_strength: typing.Optional[float] = None, image_prompt_scale: typing.Optional[float] = None, image_prompt_pos_x: typing.Optional[float] = None, image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[TextToImageModels] = None, - selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, output_width: typing.Optional[int] = None, output_height: typing.Optional[int] = None, guidance_scale: typing.Optional[float] = None, controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, - scheduler: typing.Optional[Schedulers] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, seed: typing.Optional[int] = None, obj_scale: typing.Optional[float] = None, obj_pos_x: typing.Optional[float] = None, @@ -365,7 +391,7 @@ def qr_code( image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]] + image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] image_prompt_strength : typing.Optional[float] @@ -375,9 +401,9 @@ def qr_code( image_prompt_pos_y : 
typing.Optional[float] - selected_model : typing.Optional[TextToImageModels] + selected_model : typing.Optional[QrCodeRequestSelectedModel] - selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]] + selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] output_width : typing.Optional[int] @@ -391,7 +417,7 @@ def qr_code( quality : typing.Optional[int] - scheduler : typing.Optional[Schedulers] + scheduler : typing.Optional[QrCodeRequestScheduler] seed : typing.Optional[int] @@ -523,15 +549,15 @@ def seo_people_also_ask( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -566,7 +592,7 @@ def seo_people_also_ask( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -584,9 +610,9 @@ def seo_people_also_ask( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -716,8 +742,8 @@ def seo_content( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -759,9 +785,9 @@ def seo_content( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -886,15 +912,15 @@ def web_search_llm( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, 
scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -929,7 +955,7 @@ def web_search_llm( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -947,9 +973,9 @@ def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -1074,7 +1100,7 @@ def personalize_email( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SocialLookupEmailPageOutput: @@ -1104,7 +1130,7 @@ def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1345,7 +1371,7 @@ def eval( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageOutput: @@ -1389,7 +1415,7 @@ def eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1488,7 +1514,7 @@ def synthesize_data( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, sheet_url: typing.Optional[core.File] = None, - selected_asr_model: typing.Optional[AsrModels] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, google_translate_target: typing.Optional[str] = None, glossary_document: 
typing.Optional[core.File] = None, task_instructions: typing.Optional[str] = None, @@ -1498,7 +1524,7 @@ def synthesize_data( quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, + response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocExtractPageOutput: @@ -1518,7 +1544,7 @@ def synthesize_data( sheet_url : typing.Optional[core.File] See core.File for more documentation - selected_asr_model : typing.Optional[AsrModels] + selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -1539,7 +1565,7 @@ def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1646,7 +1672,7 @@ def llm( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CompareLlmPageOutput: @@ -1674,7 +1700,7 @@ def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1774,18 +1800,18 @@ def rag( max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[CitationStyles] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DocSearchPageOutput: @@ -1813,7 +1839,7 @@ def rag( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -1827,7 +1853,7 @@ def rag( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -1839,7 +1865,7 @@ def rag( sampling_temperature : typing.Optional[float] - 
response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -1955,7 +1981,7 @@ def smart_gpt( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageOutput: @@ -1989,7 +2015,7 @@ def smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -2091,15 +2117,15 @@ def doc_summary( task_instructions: typing.Optional[str] = None, merge_instructions: typing.Optional[str] = None, selected_model: typing.Optional[LargeLanguageModels] = None, - chain_type: typing.Optional[CombineDocumentsChains] = None, - selected_asr_model: typing.Optional[AsrModels] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, google_translate_target: typing.Optional[str] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocSummaryPageOutput: @@ -2122,9 +2148,9 @@ def doc_summary( selected_model : typing.Optional[LargeLanguageModels] - chain_type : typing.Optional[CombineDocumentsChains] + chain_type : typing.Optional[typing.Literal["map_reduce"]] - selected_asr_model : typing.Optional[AsrModels] + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -2138,7 +2164,7 @@ def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -2341,7 +2367,7 @@ def lipsync( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncModels] = None, + selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, input_audio: typing.Optional[core.File] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -2369,7 +2395,7 @@ def lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncModels] + selected_model : typing.Optional[LipsyncRequestSelectedModel] input_audio : typing.Optional[core.File] See core.File for more documentation @@ -2468,7 +2494,7 @@ def lipsync_tts( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = 
None, - tts_provider: typing.Optional[TextToSpeechProviders] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -2492,7 +2518,7 @@ def lipsync_tts( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncModels] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncTtsPageOutput: @@ -2508,7 +2534,7 @@ def lipsync_tts( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechProviders] + tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -2558,7 +2584,7 @@ def lipsync_tts( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncModels] + selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -2674,7 +2700,7 @@ def text_to_speech( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechProviders] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, google_voice_name: typing.Optional[str] = OMIT, @@ -2707,7 +2733,7 @@ def text_to_speech( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechProviders] + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -2849,10 +2875,10 @@ def speech_recognition( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[AsrModels] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, language: typing.Optional[str] = None, - translation_model: typing.Optional[TranslationModels] = None, - output_format: typing.Optional[AsrOutputFormat] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None, google_translate_target: typing.Optional[str] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, @@ -2873,13 +2899,13 @@ def speech_recognition( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[AsrModels] + selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] language : typing.Optional[str] - translation_model : typing.Optional[TranslationModels] + translation_model : 
typing.Optional[SpeechRecognitionRequestTranslationModel] - output_format : typing.Optional[AsrOutputFormat] + output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] google_translate_target : typing.Optional[str] use `translation_model` & `translation_target` instead. @@ -2993,7 +3019,7 @@ def text_to_music( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Text2AudioPageOutput: @@ -3023,7 +3049,7 @@ def text_to_music( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[Text2AudioModels]] + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] settings : typing.Optional[RunSettings] @@ -3120,7 +3146,7 @@ def translate( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslationModels] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, @@ -3139,7 +3165,7 @@ def translate( texts : typing.Optional[typing.List[str]] - selected_model : typing.Optional[TranslationModels] + selected_model : typing.Optional[TranslateRequestSelectedModel] translation_source : typing.Optional[str] @@ -3240,7 +3266,7 @@ def remix_image( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[ImageToImageModels] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, @@ -3270,7 +3296,7 @@ def remix_image( text_prompt : typing.Optional[str] - selected_model : typing.Optional[ImageToImageModels] + selected_model : typing.Optional[RemixImageRequestSelectedModel] selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] @@ -3404,8 +3430,8 @@ def text_to_image( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT, - scheduler: typing.Optional[Schedulers] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, edit_instruction: typing.Optional[str] = OMIT, image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -3443,9 +3469,9 @@ def text_to_image( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[TextToImageModels]] + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - scheduler : typing.Optional[Schedulers] + scheduler : 
typing.Optional[CompareText2ImgPageRequestScheduler] edit_instruction : typing.Optional[str] @@ -3557,7 +3583,7 @@ def product_image( obj_pos_x: typing.Optional[float] = None, obj_pos_y: typing.Optional[float] = None, mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[InpaintingModels] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -3592,7 +3618,7 @@ def product_image( mask_threshold : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[ProductImageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3717,7 +3743,7 @@ def portrait( face_scale: typing.Optional[float] = None, face_pos_x: typing.Optional[float] = None, face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[InpaintingModels] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -3750,7 +3776,7 @@ def portrait( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[PortraitRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -3875,7 +3901,7 @@ def image_from_email( face_scale: typing.Optional[float] = OMIT, face_pos_x: typing.Optional[float] = OMIT, face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[InpaintingModels] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -3917,7 +3943,7 @@ def image_from_email( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -4062,9 +4088,9 @@ def image_from_web_search( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[ImageToImageModels] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -4090,12 +4116,12 @@ def image_from_web_search( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead - selected_model : typing.Optional[ImageToImageModels] + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -4213,7 +4239,7 @@ def remove_background( example_id: typing.Optional[str] = None, functions: 
typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[ImageSegmentationModels] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, mask_threshold: typing.Optional[float] = None, rect_persepective_transform: typing.Optional[bool] = None, reflection_opacity: typing.Optional[float] = None, @@ -4236,7 +4262,7 @@ def remove_background( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[ImageSegmentationModels] + selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] mask_threshold : typing.Optional[float] @@ -4346,7 +4372,7 @@ def upscale( variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_image: typing.Optional[core.File] = None, input_video: typing.Optional[core.File] = None, - selected_models: typing.Optional[typing.List[UpscalerModels]] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -4370,7 +4396,7 @@ def upscale( input_video : typing.Optional[core.File] See core.File for more documentation - selected_models : typing.Optional[typing.List[UpscalerModels]] + selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] @@ -4467,7 +4493,7 @@ def embed( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingModels] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> EmbeddingsPageOutput: @@ -4483,7 +4509,7 @@ def embed( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbeddingModels] + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -4579,19 +4605,19 @@ def seo_people_also_ask_doc( max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[CitationStyles] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: 
typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -4622,7 +4648,7 @@ def seo_people_also_ask_doc( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -4636,7 +4662,7 @@ def seo_people_also_ask_doc( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -4648,9 +4674,9 @@ def seo_people_also_ask_doc( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -4873,7 +4899,7 @@ async def animate( functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, max_frames: typing.Optional[int] = OMIT, - selected_model: typing.Optional[AnimationModels] = OMIT, + selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT, animation_mode: typing.Optional[str] = OMIT, zoom: typing.Optional[str] = OMIT, translation_x: typing.Optional[str] = OMIT, @@ -4900,7 +4926,7 @@ async def animate( max_frames : typing.Optional[int] - selected_model : typing.Optional[AnimationModels] + selected_model : typing.Optional[DeforumSdPageRequestSelectedModel] animation_mode : typing.Optional[str] @@ -5038,20 +5064,22 @@ async def qr_code( use_url_shortener: typing.Optional[bool] = None, negative_prompt: typing.Optional[str] = None, image_prompt: typing.Optional[str] = None, - image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None, + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeRequestImagePromptControlnetModelsItem] + ] = None, image_prompt_strength: typing.Optional[float] = None, image_prompt_scale: typing.Optional[float] = None, image_prompt_pos_x: typing.Optional[float] = None, image_prompt_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[TextToImageModels] = None, - selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None, + selected_model: typing.Optional[QrCodeRequestSelectedModel] = None, + selected_controlnet_model: typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] = None, output_width: typing.Optional[int] = None, output_height: typing.Optional[int] = None, guidance_scale: typing.Optional[float] = None, controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, - scheduler: typing.Optional[Schedulers] = None, + scheduler: typing.Optional[QrCodeRequestScheduler] = None, seed: typing.Optional[int] = None, obj_scale: typing.Optional[float] = None, obj_pos_x: typing.Optional[float] = None, @@ 
-5087,7 +5115,7 @@ async def qr_code( image_prompt : typing.Optional[str] - image_prompt_controlnet_models : typing.Optional[typing.List[ControlNetModels]] + image_prompt_controlnet_models : typing.Optional[typing.List[QrCodeRequestImagePromptControlnetModelsItem]] image_prompt_strength : typing.Optional[float] @@ -5097,9 +5125,9 @@ async def qr_code( image_prompt_pos_y : typing.Optional[float] - selected_model : typing.Optional[TextToImageModels] + selected_model : typing.Optional[QrCodeRequestSelectedModel] - selected_controlnet_model : typing.Optional[typing.List[ControlNetModels]] + selected_controlnet_model : typing.Optional[typing.List[QrCodeRequestSelectedControlnetModelItem]] output_width : typing.Optional[int] @@ -5113,7 +5141,7 @@ async def qr_code( quality : typing.Optional[int] - scheduler : typing.Optional[Schedulers] + scheduler : typing.Optional[QrCodeRequestScheduler] seed : typing.Optional[int] @@ -5253,15 +5281,15 @@ async def seo_people_also_ask( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5296,7 +5324,7 @@ async def seo_people_also_ask( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -5314,9 +5342,9 @@ async def seo_people_also_ask( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5454,8 +5482,8 @@ async def seo_content( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5497,9 +5525,9 @@ async def seo_content( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : 
typing.Optional[SeoSummaryPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5632,15 +5660,15 @@ async def web_search_llm( max_references: typing.Optional[int] = OMIT, max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -5675,7 +5703,7 @@ async def web_search_llm( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -5693,9 +5721,9 @@ async def web_search_llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead @@ -5828,7 +5856,7 @@ async def personalize_email( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[SocialLookupEmailPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SocialLookupEmailPageOutput: @@ -5858,7 +5886,7 @@ async def personalize_email( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SocialLookupEmailPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6115,7 +6143,7 @@ async def eval( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[BulkEvalPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> BulkEvalPageOutput: @@ -6159,7 +6187,7 @@ async def eval( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[BulkEvalPageRequestResponseFormatType] settings : 
typing.Optional[RunSettings] @@ -6266,7 +6294,7 @@ async def synthesize_data( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, sheet_url: typing.Optional[core.File] = None, - selected_asr_model: typing.Optional[AsrModels] = None, + selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None, google_translate_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, task_instructions: typing.Optional[str] = None, @@ -6276,7 +6304,7 @@ async def synthesize_data( quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, + response_format_type: typing.Optional[SynthesizeDataRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocExtractPageOutput: @@ -6296,7 +6324,7 @@ async def synthesize_data( sheet_url : typing.Optional[core.File] See core.File for more documentation - selected_asr_model : typing.Optional[AsrModels] + selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -6317,7 +6345,7 @@ async def synthesize_data( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SynthesizeDataRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6432,7 +6460,7 @@ async def llm( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[CompareLlmPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> CompareLlmPageOutput: @@ -6460,7 +6488,7 @@ async def llm( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[CompareLlmPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6568,18 +6596,18 @@ async def rag( max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[CitationStyles] = OMIT, + citation_style: typing.Optional[DocSearchPageRequestCitationStyle] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[DocSearchPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> DocSearchPageOutput: @@ -6607,7 +6635,7 @@ async def rag( 
doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -6621,7 +6649,7 @@ async def rag( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[DocSearchPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -6633,7 +6661,7 @@ async def rag( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[DocSearchPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6757,7 +6785,7 @@ async def smart_gpt( quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, + response_format_type: typing.Optional[SmartGptPageRequestResponseFormatType] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> SmartGptPageOutput: @@ -6791,7 +6819,7 @@ async def smart_gpt( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[SmartGptPageRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -6901,15 +6929,15 @@ async def doc_summary( task_instructions: typing.Optional[str] = None, merge_instructions: typing.Optional[str] = None, selected_model: typing.Optional[LargeLanguageModels] = None, - chain_type: typing.Optional[CombineDocumentsChains] = None, - selected_asr_model: typing.Optional[AsrModels] = None, + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None, + selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None, google_translate_target: typing.Optional[str] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, + response_format_type: typing.Optional[DocSummaryRequestResponseFormatType] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> DocSummaryPageOutput: @@ -6932,9 +6960,9 @@ async def doc_summary( selected_model : typing.Optional[LargeLanguageModels] - chain_type : typing.Optional[CombineDocumentsChains] + chain_type : typing.Optional[typing.Literal["map_reduce"]] - selected_asr_model : typing.Optional[AsrModels] + selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel] google_translate_target : typing.Optional[str] @@ -6948,7 +6976,7 @@ async def doc_summary( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[DocSummaryRequestResponseFormatType] settings : typing.Optional[RunSettings] @@ -7167,7 +7195,7 @@ async def lipsync( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncModels] = None, + selected_model: typing.Optional[LipsyncRequestSelectedModel] = None, input_audio: typing.Optional[core.File] = None, settings: 
typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -7195,7 +7223,7 @@ async def lipsync( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncModels] + selected_model : typing.Optional[LipsyncRequestSelectedModel] input_audio : typing.Optional[core.File] See core.File for more documentation @@ -7302,7 +7330,7 @@ async def lipsync_tts( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - tts_provider: typing.Optional[TextToSpeechProviders] = None, + tts_provider: typing.Optional[LipsyncTtsRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -7326,7 +7354,7 @@ async def lipsync_tts( face_padding_left: typing.Optional[int] = None, face_padding_right: typing.Optional[int] = None, sadtalker_settings: typing.Optional[SadTalkerSettings] = None, - selected_model: typing.Optional[LipsyncModels] = None, + selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, ) -> LipsyncTtsPageOutput: @@ -7342,7 +7370,7 @@ async def lipsync_tts( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechProviders] + tts_provider : typing.Optional[LipsyncTtsRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -7392,7 +7420,7 @@ async def lipsync_tts( sadtalker_settings : typing.Optional[SadTalkerSettings] - selected_model : typing.Optional[LipsyncModels] + selected_model : typing.Optional[LipsyncTtsRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -7516,7 +7544,7 @@ async def text_to_speech( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - tts_provider: typing.Optional[TextToSpeechProviders] = OMIT, + tts_provider: typing.Optional[TextToSpeechPageRequestTtsProvider] = OMIT, uberduck_voice_name: typing.Optional[str] = OMIT, uberduck_speaking_rate: typing.Optional[float] = OMIT, google_voice_name: typing.Optional[str] = OMIT, @@ -7549,7 +7577,7 @@ async def text_to_speech( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - tts_provider : typing.Optional[TextToSpeechProviders] + tts_provider : typing.Optional[TextToSpeechPageRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -7699,10 +7727,10 @@ async def speech_recognition( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[AsrModels] = None, + selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None, language: typing.Optional[str] = None, - translation_model: typing.Optional[TranslationModels] = None, - output_format: typing.Optional[AsrOutputFormat] = None, + translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None, + output_format: 
typing.Optional[SpeechRecognitionRequestOutputFormat] = None, google_translate_target: typing.Optional[str] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, @@ -7723,13 +7751,13 @@ async def speech_recognition( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[AsrModels] + selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel] language : typing.Optional[str] - translation_model : typing.Optional[TranslationModels] + translation_model : typing.Optional[SpeechRecognitionRequestTranslationModel] - output_format : typing.Optional[AsrOutputFormat] + output_format : typing.Optional[SpeechRecognitionRequestOutputFormat] google_translate_target : typing.Optional[str] use `translation_model` & `translation_target` instead. @@ -7851,7 +7879,7 @@ async def text_to_music( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[Text2AudioModels]] = OMIT, + selected_models: typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> Text2AudioPageOutput: @@ -7881,7 +7909,7 @@ async def text_to_music( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[Text2AudioModels]] + selected_models : typing.Optional[typing.Sequence[typing.Literal["audio_ldm"]]] settings : typing.Optional[RunSettings] @@ -7986,7 +8014,7 @@ async def translate( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, texts: typing.Optional[typing.List[str]] = None, - selected_model: typing.Optional[TranslationModels] = None, + selected_model: typing.Optional[TranslateRequestSelectedModel] = None, translation_source: typing.Optional[str] = None, translation_target: typing.Optional[str] = None, glossary_document: typing.Optional[core.File] = None, @@ -8005,7 +8033,7 @@ async def translate( texts : typing.Optional[typing.List[str]] - selected_model : typing.Optional[TranslationModels] + selected_model : typing.Optional[TranslateRequestSelectedModel] translation_source : typing.Optional[str] @@ -8114,7 +8142,7 @@ async def remix_image( functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, text_prompt: typing.Optional[str] = None, - selected_model: typing.Optional[ImageToImageModels] = None, + selected_model: typing.Optional[RemixImageRequestSelectedModel] = None, selected_controlnet_model: typing.Optional[RemixImageRequestSelectedControlnetModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, @@ -8144,7 +8172,7 @@ async def remix_image( text_prompt : typing.Optional[str] - selected_model : typing.Optional[ImageToImageModels] + selected_model : typing.Optional[RemixImageRequestSelectedModel] selected_controlnet_model : typing.Optional[RemixImageRequestSelectedControlnetModel] @@ -8286,8 +8314,8 @@ async def text_to_image( guidance_scale: typing.Optional[float] = OMIT, seed: typing.Optional[int] = OMIT, sd2upscaling: typing.Optional[bool] = OMIT, - selected_models: typing.Optional[typing.Sequence[TextToImageModels]] = OMIT, - 
scheduler: typing.Optional[Schedulers] = OMIT, + selected_models: typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] = OMIT, + scheduler: typing.Optional[CompareText2ImgPageRequestScheduler] = OMIT, edit_instruction: typing.Optional[str] = OMIT, image_guidance_scale: typing.Optional[float] = OMIT, settings: typing.Optional[RunSettings] = OMIT, @@ -8325,9 +8353,9 @@ async def text_to_image( sd2upscaling : typing.Optional[bool] - selected_models : typing.Optional[typing.Sequence[TextToImageModels]] + selected_models : typing.Optional[typing.Sequence[CompareText2ImgPageRequestSelectedModelsItem]] - scheduler : typing.Optional[Schedulers] + scheduler : typing.Optional[CompareText2ImgPageRequestScheduler] edit_instruction : typing.Optional[str] @@ -8447,7 +8475,7 @@ async def product_image( obj_pos_x: typing.Optional[float] = None, obj_pos_y: typing.Optional[float] = None, mask_threshold: typing.Optional[float] = None, - selected_model: typing.Optional[InpaintingModels] = None, + selected_model: typing.Optional[ProductImageRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -8482,7 +8510,7 @@ async def product_image( mask_threshold : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[ProductImageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -8615,7 +8643,7 @@ async def portrait( face_scale: typing.Optional[float] = None, face_pos_x: typing.Optional[float] = None, face_pos_y: typing.Optional[float] = None, - selected_model: typing.Optional[InpaintingModels] = None, + selected_model: typing.Optional[PortraitRequestSelectedModel] = None, negative_prompt: typing.Optional[str] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[int] = None, @@ -8648,7 +8676,7 @@ async def portrait( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[PortraitRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -8781,7 +8809,7 @@ async def image_from_email( face_scale: typing.Optional[float] = OMIT, face_pos_x: typing.Optional[float] = OMIT, face_pos_y: typing.Optional[float] = OMIT, - selected_model: typing.Optional[InpaintingModels] = OMIT, + selected_model: typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = OMIT, @@ -8823,7 +8851,7 @@ async def image_from_email( face_pos_y : typing.Optional[float] - selected_model : typing.Optional[InpaintingModels] + selected_model : typing.Optional[EmailFaceInpaintingPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -8976,9 +9004,9 @@ async def image_from_web_search( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, - selected_model: typing.Optional[ImageToImageModels] = OMIT, + selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT, negative_prompt: typing.Optional[str] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[int] = 
OMIT, @@ -9004,12 +9032,12 @@ async def image_from_web_search( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead - selected_model : typing.Optional[ImageToImageModels] + selected_model : typing.Optional[GoogleImageGenPageRequestSelectedModel] negative_prompt : typing.Optional[str] @@ -9135,7 +9163,7 @@ async def remove_background( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.List[RecipeFunction]] = None, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, - selected_model: typing.Optional[ImageSegmentationModels] = None, + selected_model: typing.Optional[RemoveBackgroundRequestSelectedModel] = None, mask_threshold: typing.Optional[float] = None, rect_persepective_transform: typing.Optional[bool] = None, reflection_opacity: typing.Optional[float] = None, @@ -9158,7 +9186,7 @@ async def remove_background( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[ImageSegmentationModels] + selected_model : typing.Optional[RemoveBackgroundRequestSelectedModel] mask_threshold : typing.Optional[float] @@ -9276,7 +9304,7 @@ async def upscale( variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None, input_image: typing.Optional[core.File] = None, input_video: typing.Optional[core.File] = None, - selected_models: typing.Optional[typing.List[UpscalerModels]] = None, + selected_models: typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] = None, selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None, settings: typing.Optional[RunSettings] = None, request_options: typing.Optional[RequestOptions] = None, @@ -9300,7 +9328,7 @@ async def upscale( input_video : typing.Optional[core.File] See core.File for more documentation - selected_models : typing.Optional[typing.List[UpscalerModels]] + selected_models : typing.Optional[typing.List[UpscaleRequestSelectedModelsItem]] selected_bg_model : typing.Optional[typing.Literal["real_esrgan_x2"]] @@ -9405,7 +9433,7 @@ async def embed( example_id: typing.Optional[str] = None, functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT, variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT, - selected_model: typing.Optional[EmbeddingModels] = OMIT, + selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT, settings: typing.Optional[RunSettings] = OMIT, request_options: typing.Optional[RequestOptions] = None, ) -> EmbeddingsPageOutput: @@ -9421,7 +9449,7 @@ async def embed( variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] Variables to be used as Jinja prompt templates and in functions as arguments - selected_model : typing.Optional[EmbeddingModels] + selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel] settings : typing.Optional[RunSettings] @@ -9525,19 +9553,19 @@ async def seo_people_also_ask_doc( max_context_words: typing.Optional[int] = OMIT, scroll_jump: typing.Optional[int] = OMIT, doc_extract_url: typing.Optional[str] = OMIT, - embedding_model: typing.Optional[EmbeddingModels] = OMIT, + 
embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT, dense_weight: typing.Optional[float] = OMIT, task_instructions: typing.Optional[str] = OMIT, query_instructions: typing.Optional[str] = OMIT, selected_model: typing.Optional[LargeLanguageModels] = OMIT, - citation_style: typing.Optional[CitationStyles] = OMIT, + citation_style: typing.Optional[RelatedQnADocPageRequestCitationStyle] = OMIT, avoid_repetition: typing.Optional[bool] = OMIT, num_outputs: typing.Optional[int] = OMIT, quality: typing.Optional[float] = OMIT, max_tokens: typing.Optional[int] = OMIT, sampling_temperature: typing.Optional[float] = OMIT, - response_format_type: typing.Optional[ResponseFormatType] = OMIT, - serp_search_location: typing.Optional[SerpSearchLocations] = OMIT, + response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT, + serp_search_location: typing.Optional[SerpSearchLocation] = OMIT, scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT, serp_search_type: typing.Optional[SerpSearchType] = OMIT, scaleserp_search_field: typing.Optional[str] = OMIT, @@ -9568,7 +9596,7 @@ async def seo_people_also_ask_doc( doc_extract_url : typing.Optional[str] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -9582,7 +9610,7 @@ async def seo_people_also_ask_doc( selected_model : typing.Optional[LargeLanguageModels] - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[RelatedQnADocPageRequestCitationStyle] avoid_repetition : typing.Optional[bool] @@ -9594,9 +9622,9 @@ async def seo_people_also_ask_doc( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType] - serp_search_location : typing.Optional[SerpSearchLocations] + serp_search_location : typing.Optional[SerpSearchLocation] scaleserp_locations : typing.Optional[typing.Sequence[str]] DEPRECATED: use `serp_search_location` instead diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py index 0d11408..3234b31 100644 --- a/src/gooey/copilot/__init__.py +++ b/src/gooey/copilot/__init__.py @@ -1,19 +1,33 @@ # This file was auto-generated by Fern from our API Definition. 
from .types import ( + CopilotCompletionRequestAsrModel, + CopilotCompletionRequestCitationStyle, + CopilotCompletionRequestEmbeddingModel, CopilotCompletionRequestFunctionsItem, CopilotCompletionRequestFunctionsItemTrigger, + CopilotCompletionRequestLipsyncModel, CopilotCompletionRequestOpenaiTtsModel, CopilotCompletionRequestOpenaiVoiceName, + CopilotCompletionRequestResponseFormatType, CopilotCompletionRequestSadtalkerSettings, CopilotCompletionRequestSadtalkerSettingsPreprocess, + CopilotCompletionRequestTranslationModel, + CopilotCompletionRequestTtsProvider, ) __all__ = [ + "CopilotCompletionRequestAsrModel", + "CopilotCompletionRequestCitationStyle", + "CopilotCompletionRequestEmbeddingModel", "CopilotCompletionRequestFunctionsItem", "CopilotCompletionRequestFunctionsItemTrigger", + "CopilotCompletionRequestLipsyncModel", "CopilotCompletionRequestOpenaiTtsModel", "CopilotCompletionRequestOpenaiVoiceName", + "CopilotCompletionRequestResponseFormatType", "CopilotCompletionRequestSadtalkerSettings", "CopilotCompletionRequestSadtalkerSettingsPreprocess", + "CopilotCompletionRequestTranslationModel", + "CopilotCompletionRequestTtsProvider", ] diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py index 247a892..9dcc465 100644 --- a/src/gooey/copilot/client.py +++ b/src/gooey/copilot/client.py @@ -6,14 +6,14 @@ from .. import core from ..types.conversation_entry import ConversationEntry from ..types.large_language_models import LargeLanguageModels -from ..types.embedding_models import EmbeddingModels -from ..types.citation_styles import CitationStyles -from ..types.asr_models import AsrModels -from ..types.translation_models import TranslationModels -from ..types.lipsync_models import LipsyncModels +from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel +from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle +from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel +from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel +from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel from ..types.llm_tools import LlmTools -from ..types.response_format_type import ResponseFormatType -from ..types.text_to_speech_providers import TextToSpeechProviders +from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType +from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider from .types.copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName from .types.copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel from .types.copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings @@ -60,25 +60,25 @@ def completion( max_references: typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[EmbeddingModels] = None, + embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[CitationStyles] = None, + citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsrModels] = None, + asr_model: 
typing.Optional[CopilotCompletionRequestAsrModel] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[TranslationModels] = None, + translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[LipsyncModels] = None, + lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, tools: typing.Optional[typing.List[LlmTools]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, - tts_provider: typing.Optional[TextToSpeechProviders] = None, + response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, + tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -152,7 +152,7 @@ def completion( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -160,17 +160,17 @@ def completion( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsrModels] + asr_model : typing.Optional[CopilotCompletionRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[TranslationModels] + translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -181,7 +181,7 @@ def completion( output_glossary_document : typing.Optional[core.File] See core.File for more documentation - lipsync_model : typing.Optional[LipsyncModels] + lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel] tools : typing.Optional[typing.List[LlmTools]] Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). 
@@ -196,9 +196,9 @@ def completion( sampling_temperature : typing.Optional[float] - response_format_type : typing.Optional[ResponseFormatType] + response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType] - tts_provider : typing.Optional[TextToSpeechProviders] + tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider] uberduck_voice_name : typing.Optional[str] @@ -412,25 +412,25 @@ async def completion( max_references: typing.Optional[int] = None, max_context_words: typing.Optional[int] = None, scroll_jump: typing.Optional[int] = None, - embedding_model: typing.Optional[EmbeddingModels] = None, + embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None, dense_weight: typing.Optional[float] = None, - citation_style: typing.Optional[CitationStyles] = None, + citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None, use_url_shortener: typing.Optional[bool] = None, - asr_model: typing.Optional[AsrModels] = None, + asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None, asr_language: typing.Optional[str] = None, - translation_model: typing.Optional[TranslationModels] = None, + translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None, user_language: typing.Optional[str] = None, input_glossary_document: typing.Optional[core.File] = None, output_glossary_document: typing.Optional[core.File] = None, - lipsync_model: typing.Optional[LipsyncModels] = None, + lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None, tools: typing.Optional[typing.List[LlmTools]] = None, avoid_repetition: typing.Optional[bool] = None, num_outputs: typing.Optional[int] = None, quality: typing.Optional[float] = None, max_tokens: typing.Optional[int] = None, sampling_temperature: typing.Optional[float] = None, - response_format_type: typing.Optional[ResponseFormatType] = None, - tts_provider: typing.Optional[TextToSpeechProviders] = None, + response_format_type: typing.Optional[CopilotCompletionRequestResponseFormatType] = None, + tts_provider: typing.Optional[CopilotCompletionRequestTtsProvider] = None, uberduck_voice_name: typing.Optional[str] = None, uberduck_speaking_rate: typing.Optional[float] = None, google_voice_name: typing.Optional[str] = None, @@ -504,7 +504,7 @@ async def completion( scroll_jump : typing.Optional[int] - embedding_model : typing.Optional[EmbeddingModels] + embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel] dense_weight : typing.Optional[float] @@ -512,17 +512,17 @@ async def completion( Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. - citation_style : typing.Optional[CitationStyles] + citation_style : typing.Optional[CopilotCompletionRequestCitationStyle] use_url_shortener : typing.Optional[bool] - asr_model : typing.Optional[AsrModels] + asr_model : typing.Optional[CopilotCompletionRequestAsrModel] Choose a model to transcribe incoming audio messages to text. asr_language : typing.Optional[str] Choose a language to transcribe incoming audio messages to text. - translation_model : typing.Optional[TranslationModels] + translation_model : typing.Optional[CopilotCompletionRequestTranslationModel] user_language : typing.Optional[str] Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. 
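Illustrative usage sketch (not part of the generated diff): the copilot `completion` changes above swap the shared enums (`EmbeddingModels`, `CitationStyles`, `AsrModels`, `TranslationModels`, `LipsyncModels`, `ResponseFormatType`, `TextToSpeechProviders`) for request-scoped literal unions, so call sites that pass plain strings keep working. The sketch assumes a top-level `Gooey` client reached as `client.copilot.completion(...)` and an API key in `GOOEY_API_KEY` (neither is shown in this diff); `input_prompt` is a hypothetical field used only for illustration, while the literal values come from the new type modules later in this changeset.

```python
import os

from gooey import Gooey  # assumption: top-level client export, not shown in this diff

# Assumption: the client accepts the API key as a constructor argument.
client = Gooey(api_key=os.environ["GOOEY_API_KEY"])

result = client.copilot.completion(
    # `input_prompt` is hypothetical; required fields are unchanged by this diff and
    # are omitted from the hunks shown here.
    input_prompt="Transcribe the attached audio and answer in short bullet points.",
    # These keyword arguments are the ones retyped in this changeset; they still accept
    # plain strings because the new aliases are Literal unions.
    asr_model="whisper_large_v3",         # CopilotCompletionRequestAsrModel
    translation_model="google",           # CopilotCompletionRequestTranslationModel
    citation_style="number",              # CopilotCompletionRequestCitationStyle
    embedding_model="openai_3_large",     # CopilotCompletionRequestEmbeddingModel
    lipsync_model="Wav2Lip",              # CopilotCompletionRequestLipsyncModel
    response_format_type="json_object",   # CopilotCompletionRequestResponseFormatType
    tts_provider="GOOGLE_TTS",            # CopilotCompletionRequestTtsProvider
)
print(result)
```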
@@ -533,7 +533,7 @@ async def completion(
         output_glossary_document : typing.Optional[core.File]
             See core.File for more documentation

-        lipsync_model : typing.Optional[LipsyncModels]
+        lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]

         tools : typing.Optional[typing.List[LlmTools]]
             Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -548,9 +548,9 @@ async def completion(
         sampling_temperature : typing.Optional[float]

-        response_format_type : typing.Optional[ResponseFormatType]
+        response_format_type : typing.Optional[CopilotCompletionRequestResponseFormatType]

-        tts_provider : typing.Optional[TextToSpeechProviders]
+        tts_provider : typing.Optional[CopilotCompletionRequestTtsProvider]

         uberduck_voice_name : typing.Optional[str]
diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py
index 2094b54..1cdf619 100644
--- a/src/gooey/copilot/types/__init__.py
+++ b/src/gooey/copilot/types/__init__.py
@@ -1,19 +1,33 @@
 # This file was auto-generated by Fern from our API Definition.

+from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
+from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
+from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
 from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
 from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
+from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
 from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
 from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
+from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
 from .copilot_completion_request_sadtalker_settings import CopilotCompletionRequestSadtalkerSettings
 from .copilot_completion_request_sadtalker_settings_preprocess import (
     CopilotCompletionRequestSadtalkerSettingsPreprocess,
 )
+from .copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
+from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider

 __all__ = [
+    "CopilotCompletionRequestAsrModel",
+    "CopilotCompletionRequestCitationStyle",
+    "CopilotCompletionRequestEmbeddingModel",
     "CopilotCompletionRequestFunctionsItem",
     "CopilotCompletionRequestFunctionsItemTrigger",
+    "CopilotCompletionRequestLipsyncModel",
     "CopilotCompletionRequestOpenaiTtsModel",
     "CopilotCompletionRequestOpenaiVoiceName",
+    "CopilotCompletionRequestResponseFormatType",
     "CopilotCompletionRequestSadtalkerSettings",
     "CopilotCompletionRequestSadtalkerSettingsPreprocess",
+    "CopilotCompletionRequestTranslationModel",
+    "CopilotCompletionRequestTtsProvider",
 ]
diff --git a/src/gooey/copilot/types/copilot_completion_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
new file mode 100644
index 0000000..65ae0f5
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestAsrModel = typing.Union[
+    typing.Literal[
+        "whisper_large_v2",
+        "whisper_large_v3",
+        "whisper_hindi_large_v2",
+        "whisper_telugu_large_v2",
+        "nemo_english",
+        "nemo_hindi",
+        "vakyansh_bhojpuri",
+        "gcp_v1",
+        "usm",
+        "deepgram",
+        "azure",
+        "seamless_m4t_v2",
+        "mms_1b_all",
+        "seamless_m4t",
+    ],
+    typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_citation_style.py b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
new file mode 100644
index 0000000..1bb273a
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_citation_style.py
@@ -0,0 +1,25 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestCitationStyle = typing.Union[
+    typing.Literal[
+        "number",
+        "title",
+        "url",
+        "symbol",
+        "markdown",
+        "html",
+        "slack_mrkdwn",
+        "plaintext",
+        "number_markdown",
+        "number_html",
+        "number_slack_mrkdwn",
+        "number_plaintext",
+        "symbol_markdown",
+        "symbol_html",
+        "symbol_slack_mrkdwn",
+        "symbol_plaintext",
+    ],
+    typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
new file mode 100644
index 0000000..4655801
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
@@ -0,0 +1,18 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestEmbeddingModel = typing.Union[
+    typing.Literal[
+        "openai_3_large",
+        "openai_3_small",
+        "openai_ada_2",
+        "e5_large_v2",
+        "e5_base_v2",
+        "multilingual_e5_base",
+        "multilingual_e5_large",
+        "gte_large",
+        "gte_base",
+    ],
+    typing.Any,
+]
diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
new file mode 100644
index 0000000..865bc4b
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_response_format_type.py b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
new file mode 100644
index 0000000..3c9dbb0
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_response_format_type.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_translation_model.py b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
new file mode 100644
index 0000000..10b0b5a
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_translation_model.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any]
diff --git a/src/gooey/copilot/types/copilot_completion_request_tts_provider.py b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
new file mode 100644
index 0000000..4dec4b0
--- /dev/null
+++ b/src/gooey/copilot/types/copilot_completion_request_tts_provider.py
@@ -0,0 +1,7 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+CopilotCompletionRequestTtsProvider = typing.Union[
+    typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any
+]
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index c630ac9..9c28e89 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "gooeyai",
-            "X-Fern-SDK-Version": "0.0.1-beta25",
+            "X-Fern-SDK-Version": "0.0.1-beta26",
         }
         headers["Authorization"] = f"Bearer {self._get_api_key()}"
         return headers
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index dfe0d3c..9087b38 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -4,21 +4,22 @@
 from .agg_function_function import AggFunctionFunction
 from .agg_function_result import AggFunctionResult
 from .agg_function_result_function import AggFunctionResultFunction
-from .animation_models import AnimationModels
 from .animation_prompt import AnimationPrompt
 from .asr_chunk import AsrChunk
-from .asr_models import AsrModels
-from .asr_output_format import AsrOutputFormat
 from .asr_output_json import AsrOutputJson
 from .asr_page_output import AsrPageOutput
 from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
 from .asr_page_request import AsrPageRequest
+from .asr_page_request_output_format import AsrPageRequestOutputFormat
+from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_page_request_translation_model import AsrPageRequestTranslationModel
 from .asr_page_status_response import AsrPageStatusResponse
 from .async_api_response_model_v3 import AsyncApiResponseModelV3
 from .balance_response import BalanceResponse
 from .bot_broadcast_filters import BotBroadcastFilters
 from .bot_broadcast_request_model import BotBroadcastRequestModel
 from .bulk_eval_page_output import BulkEvalPageOutput
+from .bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
 from .bulk_eval_page_status_response import BulkEvalPageStatusResponse
 from .bulk_runner_page_output import BulkRunnerPageOutput
 from .bulk_runner_page_request import BulkRunnerPageRequest
@@ -31,18 +32,19 @@
 from .chyron_plant_page_output import ChyronPlantPageOutput
 from .chyron_plant_page_request import ChyronPlantPageRequest
 from .chyron_plant_page_status_response import ChyronPlantPageStatusResponse
-from .citation_styles import CitationStyles
-from .combine_documents_chains import CombineDocumentsChains
 from .compare_llm_page_output import CompareLlmPageOutput
+from .compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
 from .compare_llm_page_status_response import CompareLlmPageStatusResponse
 from .compare_text2img_page_output import CompareText2ImgPageOutput
+from .compare_text2img_page_request_scheduler import CompareText2ImgPageRequestScheduler
+from
.compare_text2img_page_request_selected_models_item import CompareText2ImgPageRequestSelectedModelsItem from .compare_text2img_page_status_response import CompareText2ImgPageStatusResponse from .compare_upscaler_page_output import CompareUpscalerPageOutput from .compare_upscaler_page_request import CompareUpscalerPageRequest +from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .compare_upscaler_page_status_response import CompareUpscalerPageStatusResponse from .console_logs import ConsoleLogs from .console_logs_level import ConsoleLogsLevel -from .control_net_models import ControlNetModels from .conversation_entry import ConversationEntry from .conversation_entry_content import ConversationEntryContent from .conversation_entry_content_item import ( @@ -53,28 +55,47 @@ from .conversation_entry_role import ConversationEntryRole from .conversation_start import ConversationStart from .create_stream_request import CreateStreamRequest +from .create_stream_request_asr_model import CreateStreamRequestAsrModel +from .create_stream_request_citation_style import CreateStreamRequestCitationStyle +from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel +from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName +from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType +from .create_stream_request_translation_model import CreateStreamRequestTranslationModel +from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider from .create_stream_response import CreateStreamResponse from .deforum_sd_page_output import DeforumSdPageOutput +from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel from .deforum_sd_page_status_response import DeforumSdPageStatusResponse from .doc_extract_page_output import DocExtractPageOutput from .doc_extract_page_request import DocExtractPageRequest +from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType +from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel from .doc_extract_page_status_response import DocExtractPageStatusResponse from .doc_search_page_output import DocSearchPageOutput +from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle +from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery +from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType from .doc_search_page_status_response import DocSearchPageStatusResponse from .doc_summary_page_output import DocSummaryPageOutput from .doc_summary_page_request import DocSummaryPageRequest +from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType +from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel from .doc_summary_page_status_response import DocSummaryPageStatusResponse +from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType +from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel from .email_face_inpainting_page_output import 
EmailFaceInpaintingPageOutput +from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse -from .embedding_models import EmbeddingModels from .embeddings_page_output import EmbeddingsPageOutput +from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel from .embeddings_page_status_response import EmbeddingsPageStatusResponse from .eval_prompt import EvalPrompt from .face_inpainting_page_output import FaceInpaintingPageOutput from .face_inpainting_page_request import FaceInpaintingPageRequest +from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .face_inpainting_page_status_response import FaceInpaintingPageStatusResponse from .final_response import FinalResponse from .functions_page_output import FunctionsPageOutput @@ -82,60 +103,90 @@ from .generic_error_response import GenericErrorResponse from .generic_error_response_detail import GenericErrorResponseDetail from .google_gpt_page_output import GoogleGptPageOutput +from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel +from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType from .google_gpt_page_status_response import GoogleGptPageStatusResponse from .google_image_gen_page_output import GoogleImageGenPageOutput +from .google_image_gen_page_request_selected_model import GoogleImageGenPageRequestSelectedModel from .google_image_gen_page_status_response import GoogleImageGenPageStatusResponse from .http_validation_error import HttpValidationError -from .image_segmentation_models import ImageSegmentationModels from .image_segmentation_page_output import ImageSegmentationPageOutput from .image_segmentation_page_request import ImageSegmentationPageRequest +from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .image_segmentation_page_status_response import ImageSegmentationPageStatusResponse -from .image_to_image_models import ImageToImageModels from .image_url import ImageUrl from .image_url_detail import ImageUrlDetail from .img2img_page_output import Img2ImgPageOutput from .img2img_page_request import Img2ImgPageRequest from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel +from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem +from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel from .img2img_page_status_response import Img2ImgPageStatusResponse -from .inpainting_models import InpaintingModels from .large_language_models import LargeLanguageModels from .letter_writer_page_output import LetterWriterPageOutput from .letter_writer_page_request import LetterWriterPageRequest from .letter_writer_page_status_response import LetterWriterPageStatusResponse -from .lipsync_models import LipsyncModels from .lipsync_page_output import LipsyncPageOutput from .lipsync_page_request import LipsyncPageRequest +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from .lipsync_page_status_response import LipsyncPageStatusResponse +from .lipsync_request_selected_model import LipsyncRequestSelectedModel from .lipsync_tts_page_output import LipsyncTtsPageOutput from .lipsync_tts_page_request import LipsyncTtsPageRequest from 
.lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName +from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel +from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName +from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel +from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider from .llm_tools import LlmTools from .message_part import MessagePart from .object_inpainting_page_output import ObjectInpaintingPageOutput from .object_inpainting_page_request import ObjectInpaintingPageRequest +from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .object_inpainting_page_status_response import ObjectInpaintingPageStatusResponse +from .portrait_request_selected_model import PortraitRequestSelectedModel +from .product_image_request_selected_model import ProductImageRequestSelectedModel from .prompt_tree_node import PromptTreeNode from .prompt_tree_node_prompt import PromptTreeNodePrompt from .qr_code_generator_page_output import QrCodeGeneratorPageOutput from .qr_code_generator_page_request import QrCodeGeneratorPageRequest +from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, +) +from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler +from .qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, +) +from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel from .qr_code_generator_page_status_response import QrCodeGeneratorPageStatusResponse +from .qr_code_request_image_prompt_controlnet_models_item import QrCodeRequestImagePromptControlnetModelsItem +from .qr_code_request_scheduler import QrCodeRequestScheduler +from .qr_code_request_selected_controlnet_model_item import QrCodeRequestSelectedControlnetModelItem +from .qr_code_request_selected_model import QrCodeRequestSelectedModel from .recipe_function import RecipeFunction from .recipe_function_trigger import RecipeFunctionTrigger from .recipe_run_state import RecipeRunState from .related_doc_search_response import RelatedDocSearchResponse from .related_google_gpt_response import RelatedGoogleGptResponse from .related_qn_a_doc_page_output import RelatedQnADocPageOutput +from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle +from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery +from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse from .related_qn_a_page_output import RelatedQnAPageOutput +from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel +from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType from 
.related_qn_a_page_status_response import RelatedQnAPageStatusResponse from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel +from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem +from .remix_image_request_selected_model import RemixImageRequestSelectedModel +from .remove_background_request_selected_model import RemoveBackgroundRequestSelectedModel from .reply_button import ReplyButton -from .response_format_type import ResponseFormatType from .response_model import ResponseModel from .response_model_final_keyword_query import ResponseModelFinalKeywordQuery from .response_model_final_prompt import ResponseModelFinalPrompt @@ -144,32 +195,38 @@ from .run_start import RunStart from .sad_talker_settings import SadTalkerSettings from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess -from .schedulers import Schedulers from .search_reference import SearchReference from .seo_summary_page_output import SeoSummaryPageOutput +from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType from .seo_summary_page_status_response import SeoSummaryPageStatusResponse -from .serp_search_locations import SerpSearchLocations +from .serp_search_location import SerpSearchLocation from .serp_search_type import SerpSearchType from .smart_gpt_page_output import SmartGptPageOutput +from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType from .smart_gpt_page_status_response import SmartGptPageStatusResponse from .social_lookup_email_page_output import SocialLookupEmailPageOutput +from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse +from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat +from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel +from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel from .stream_error import StreamError -from .text2audio_models import Text2AudioModels +from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType +from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel from .text2audio_page_output import Text2AudioPageOutput from .text2audio_page_status_response import Text2AudioPageStatusResponse -from .text_to_image_models import TextToImageModels from .text_to_speech_page_output import TextToSpeechPageOutput from .text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel from .text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName +from .text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider from .text_to_speech_page_status_response import TextToSpeechPageStatusResponse -from .text_to_speech_providers import TextToSpeechProviders from .training_data_model import TrainingDataModel -from .translation_models import TranslationModels +from .translate_request_selected_model import TranslateRequestSelectedModel from .translation_page_output import TranslationPageOutput from .translation_page_request import TranslationPageRequest +from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .translation_page_status_response import 
TranslationPageStatusResponse -from .upscaler_models import UpscalerModels +from .upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem from .validation_error import ValidationError from .validation_error_loc_item import ValidationErrorLocItem from .vcard import Vcard @@ -177,12 +234,19 @@ from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt from .video_bots_page_request import VideoBotsPageRequest +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings from .video_bots_page_request_sadtalker_settings_preprocess import VideoBotsPageRequestSadtalkerSettingsPreprocess +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from .video_bots_page_status_response import VideoBotsPageStatusResponse __all__ = [ @@ -190,21 +254,22 @@ "AggFunctionFunction", "AggFunctionResult", "AggFunctionResultFunction", - "AnimationModels", "AnimationPrompt", "AsrChunk", - "AsrModels", - "AsrOutputFormat", "AsrOutputJson", "AsrPageOutput", "AsrPageOutputOutputTextItem", "AsrPageRequest", + "AsrPageRequestOutputFormat", + "AsrPageRequestSelectedModel", + "AsrPageRequestTranslationModel", "AsrPageStatusResponse", "AsyncApiResponseModelV3", "BalanceResponse", "BotBroadcastFilters", "BotBroadcastRequestModel", "BulkEvalPageOutput", + "BulkEvalPageRequestResponseFormatType", "BulkEvalPageStatusResponse", "BulkRunnerPageOutput", "BulkRunnerPageRequest", @@ -217,18 +282,19 @@ "ChyronPlantPageOutput", "ChyronPlantPageRequest", "ChyronPlantPageStatusResponse", - "CitationStyles", - "CombineDocumentsChains", "CompareLlmPageOutput", + "CompareLlmPageRequestResponseFormatType", "CompareLlmPageStatusResponse", "CompareText2ImgPageOutput", + "CompareText2ImgPageRequestScheduler", + "CompareText2ImgPageRequestSelectedModelsItem", "CompareText2ImgPageStatusResponse", "CompareUpscalerPageOutput", "CompareUpscalerPageRequest", + "CompareUpscalerPageRequestSelectedModelsItem", "CompareUpscalerPageStatusResponse", "ConsoleLogs", "ConsoleLogsLevel", - "ControlNetModels", "ConversationEntry", "ConversationEntryContent", "ConversationEntryContentItem", @@ -237,28 +303,47 @@ "ConversationEntryRole", "ConversationStart", "CreateStreamRequest", + "CreateStreamRequestAsrModel", + "CreateStreamRequestCitationStyle", + "CreateStreamRequestEmbeddingModel", + "CreateStreamRequestLipsyncModel", "CreateStreamRequestOpenaiTtsModel", "CreateStreamRequestOpenaiVoiceName", + "CreateStreamRequestResponseFormatType", + "CreateStreamRequestTranslationModel", + 
"CreateStreamRequestTtsProvider", "CreateStreamResponse", "DeforumSdPageOutput", + "DeforumSdPageRequestSelectedModel", "DeforumSdPageStatusResponse", "DocExtractPageOutput", "DocExtractPageRequest", + "DocExtractPageRequestResponseFormatType", + "DocExtractPageRequestSelectedAsrModel", "DocExtractPageStatusResponse", "DocSearchPageOutput", + "DocSearchPageRequestCitationStyle", + "DocSearchPageRequestEmbeddingModel", "DocSearchPageRequestKeywordQuery", + "DocSearchPageRequestResponseFormatType", "DocSearchPageStatusResponse", "DocSummaryPageOutput", "DocSummaryPageRequest", + "DocSummaryPageRequestResponseFormatType", + "DocSummaryPageRequestSelectedAsrModel", "DocSummaryPageStatusResponse", + "DocSummaryRequestResponseFormatType", + "DocSummaryRequestSelectedAsrModel", "EmailFaceInpaintingPageOutput", + "EmailFaceInpaintingPageRequestSelectedModel", "EmailFaceInpaintingPageStatusResponse", - "EmbeddingModels", "EmbeddingsPageOutput", + "EmbeddingsPageRequestSelectedModel", "EmbeddingsPageStatusResponse", "EvalPrompt", "FaceInpaintingPageOutput", "FaceInpaintingPageRequest", + "FaceInpaintingPageRequestSelectedModel", "FaceInpaintingPageStatusResponse", "FinalResponse", "FunctionsPageOutput", @@ -266,60 +351,86 @@ "GenericErrorResponse", "GenericErrorResponseDetail", "GoogleGptPageOutput", + "GoogleGptPageRequestEmbeddingModel", + "GoogleGptPageRequestResponseFormatType", "GoogleGptPageStatusResponse", "GoogleImageGenPageOutput", + "GoogleImageGenPageRequestSelectedModel", "GoogleImageGenPageStatusResponse", "HttpValidationError", - "ImageSegmentationModels", "ImageSegmentationPageOutput", "ImageSegmentationPageRequest", + "ImageSegmentationPageRequestSelectedModel", "ImageSegmentationPageStatusResponse", - "ImageToImageModels", "ImageUrl", "ImageUrlDetail", "Img2ImgPageOutput", "Img2ImgPageRequest", "Img2ImgPageRequestSelectedControlnetModel", + "Img2ImgPageRequestSelectedControlnetModelItem", + "Img2ImgPageRequestSelectedModel", "Img2ImgPageStatusResponse", - "InpaintingModels", "LargeLanguageModels", "LetterWriterPageOutput", "LetterWriterPageRequest", "LetterWriterPageStatusResponse", - "LipsyncModels", "LipsyncPageOutput", "LipsyncPageRequest", + "LipsyncPageRequestSelectedModel", "LipsyncPageStatusResponse", + "LipsyncRequestSelectedModel", "LipsyncTtsPageOutput", "LipsyncTtsPageRequest", "LipsyncTtsPageRequestOpenaiTtsModel", "LipsyncTtsPageRequestOpenaiVoiceName", + "LipsyncTtsPageRequestSelectedModel", + "LipsyncTtsPageRequestTtsProvider", "LipsyncTtsPageStatusResponse", "LipsyncTtsRequestOpenaiTtsModel", "LipsyncTtsRequestOpenaiVoiceName", + "LipsyncTtsRequestSelectedModel", + "LipsyncTtsRequestTtsProvider", "LlmTools", "MessagePart", "ObjectInpaintingPageOutput", "ObjectInpaintingPageRequest", + "ObjectInpaintingPageRequestSelectedModel", "ObjectInpaintingPageStatusResponse", + "PortraitRequestSelectedModel", + "ProductImageRequestSelectedModel", "PromptTreeNode", "PromptTreeNodePrompt", "QrCodeGeneratorPageOutput", "QrCodeGeneratorPageRequest", + "QrCodeGeneratorPageRequestImagePromptControlnetModelsItem", + "QrCodeGeneratorPageRequestScheduler", + "QrCodeGeneratorPageRequestSelectedControlnetModelItem", + "QrCodeGeneratorPageRequestSelectedModel", "QrCodeGeneratorPageStatusResponse", + "QrCodeRequestImagePromptControlnetModelsItem", + "QrCodeRequestScheduler", + "QrCodeRequestSelectedControlnetModelItem", + "QrCodeRequestSelectedModel", "RecipeFunction", "RecipeFunctionTrigger", "RecipeRunState", "RelatedDocSearchResponse", "RelatedGoogleGptResponse", 
"RelatedQnADocPageOutput", + "RelatedQnADocPageRequestCitationStyle", + "RelatedQnADocPageRequestEmbeddingModel", "RelatedQnADocPageRequestKeywordQuery", + "RelatedQnADocPageRequestResponseFormatType", "RelatedQnADocPageStatusResponse", "RelatedQnAPageOutput", + "RelatedQnAPageRequestEmbeddingModel", + "RelatedQnAPageRequestResponseFormatType", "RelatedQnAPageStatusResponse", "RemixImageRequestSelectedControlnetModel", + "RemixImageRequestSelectedControlnetModelItem", + "RemixImageRequestSelectedModel", + "RemoveBackgroundRequestSelectedModel", "ReplyButton", - "ResponseFormatType", "ResponseModel", "ResponseModelFinalKeywordQuery", "ResponseModelFinalPrompt", @@ -328,32 +439,38 @@ "RunStart", "SadTalkerSettings", "SadTalkerSettingsPreprocess", - "Schedulers", "SearchReference", "SeoSummaryPageOutput", + "SeoSummaryPageRequestResponseFormatType", "SeoSummaryPageStatusResponse", - "SerpSearchLocations", + "SerpSearchLocation", "SerpSearchType", "SmartGptPageOutput", + "SmartGptPageRequestResponseFormatType", "SmartGptPageStatusResponse", "SocialLookupEmailPageOutput", + "SocialLookupEmailPageRequestResponseFormatType", "SocialLookupEmailPageStatusResponse", + "SpeechRecognitionRequestOutputFormat", + "SpeechRecognitionRequestSelectedModel", + "SpeechRecognitionRequestTranslationModel", "StreamError", - "Text2AudioModels", + "SynthesizeDataRequestResponseFormatType", + "SynthesizeDataRequestSelectedAsrModel", "Text2AudioPageOutput", "Text2AudioPageStatusResponse", - "TextToImageModels", "TextToSpeechPageOutput", "TextToSpeechPageRequestOpenaiTtsModel", "TextToSpeechPageRequestOpenaiVoiceName", + "TextToSpeechPageRequestTtsProvider", "TextToSpeechPageStatusResponse", - "TextToSpeechProviders", "TrainingDataModel", - "TranslationModels", + "TranslateRequestSelectedModel", "TranslationPageOutput", "TranslationPageRequest", + "TranslationPageRequestSelectedModel", "TranslationPageStatusResponse", - "UpscalerModels", + "UpscaleRequestSelectedModelsItem", "ValidationError", "ValidationErrorLocItem", "Vcard", @@ -361,11 +478,18 @@ "VideoBotsPageOutputFinalKeywordQuery", "VideoBotsPageOutputFinalPrompt", "VideoBotsPageRequest", + "VideoBotsPageRequestAsrModel", + "VideoBotsPageRequestCitationStyle", + "VideoBotsPageRequestEmbeddingModel", "VideoBotsPageRequestFunctionsItem", "VideoBotsPageRequestFunctionsItemTrigger", + "VideoBotsPageRequestLipsyncModel", "VideoBotsPageRequestOpenaiTtsModel", "VideoBotsPageRequestOpenaiVoiceName", + "VideoBotsPageRequestResponseFormatType", "VideoBotsPageRequestSadtalkerSettings", "VideoBotsPageRequestSadtalkerSettingsPreprocess", + "VideoBotsPageRequestTranslationModel", + "VideoBotsPageRequestTtsProvider", "VideoBotsPageStatusResponse", ] diff --git a/src/gooey/types/animation_models.py b/src/gooey/types/animation_models.py deleted file mode 100644 index 8ad7a84..0000000 --- a/src/gooey/types/animation_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -AnimationModels = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/asr_output_format.py b/src/gooey/types/asr_output_format.py deleted file mode 100644 index b3b0e2d..0000000 --- a/src/gooey/types/asr_output_format.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -AsrOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py index 1210679..1d35181 100644 --- a/src/gooey/types/asr_page_request.py +++ b/src/gooey/types/asr_page_request.py @@ -4,9 +4,9 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .asr_models import AsrModels -from .translation_models import TranslationModels -from .asr_output_format import AsrOutputFormat +from .asr_page_request_selected_model import AsrPageRequestSelectedModel +from .asr_page_request_translation_model import AsrPageRequestTranslationModel +from .asr_page_request_output_format import AsrPageRequestOutputFormat from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -19,10 +19,10 @@ class AsrPageRequest(UniversalBaseModel): """ documents: typing.List[str] - selected_model: typing.Optional[AsrModels] = None + selected_model: typing.Optional[AsrPageRequestSelectedModel] = None language: typing.Optional[str] = None - translation_model: typing.Optional[TranslationModels] = None - output_format: typing.Optional[AsrOutputFormat] = None + translation_model: typing.Optional[AsrPageRequestTranslationModel] = None + output_format: typing.Optional[AsrPageRequestOutputFormat] = None google_translate_target: typing.Optional[str] = pydantic.Field(default=None) """ use `translation_model` & `translation_target` instead. diff --git a/src/gooey/types/asr_page_request_output_format.py b/src/gooey/types/asr_page_request_output_format.py new file mode 100644 index 0000000..101e681 --- /dev/null +++ b/src/gooey/types/asr_page_request_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrPageRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/asr_models.py b/src/gooey/types/asr_page_request_selected_model.py similarity index 91% rename from src/gooey/types/asr_models.py rename to src/gooey/types/asr_page_request_selected_model.py index 16c222a..4e80d3c 100644 --- a/src/gooey/types/asr_models.py +++ b/src/gooey/types/asr_page_request_selected_model.py @@ -2,7 +2,7 @@ import typing -AsrModels = typing.Union[ +AsrPageRequestSelectedModel = typing.Union[ typing.Literal[ "whisper_large_v2", "whisper_large_v3", diff --git a/src/gooey/types/asr_page_request_translation_model.py b/src/gooey/types/asr_page_request_translation_model.py new file mode 100644 index 0000000..d5dcef6 --- /dev/null +++ b/src/gooey/types/asr_page_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +AsrPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/bulk_eval_page_request_response_format_type.py b/src/gooey/types/bulk_eval_page_request_response_format_type.py new file mode 100644 index 0000000..f1c242f --- /dev/null +++ b/src/gooey/types/bulk_eval_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +BulkEvalPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/combine_documents_chains.py b/src/gooey/types/combine_documents_chains.py deleted file mode 100644 index c457e00..0000000 --- a/src/gooey/types/combine_documents_chains.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -CombineDocumentsChains = typing.Literal["map_reduce"] diff --git a/src/gooey/types/compare_llm_page_request_response_format_type.py b/src/gooey/types/compare_llm_page_request_response_format_type.py new file mode 100644 index 0000000..a846068 --- /dev/null +++ b/src/gooey/types/compare_llm_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareLlmPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/compare_text2img_page_request_scheduler.py b/src/gooey/types/compare_text2img_page_request_scheduler.py new file mode 100644 index 0000000..29ce840 --- /dev/null +++ b/src/gooey/types/compare_text2img_page_request_scheduler.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareText2ImgPageRequestScheduler = typing.Union[ + typing.Literal[ + "singlestep_dpm_solver", + "multistep_dpm_solver", + "dpm_sde", + "dpm_discrete", + "dpm_discrete_ancestral", + "unipc", + "lms_discrete", + "heun", + "euler", + "euler_ancestral", + "pndm", + "ddpm", + "ddim", + "deis", + ], + typing.Any, +] diff --git a/src/gooey/types/compare_text2img_page_request_selected_models_item.py b/src/gooey/types/compare_text2img_page_request_selected_models_item.py new file mode 100644 index 0000000..4154491 --- /dev/null +++ b/src/gooey/types/compare_text2img_page_request_selected_models_item.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CompareText2ImgPageRequestSelectedModelsItem = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "dall_e_3", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + "deepfloyd_if", + ], + typing.Any, +] diff --git a/src/gooey/types/compare_upscaler_page_request.py b/src/gooey/types/compare_upscaler_page_request.py index 849d668..8cfb4e7 100644 --- a/src/gooey/types/compare_upscaler_page_request.py +++ b/src/gooey/types/compare_upscaler_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .upscaler_models import UpscalerModels +from .compare_upscaler_page_request_selected_models_item import CompareUpscalerPageRequestSelectedModelsItem from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -23,7 +23,7 @@ class CompareUpscalerPageRequest(UniversalBaseModel): The final upsampling scale of the image """ - selected_models: typing.Optional[typing.List[UpscalerModels]] = None + selected_models: typing.Optional[typing.List[CompareUpscalerPageRequestSelectedModelsItem]] = None selected_bg_model: typing.Optional[typing.Literal["real_esrgan_x2"]] = None settings: typing.Optional[RunSettings] = None diff --git a/src/gooey/types/compare_upscaler_page_request_selected_models_item.py b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py new file mode 100644 index 0000000..eff4f6e --- /dev/null +++ b/src/gooey/types/compare_upscaler_page_request_selected_models_item.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CompareUpscalerPageRequestSelectedModelsItem = typing.Union[ + typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any +] diff --git a/src/gooey/types/create_stream_request.py b/src/gooey/types/create_stream_request.py index bf4541f..2d4745b 100644 --- a/src/gooey/types/create_stream_request.py +++ b/src/gooey/types/create_stream_request.py @@ -7,14 +7,14 @@ from .recipe_function import RecipeFunction from .conversation_entry import ConversationEntry from .large_language_models import LargeLanguageModels -from .embedding_models import EmbeddingModels -from .citation_styles import CitationStyles -from .asr_models import AsrModels -from .translation_models import TranslationModels -from .lipsync_models import LipsyncModels +from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel +from .create_stream_request_citation_style import CreateStreamRequestCitationStyle +from .create_stream_request_asr_model import CreateStreamRequestAsrModel +from .create_stream_request_translation_model import CreateStreamRequestTranslationModel +from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel from .llm_tools import LlmTools -from .response_format_type import ResponseFormatType -from .text_to_speech_providers import TextToSpeechProviders +from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType +from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel from .sad_talker_settings import SadTalkerSettings @@ -85,16 +85,16 @@ class CreateStreamRequest(UniversalBaseModel): max_references: 
typing.Optional[int] = None max_context_words: typing.Optional[int] = None scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[EmbeddingModels] = None + embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = None dense_weight: typing.Optional[float] = pydantic.Field(default=None) """ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. """ - citation_style: typing.Optional[CitationStyles] = None + citation_style: typing.Optional[CreateStreamRequestCitationStyle] = None use_url_shortener: typing.Optional[bool] = None - asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None) + asr_model: typing.Optional[CreateStreamRequestAsrModel] = pydantic.Field(default=None) """ Choose a model to transcribe incoming audio messages to text. """ @@ -104,7 +104,7 @@ class CreateStreamRequest(UniversalBaseModel): Choose a language to transcribe incoming audio messages to text. """ - translation_model: typing.Optional[TranslationModels] = None + translation_model: typing.Optional[CreateStreamRequestTranslationModel] = None user_language: typing.Optional[str] = pydantic.Field(default=None) """ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -120,7 +120,7 @@ class CreateStreamRequest(UniversalBaseModel): Translation Glossary for LLM Language (English) -> User Langauge """ - lipsync_model: typing.Optional[LipsyncModels] = None + lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = None tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None) """ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). @@ -131,8 +131,8 @@ class CreateStreamRequest(UniversalBaseModel): quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[ResponseFormatType] = None - tts_provider: typing.Optional[TextToSpeechProviders] = None + response_format_type: typing.Optional[CreateStreamRequestResponseFormatType] = None + tts_provider: typing.Optional[CreateStreamRequestTtsProvider] = None uberduck_voice_name: typing.Optional[str] = None uberduck_speaking_rate: typing.Optional[float] = None google_voice_name: typing.Optional[str] = None diff --git a/src/gooey/types/create_stream_request_asr_model.py b/src/gooey/types/create_stream_request_asr_model.py new file mode 100644 index 0000000..af166fa --- /dev/null +++ b/src/gooey/types/create_stream_request_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateStreamRequestAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/citation_styles.py b/src/gooey/types/create_stream_request_citation_style.py similarity index 90% rename from src/gooey/types/citation_styles.py rename to src/gooey/types/create_stream_request_citation_style.py index 4d822c2..e57bab1 100644 --- a/src/gooey/types/citation_styles.py +++ b/src/gooey/types/create_stream_request_citation_style.py @@ -2,7 +2,7 @@ import typing -CitationStyles = typing.Union[ +CreateStreamRequestCitationStyle = typing.Union[ typing.Literal[ "number", "title", diff --git a/src/gooey/types/embedding_models.py b/src/gooey/types/create_stream_request_embedding_model.py similarity index 87% rename from src/gooey/types/embedding_models.py rename to src/gooey/types/create_stream_request_embedding_model.py index 8007d2d..cef26bf 100644 --- a/src/gooey/types/embedding_models.py +++ b/src/gooey/types/create_stream_request_embedding_model.py @@ -2,7 +2,7 @@ import typing -EmbeddingModels = typing.Union[ +CreateStreamRequestEmbeddingModel = typing.Union[ typing.Literal[ "openai_3_large", "openai_3_small", diff --git a/src/gooey/types/create_stream_request_lipsync_model.py b/src/gooey/types/create_stream_request_lipsync_model.py new file mode 100644 index 0000000..c207d45 --- /dev/null +++ b/src/gooey/types/create_stream_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/create_stream_request_response_format_type.py b/src/gooey/types/create_stream_request_response_format_type.py new file mode 100644 index 0000000..dc5024d --- /dev/null +++ b/src/gooey/types/create_stream_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateStreamRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/create_stream_request_translation_model.py b/src/gooey/types/create_stream_request_translation_model.py new file mode 100644 index 0000000..3876937 --- /dev/null +++ b/src/gooey/types/create_stream_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +CreateStreamRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/create_stream_request_tts_provider.py b/src/gooey/types/create_stream_request_tts_provider.py new file mode 100644 index 0000000..cad602d --- /dev/null +++ b/src/gooey/types/create_stream_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +CreateStreamRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py new file mode 100644 index 0000000..3af657a --- /dev/null +++ b/src/gooey/types/deforum_sd_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py index 4beeb94..9690c6c 100644 --- a/src/gooey/types/doc_extract_page_request.py +++ b/src/gooey/types/doc_extract_page_request.py @@ -4,9 +4,9 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .asr_models import AsrModels +from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel from .large_language_models import LargeLanguageModels -from .response_format_type import ResponseFormatType +from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -20,7 +20,7 @@ class DocExtractPageRequest(UniversalBaseModel): documents: typing.List[str] sheet_url: typing.Optional[str] = None - selected_asr_model: typing.Optional[AsrModels] = None + selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None google_translate_target: typing.Optional[str] = None glossary_document: typing.Optional[str] = None task_instructions: typing.Optional[str] = None @@ -30,7 +30,7 @@ class DocExtractPageRequest(UniversalBaseModel): quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[ResponseFormatType] = None + response_format_type: typing.Optional[DocExtractPageRequestResponseFormatType] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/doc_extract_page_request_response_format_type.py b/src/gooey/types/doc_extract_page_request_response_format_type.py new file mode 100644 index 0000000..0ad7c14 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocExtractPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py new file mode 100644 index 0000000..a358400 --- /dev/null +++ b/src/gooey/types/doc_extract_page_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocExtractPageRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_search_page_request_citation_style.py b/src/gooey/types/doc_search_page_request_citation_style.py new file mode 100644 index 0000000..b47b3be --- /dev/null +++ b/src/gooey/types/doc_search_page_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSearchPageRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_search_page_request_embedding_model.py b/src/gooey/types/doc_search_page_request_embedding_model.py new file mode 100644 index 0000000..fb35612 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSearchPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_search_page_request_response_format_type.py b/src/gooey/types/doc_search_page_request_response_format_type.py new file mode 100644 index 0000000..856b641 --- /dev/null +++ b/src/gooey/types/doc_search_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSearchPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py index dadd11a..466ddc1 100644 --- a/src/gooey/types/doc_summary_page_request.py +++ b/src/gooey/types/doc_summary_page_request.py @@ -5,9 +5,8 @@ from .recipe_function import RecipeFunction import pydantic from .large_language_models import LargeLanguageModels -from .combine_documents_chains import CombineDocumentsChains -from .asr_models import AsrModels -from .response_format_type import ResponseFormatType +from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel +from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -23,15 +22,15 @@ class DocSummaryPageRequest(UniversalBaseModel): task_instructions: typing.Optional[str] = None merge_instructions: typing.Optional[str] = None selected_model: typing.Optional[LargeLanguageModels] = None - chain_type: typing.Optional[CombineDocumentsChains] = None - selected_asr_model: typing.Optional[AsrModels] = None + chain_type: typing.Optional[typing.Literal["map_reduce"]] = None + selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None google_translate_target: typing.Optional[str] = None avoid_repetition: typing.Optional[bool] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[ResponseFormatType] = None + response_format_type: typing.Optional[DocSummaryPageRequestResponseFormatType] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/doc_summary_page_request_response_format_type.py b/src/gooey/types/doc_summary_page_request_response_format_type.py new file mode 100644 index 0000000..318ad7f --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py new file mode 100644 index 0000000..c04cc7a --- /dev/null +++ b/src/gooey/types/doc_summary_page_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryPageRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/doc_summary_request_response_format_type.py b/src/gooey/types/doc_summary_request_response_format_type.py new file mode 100644 index 0000000..8fabf9b --- /dev/null +++ b/src/gooey/types/doc_summary_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +DocSummaryRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py new file mode 100644 index 0000000..8b8a338 --- /dev/null +++ b/src/gooey/types/doc_summary_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +DocSummaryRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/email_face_inpainting_page_request_selected_model.py b/src/gooey/types/email_face_inpainting_page_request_selected_model.py new file mode 100644 index 0000000..822b5a6 --- /dev/null +++ b/src/gooey/types/email_face_inpainting_page_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EmailFaceInpaintingPageRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/embeddings_page_request_selected_model.py b/src/gooey/types/embeddings_page_request_selected_model.py new file mode 100644 index 0000000..a03ecc8 --- /dev/null +++ b/src/gooey/types/embeddings_page_request_selected_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +EmbeddingsPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/face_inpainting_page_request.py b/src/gooey/types/face_inpainting_page_request.py index 8e88a4f..a653205 100644 --- a/src/gooey/types/face_inpainting_page_request.py +++ b/src/gooey/types/face_inpainting_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .inpainting_models import InpaintingModels +from .face_inpainting_page_request_selected_model import FaceInpaintingPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -21,7 +21,7 @@ class FaceInpaintingPageRequest(UniversalBaseModel): face_scale: typing.Optional[float] = None face_pos_x: typing.Optional[float] = None face_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[InpaintingModels] = None + selected_model: typing.Optional[FaceInpaintingPageRequestSelectedModel] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None diff --git a/src/gooey/types/face_inpainting_page_request_selected_model.py b/src/gooey/types/face_inpainting_page_request_selected_model.py new file mode 100644 index 0000000..9b8eab6 --- /dev/null +++ b/src/gooey/types/face_inpainting_page_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +FaceInpaintingPageRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/google_gpt_page_request_embedding_model.py new file mode 100644 index 0000000..66f060f --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/google_gpt_page_request_response_format_type.py b/src/gooey/types/google_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..dd04dec --- /dev/null +++ b/src/gooey/types/google_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/google_image_gen_page_request_selected_model.py b/src/gooey/types/google_image_gen_page_request_selected_model.py new file mode 100644 index 0000000..c872962 --- /dev/null +++ b/src/gooey/types/google_image_gen_page_request_selected_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +GoogleImageGenPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "instruct_pix2pix", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + ], + typing.Any, +] diff --git a/src/gooey/types/image_segmentation_models.py b/src/gooey/types/image_segmentation_models.py deleted file mode 100644 index aae4fee..0000000 --- a/src/gooey/types/image_segmentation_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -ImageSegmentationModels = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/image_segmentation_page_request.py b/src/gooey/types/image_segmentation_page_request.py index 3e1952c..a2ea60d 100644 --- a/src/gooey/types/image_segmentation_page_request.py +++ b/src/gooey/types/image_segmentation_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .image_segmentation_models import ImageSegmentationModels +from .image_segmentation_page_request_selected_model import ImageSegmentationPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,7 +17,7 @@ class ImageSegmentationPageRequest(UniversalBaseModel): """ input_image: str - selected_model: typing.Optional[ImageSegmentationModels] = None + selected_model: typing.Optional[ImageSegmentationPageRequestSelectedModel] = None mask_threshold: typing.Optional[float] = None rect_persepective_transform: typing.Optional[bool] = None reflection_opacity: typing.Optional[float] = None diff --git a/src/gooey/types/image_segmentation_page_request_selected_model.py b/src/gooey/types/image_segmentation_page_request_selected_model.py new file mode 100644 index 0000000..9b4b8d7 --- /dev/null +++ b/src/gooey/types/image_segmentation_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ImageSegmentationPageRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/img2img_page_request.py b/src/gooey/types/img2img_page_request.py index 2b689aa..f3cfd2f 100644 --- a/src/gooey/types/img2img_page_request.py +++ b/src/gooey/types/img2img_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .image_to_image_models import ImageToImageModels +from .img2img_page_request_selected_model import Img2ImgPageRequestSelectedModel from .img2img_page_request_selected_controlnet_model import Img2ImgPageRequestSelectedControlnetModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -19,7 +19,7 @@ class Img2ImgPageRequest(UniversalBaseModel): input_image: str text_prompt: typing.Optional[str] = None - selected_model: typing.Optional[ImageToImageModels] = None + selected_model: typing.Optional[Img2ImgPageRequestSelectedModel] = None selected_controlnet_model: typing.Optional[Img2ImgPageRequestSelectedControlnetModel] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model.py b/src/gooey/types/img2img_page_request_selected_controlnet_model.py index 514d737..df9cb36 100644 --- a/src/gooey/types/img2img_page_request_selected_controlnet_model.py +++ b/src/gooey/types/img2img_page_request_selected_controlnet_model.py @@ -1,6 +1,19 @@ # This file was auto-generated by Fern from our API Definition. 
import typing -from .control_net_models import ControlNetModels +from .img2img_page_request_selected_controlnet_model_item import Img2ImgPageRequestSelectedControlnetModelItem -Img2ImgPageRequestSelectedControlnetModel = typing.Union[typing.List[ControlNetModels], ControlNetModels] +Img2ImgPageRequestSelectedControlnetModel = typing.Union[ + typing.List[Img2ImgPageRequestSelectedControlnetModelItem], + typing.Literal["sd_controlnet_canny"], + typing.Literal["sd_controlnet_depth"], + typing.Literal["sd_controlnet_hed"], + typing.Literal["sd_controlnet_mlsd"], + typing.Literal["sd_controlnet_normal"], + typing.Literal["sd_controlnet_openpose"], + typing.Literal["sd_controlnet_scribble"], + typing.Literal["sd_controlnet_seg"], + typing.Literal["sd_controlnet_tile"], + typing.Literal["sd_controlnet_brightness"], + typing.Literal["control_v1p_sd15_qrcode_monster_v2"], +] diff --git a/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..1569cf5 --- /dev/null +++ b/src/gooey/types/img2img_page_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Img2ImgPageRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/img2img_page_request_selected_model.py b/src/gooey/types/img2img_page_request_selected_model.py new file mode 100644 index 0000000..506c2b1 --- /dev/null +++ b/src/gooey/types/img2img_page_request_selected_model.py @@ -0,0 +1,21 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +Img2ImgPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "instruct_pix2pix", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + ], + typing.Any, +] diff --git a/src/gooey/types/inpainting_models.py b/src/gooey/types/inpainting_models.py deleted file mode 100644 index f851858..0000000 --- a/src/gooey/types/inpainting_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -InpaintingModels = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/lipsync_models.py b/src/gooey/types/lipsync_models.py deleted file mode 100644 index 0ee41ee..0000000 --- a/src/gooey/types/lipsync_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. 
- -import typing - -LipsyncModels = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py index 5ea1d3f..2914a1e 100644 --- a/src/gooey/types/lipsync_page_request.py +++ b/src/gooey/types/lipsync_page_request.py @@ -5,7 +5,7 @@ from .recipe_function import RecipeFunction import pydantic from .sad_talker_settings import SadTalkerSettings -from .lipsync_models import LipsyncModels +from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -23,7 +23,7 @@ class LipsyncPageRequest(UniversalBaseModel): face_padding_left: typing.Optional[int] = None face_padding_right: typing.Optional[int] = None sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncModels] = None + selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None input_audio: typing.Optional[str] = None settings: typing.Optional[RunSettings] = None diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py new file mode 100644 index 0000000..da68ef8 --- /dev/null +++ b/src/gooey/types/lipsync_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py new file mode 100644 index 0000000..c5614b4 --- /dev/null +++ b/src/gooey/types/lipsync_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py index 9c90696..f4f5293 100644 --- a/src/gooey/types/lipsync_tts_page_request.py +++ b/src/gooey/types/lipsync_tts_page_request.py @@ -4,11 +4,11 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .text_to_speech_providers import TextToSpeechProviders +from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel from .sad_talker_settings import SadTalkerSettings -from .lipsync_models import LipsyncModels +from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -21,7 +21,7 @@ class LipsyncTtsPageRequest(UniversalBaseModel): """ text_prompt: str - tts_provider: typing.Optional[TextToSpeechProviders] = None + tts_provider: typing.Optional[LipsyncTtsPageRequestTtsProvider] = None uberduck_voice_name: typing.Optional[str] = None uberduck_speaking_rate: typing.Optional[float] = None google_voice_name: typing.Optional[str] = None @@ -49,7 +49,7 @@ class LipsyncTtsPageRequest(UniversalBaseModel): face_padding_left: typing.Optional[int] = None face_padding_right: typing.Optional[int] = None sadtalker_settings: typing.Optional[SadTalkerSettings] = None - selected_model: typing.Optional[LipsyncModels] = None + selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None settings: typing.Optional[RunSettings] = None if IS_PYDANTIC_V2: diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py new file mode 100644 index 0000000..538058b --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/lipsync_tts_page_request_tts_provider.py b/src/gooey/types/lipsync_tts_page_request_tts_provider.py new file mode 100644 index 0000000..7e73fda --- /dev/null +++ b/src/gooey/types/lipsync_tts_page_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +LipsyncTtsPageRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py new file mode 100644 index 0000000..9ece5a9 --- /dev/null +++ b/src/gooey/types/lipsync_tts_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/text_to_speech_providers.py b/src/gooey/types/lipsync_tts_request_tts_provider.py similarity index 80% rename from src/gooey/types/text_to_speech_providers.py rename to src/gooey/types/lipsync_tts_request_tts_provider.py index f86047f..1a23fe3 100644 --- a/src/gooey/types/text_to_speech_providers.py +++ b/src/gooey/types/lipsync_tts_request_tts_provider.py @@ -2,6 +2,6 @@ import typing -TextToSpeechProviders = typing.Union[ +LipsyncTtsRequestTtsProvider = typing.Union[ typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any ] diff --git a/src/gooey/types/object_inpainting_page_request.py b/src/gooey/types/object_inpainting_page_request.py index 54d23b5..50b5b72 100644 --- a/src/gooey/types/object_inpainting_page_request.py +++ b/src/gooey/types/object_inpainting_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .inpainting_models import InpaintingModels +from .object_inpainting_page_request_selected_model import ObjectInpaintingPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -22,7 +22,7 @@ class ObjectInpaintingPageRequest(UniversalBaseModel): obj_pos_x: typing.Optional[float] = None obj_pos_y: typing.Optional[float] = None mask_threshold: typing.Optional[float] = None - selected_model: typing.Optional[InpaintingModels] = None + selected_model: typing.Optional[ObjectInpaintingPageRequestSelectedModel] = None negative_prompt: typing.Optional[str] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None diff --git a/src/gooey/types/object_inpainting_page_request_selected_model.py b/src/gooey/types/object_inpainting_page_request_selected_model.py new file mode 100644 index 0000000..92f1302 --- /dev/null +++ b/src/gooey/types/object_inpainting_page_request_selected_model.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +ObjectInpaintingPageRequestSelectedModel = typing.Union[ + typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any +] diff --git a/src/gooey/types/portrait_request_selected_model.py b/src/gooey/types/portrait_request_selected_model.py new file mode 100644 index 0000000..6c4a5ce --- /dev/null +++ b/src/gooey/types/portrait_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +PortraitRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/product_image_request_selected_model.py b/src/gooey/types/product_image_request_selected_model.py new file mode 100644 index 0000000..f1ce039 --- /dev/null +++ b/src/gooey/types/product_image_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +ProductImageRequestSelectedModel = typing.Union[typing.Literal["sd_2", "runway_ml", "dall_e", "jack_qiao"], typing.Any] diff --git a/src/gooey/types/qr_code_generator_page_request.py b/src/gooey/types/qr_code_generator_page_request.py index 60831cb..68f3730 100644 --- a/src/gooey/types/qr_code_generator_page_request.py +++ b/src/gooey/types/qr_code_generator_page_request.py @@ -5,9 +5,14 @@ from .recipe_function import RecipeFunction import pydantic from .vcard import Vcard -from .control_net_models import ControlNetModels -from .text_to_image_models import TextToImageModels -from .schedulers import Schedulers +from .qr_code_generator_page_request_image_prompt_controlnet_models_item import ( + QrCodeGeneratorPageRequestImagePromptControlnetModelsItem, +) +from .qr_code_generator_page_request_selected_model import QrCodeGeneratorPageRequestSelectedModel +from .qr_code_generator_page_request_selected_controlnet_model_item import ( + QrCodeGeneratorPageRequestSelectedControlnetModelItem, +) +from .qr_code_generator_page_request_scheduler import QrCodeGeneratorPageRequestScheduler from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -27,20 +32,24 @@ class QrCodeGeneratorPageRequest(UniversalBaseModel): text_prompt: str negative_prompt: typing.Optional[str] = None image_prompt: typing.Optional[str] = None - image_prompt_controlnet_models: typing.Optional[typing.List[ControlNetModels]] = None + image_prompt_controlnet_models: typing.Optional[ + typing.List[QrCodeGeneratorPageRequestImagePromptControlnetModelsItem] + ] = None image_prompt_strength: typing.Optional[float] = None image_prompt_scale: typing.Optional[float] = None image_prompt_pos_x: typing.Optional[float] = None image_prompt_pos_y: typing.Optional[float] = None - selected_model: typing.Optional[TextToImageModels] = None - selected_controlnet_model: typing.Optional[typing.List[ControlNetModels]] = None + selected_model: typing.Optional[QrCodeGeneratorPageRequestSelectedModel] = None + selected_controlnet_model: typing.Optional[typing.List[QrCodeGeneratorPageRequestSelectedControlnetModelItem]] = ( + None + ) output_width: typing.Optional[int] = None output_height: typing.Optional[int] = None guidance_scale: typing.Optional[float] = None controlnet_conditioning_scale: typing.Optional[typing.List[float]] = None num_outputs: typing.Optional[int] = None quality: typing.Optional[int] = None - scheduler: typing.Optional[Schedulers] = None + scheduler: typing.Optional[QrCodeGeneratorPageRequestScheduler] = None seed: typing.Optional[int] = None obj_scale: typing.Optional[float] = None obj_pos_x: typing.Optional[float] = None diff --git a/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py new file mode 100644 index 0000000..508e7e9 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_image_prompt_controlnet_models_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +QrCodeGeneratorPageRequestImagePromptControlnetModelsItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_generator_page_request_scheduler.py b/src/gooey/types/qr_code_generator_page_request_scheduler.py new file mode 100644 index 0000000..e30308a --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_scheduler.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeGeneratorPageRequestScheduler = typing.Union[ + typing.Literal[ + "singlestep_dpm_solver", + "multistep_dpm_solver", + "dpm_sde", + "dpm_discrete", + "dpm_discrete_ancestral", + "unipc", + "lms_discrete", + "heun", + "euler", + "euler_ancestral", + "pndm", + "ddpm", + "ddim", + "deis", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..c6f1967 --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeGeneratorPageRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_generator_page_request_selected_model.py b/src/gooey/types/qr_code_generator_page_request_selected_model.py new file mode 100644 index 0000000..97282cb --- /dev/null +++ b/src/gooey/types/qr_code_generator_page_request_selected_model.py @@ -0,0 +1,22 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +QrCodeGeneratorPageRequestSelectedModel = typing.Union[ + typing.Literal[ + "dream_shaper", + "dreamlike_2", + "sd_2", + "sd_1_5", + "dall_e", + "dall_e_3", + "openjourney_2", + "openjourney", + "analog_diffusion", + "protogen_5_3", + "jack_qiao", + "rodent_diffusion_1_5", + "deepfloyd_if", + ], + typing.Any, +] diff --git a/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py new file mode 100644 index 0000000..3be2ab6 --- /dev/null +++ b/src/gooey/types/qr_code_request_image_prompt_controlnet_models_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +QrCodeRequestImagePromptControlnetModelsItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/schedulers.py b/src/gooey/types/qr_code_request_scheduler.py similarity index 91% rename from src/gooey/types/schedulers.py rename to src/gooey/types/qr_code_request_scheduler.py index d3b5398..890b204 100644 --- a/src/gooey/types/schedulers.py +++ b/src/gooey/types/qr_code_request_scheduler.py @@ -2,7 +2,7 @@ import typing -Schedulers = typing.Union[ +QrCodeRequestScheduler = typing.Union[ typing.Literal[ "singlestep_dpm_solver", "multistep_dpm_solver", diff --git a/src/gooey/types/control_net_models.py b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py similarity index 89% rename from src/gooey/types/control_net_models.py rename to src/gooey/types/qr_code_request_selected_controlnet_model_item.py index 5c5f68a..c5cdc8d 100644 --- a/src/gooey/types/control_net_models.py +++ b/src/gooey/types/qr_code_request_selected_controlnet_model_item.py @@ -2,7 +2,7 @@ import typing -ControlNetModels = typing.Union[ +QrCodeRequestSelectedControlnetModelItem = typing.Union[ typing.Literal[ "sd_controlnet_canny", "sd_controlnet_depth", diff --git a/src/gooey/types/text_to_image_models.py b/src/gooey/types/qr_code_request_selected_model.py similarity index 90% rename from src/gooey/types/text_to_image_models.py rename to src/gooey/types/qr_code_request_selected_model.py index fd17514..7ea963c 100644 --- a/src/gooey/types/text_to_image_models.py +++ b/src/gooey/types/qr_code_request_selected_model.py @@ -2,7 +2,7 @@ import typing -TextToImageModels = typing.Union[ +QrCodeRequestSelectedModel = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/related_qn_a_doc_page_request_citation_style.py b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py new file mode 100644 index 0000000..b98f002 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnADocPageRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py new file mode 100644 index 0000000..680bbb5 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +RelatedQnADocPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py new file mode 100644 index 0000000..c65a896 --- /dev/null +++ b/src/gooey/types/related_qn_a_doc_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnADocPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py new file mode 100644 index 0000000..a591920 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnAPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/related_qn_a_page_request_response_format_type.py b/src/gooey/types/related_qn_a_page_request_response_format_type.py new file mode 100644 index 0000000..7bada87 --- /dev/null +++ b/src/gooey/types/related_qn_a_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RelatedQnAPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model.py b/src/gooey/types/remix_image_request_selected_controlnet_model.py index 521d9c8..eea207f 100644 --- a/src/gooey/types/remix_image_request_selected_controlnet_model.py +++ b/src/gooey/types/remix_image_request_selected_controlnet_model.py @@ -1,6 +1,19 @@ # This file was auto-generated by Fern from our API Definition. import typing -from .control_net_models import ControlNetModels +from .remix_image_request_selected_controlnet_model_item import RemixImageRequestSelectedControlnetModelItem -RemixImageRequestSelectedControlnetModel = typing.Union[typing.List[ControlNetModels], ControlNetModels] +RemixImageRequestSelectedControlnetModel = typing.Union[ + typing.List[RemixImageRequestSelectedControlnetModelItem], + typing.Literal["sd_controlnet_canny"], + typing.Literal["sd_controlnet_depth"], + typing.Literal["sd_controlnet_hed"], + typing.Literal["sd_controlnet_mlsd"], + typing.Literal["sd_controlnet_normal"], + typing.Literal["sd_controlnet_openpose"], + typing.Literal["sd_controlnet_scribble"], + typing.Literal["sd_controlnet_seg"], + typing.Literal["sd_controlnet_tile"], + typing.Literal["sd_controlnet_brightness"], + typing.Literal["control_v1p_sd15_qrcode_monster_v2"], +] diff --git a/src/gooey/types/remix_image_request_selected_controlnet_model_item.py b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py new file mode 100644 index 0000000..b4f3ff0 --- /dev/null +++ b/src/gooey/types/remix_image_request_selected_controlnet_model_item.py @@ -0,0 +1,20 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +RemixImageRequestSelectedControlnetModelItem = typing.Union[ + typing.Literal[ + "sd_controlnet_canny", + "sd_controlnet_depth", + "sd_controlnet_hed", + "sd_controlnet_mlsd", + "sd_controlnet_normal", + "sd_controlnet_openpose", + "sd_controlnet_scribble", + "sd_controlnet_seg", + "sd_controlnet_tile", + "sd_controlnet_brightness", + "control_v1p_sd15_qrcode_monster_v2", + ], + typing.Any, +] diff --git a/src/gooey/types/image_to_image_models.py b/src/gooey/types/remix_image_request_selected_model.py similarity index 89% rename from src/gooey/types/image_to_image_models.py rename to src/gooey/types/remix_image_request_selected_model.py index 70c9201..245d6b0 100644 --- a/src/gooey/types/image_to_image_models.py +++ b/src/gooey/types/remix_image_request_selected_model.py @@ -2,7 +2,7 @@ import typing -ImageToImageModels = typing.Union[ +RemixImageRequestSelectedModel = typing.Union[ typing.Literal[ "dream_shaper", "dreamlike_2", diff --git a/src/gooey/types/remove_background_request_selected_model.py b/src/gooey/types/remove_background_request_selected_model.py new file mode 100644 index 0000000..c84f0e7 --- /dev/null +++ b/src/gooey/types/remove_background_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +RemoveBackgroundRequestSelectedModel = typing.Union[typing.Literal["dis", "u2net"], typing.Any] diff --git a/src/gooey/types/response_format_type.py b/src/gooey/types/response_format_type.py deleted file mode 100644 index f8216e9..0000000 --- a/src/gooey/types/response_format_type.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -ResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/seo_summary_page_request_response_format_type.py b/src/gooey/types/seo_summary_page_request_response_format_type.py new file mode 100644 index 0000000..26f948b --- /dev/null +++ b/src/gooey/types/seo_summary_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SeoSummaryPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/serp_search_locations.py b/src/gooey/types/serp_search_location.py similarity index 98% rename from src/gooey/types/serp_search_locations.py rename to src/gooey/types/serp_search_location.py index 2d5144d..9b64ad9 100644 --- a/src/gooey/types/serp_search_locations.py +++ b/src/gooey/types/serp_search_location.py @@ -2,7 +2,7 @@ import typing -SerpSearchLocations = typing.Union[ +SerpSearchLocation = typing.Union[ typing.Literal[ "af", "al", diff --git a/src/gooey/types/smart_gpt_page_request_response_format_type.py b/src/gooey/types/smart_gpt_page_request_response_format_type.py new file mode 100644 index 0000000..1eaf901 --- /dev/null +++ b/src/gooey/types/smart_gpt_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SmartGptPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/social_lookup_email_page_request_response_format_type.py b/src/gooey/types/social_lookup_email_page_request_response_format_type.py new file mode 100644 index 0000000..46c50db --- /dev/null +++ b/src/gooey/types/social_lookup_email_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SocialLookupEmailPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_output_format.py b/src/gooey/types/speech_recognition_request_output_format.py new file mode 100644 index 0000000..4d2cf2b --- /dev/null +++ b/src/gooey/types/speech_recognition_request_output_format.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestOutputFormat = typing.Union[typing.Literal["text", "json", "srt", "vtt"], typing.Any] diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py new file mode 100644 index 0000000..9d2d28f --- /dev/null +++ b/src/gooey/types/speech_recognition_request_selected_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestSelectedModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/speech_recognition_request_translation_model.py b/src/gooey/types/speech_recognition_request_translation_model.py new file mode 100644 index 0000000..886ab92 --- /dev/null +++ b/src/gooey/types/speech_recognition_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SpeechRecognitionRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_response_format_type.py b/src/gooey/types/synthesize_data_request_response_format_type.py new file mode 100644 index 0000000..3ab37a9 --- /dev/null +++ b/src/gooey/types/synthesize_data_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +SynthesizeDataRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py new file mode 100644 index 0000000..6c1bc21 --- /dev/null +++ b/src/gooey/types/synthesize_data_request_selected_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +SynthesizeDataRequestSelectedAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/text2audio_models.py b/src/gooey/types/text2audio_models.py deleted file mode 100644 index b3eb9eb..0000000 --- a/src/gooey/types/text2audio_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -Text2AudioModels = typing.Literal["audio_ldm"] diff --git a/src/gooey/types/text_to_speech_page_request_tts_provider.py b/src/gooey/types/text_to_speech_page_request_tts_provider.py new file mode 100644 index 0000000..a6b8938 --- /dev/null +++ b/src/gooey/types/text_to_speech_page_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TextToSpeechPageRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +] diff --git a/src/gooey/types/translate_request_selected_model.py b/src/gooey/types/translate_request_selected_model.py new file mode 100644 index 0000000..b774b56 --- /dev/null +++ b/src/gooey/types/translate_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +TranslateRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/translation_models.py b/src/gooey/types/translation_models.py deleted file mode 100644 index 136ecb8..0000000 --- a/src/gooey/types/translation_models.py +++ /dev/null @@ -1,5 +0,0 @@ -# This file was auto-generated by Fern from our API Definition. - -import typing - -TranslationModels = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/translation_page_request.py b/src/gooey/types/translation_page_request.py index 6845f7f..9c033a6 100644 --- a/src/gooey/types/translation_page_request.py +++ b/src/gooey/types/translation_page_request.py @@ -4,7 +4,7 @@ import typing from .recipe_function import RecipeFunction import pydantic -from .translation_models import TranslationModels +from .translation_page_request_selected_model import TranslationPageRequestSelectedModel from .run_settings import RunSettings from ..core.pydantic_utilities import IS_PYDANTIC_V2 @@ -17,7 +17,7 @@ class TranslationPageRequest(UniversalBaseModel): """ texts: typing.Optional[typing.List[str]] = None - selected_model: typing.Optional[TranslationModels] = None + selected_model: typing.Optional[TranslationPageRequestSelectedModel] = None translation_source: typing.Optional[str] = None translation_target: typing.Optional[str] = None glossary_document: typing.Optional[str] = None diff --git a/src/gooey/types/translation_page_request_selected_model.py b/src/gooey/types/translation_page_request_selected_model.py new file mode 100644 index 0000000..62ae9ab --- /dev/null +++ b/src/gooey/types/translation_page_request_selected_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +TranslationPageRequestSelectedModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/upscaler_models.py b/src/gooey/types/upscale_request_selected_models_item.py similarity index 78% rename from src/gooey/types/upscaler_models.py rename to src/gooey/types/upscale_request_selected_models_item.py index 314c03a..1a8362e 100644 --- a/src/gooey/types/upscaler_models.py +++ b/src/gooey/types/upscale_request_selected_models_item.py @@ -2,6 +2,6 @@ import typing -UpscalerModels = typing.Union[ +UpscaleRequestSelectedModelsItem = typing.Union[ typing.Literal["gfpgan_1_4", "real_esrgan_x2", "sd_x4", "real_esrgan", "gfpgan"], typing.Any ] diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py index d1f4e31..6fb8b5e 100644 --- a/src/gooey/types/video_bots_page_request.py +++ b/src/gooey/types/video_bots_page_request.py @@ -6,14 +6,14 @@ import pydantic from .conversation_entry import ConversationEntry from .large_language_models import LargeLanguageModels -from .embedding_models import EmbeddingModels -from .citation_styles import CitationStyles -from .asr_models import AsrModels -from .translation_models import TranslationModels -from .lipsync_models import LipsyncModels +from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel +from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle +from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel +from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel +from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel from .llm_tools import LlmTools -from .response_format_type import ResponseFormatType -from .text_to_speech_providers import TextToSpeechProviders +from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType +from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel from .video_bots_page_request_sadtalker_settings import VideoBotsPageRequestSadtalkerSettings @@ -52,16 +52,16 @@ class VideoBotsPageRequest(UniversalBaseModel): max_references: typing.Optional[int] = None max_context_words: typing.Optional[int] = None scroll_jump: typing.Optional[int] = None - embedding_model: typing.Optional[EmbeddingModels] = None + embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None dense_weight: typing.Optional[float] = pydantic.Field(default=None) """ Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight. Generally speaking, dense embeddings excel at understanding the context of the query, whereas sparse vectors excel at keyword matches. """ - citation_style: typing.Optional[CitationStyles] = None + citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None use_url_shortener: typing.Optional[bool] = None - asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None) + asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None) """ Choose a model to transcribe incoming audio messages to text. """ @@ -71,7 +71,7 @@ class VideoBotsPageRequest(UniversalBaseModel): Choose a language to transcribe incoming audio messages to text. 
""" - translation_model: typing.Optional[TranslationModels] = None + translation_model: typing.Optional[VideoBotsPageRequestTranslationModel] = None user_language: typing.Optional[str] = pydantic.Field(default=None) """ Choose a language to translate incoming text & audio messages to English and responses back to your selected language. Useful for low-resource languages. @@ -79,7 +79,7 @@ class VideoBotsPageRequest(UniversalBaseModel): input_glossary_document: typing.Optional[str] = None output_glossary_document: typing.Optional[str] = None - lipsync_model: typing.Optional[LipsyncModels] = None + lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None) """ Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling). @@ -90,8 +90,8 @@ class VideoBotsPageRequest(UniversalBaseModel): quality: typing.Optional[float] = None max_tokens: typing.Optional[int] = None sampling_temperature: typing.Optional[float] = None - response_format_type: typing.Optional[ResponseFormatType] = None - tts_provider: typing.Optional[TextToSpeechProviders] = None + response_format_type: typing.Optional[VideoBotsPageRequestResponseFormatType] = None + tts_provider: typing.Optional[VideoBotsPageRequestTtsProvider] = None uberduck_voice_name: typing.Optional[str] = None uberduck_speaking_rate: typing.Optional[float] = None google_voice_name: typing.Optional[str] = None diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py new file mode 100644 index 0000000..7db13bc --- /dev/null +++ b/src/gooey/types/video_bots_page_request_asr_model.py @@ -0,0 +1,23 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestAsrModel = typing.Union[ + typing.Literal[ + "whisper_large_v2", + "whisper_large_v3", + "whisper_hindi_large_v2", + "whisper_telugu_large_v2", + "nemo_english", + "nemo_hindi", + "vakyansh_bhojpuri", + "gcp_v1", + "usm", + "deepgram", + "azure", + "seamless_m4t_v2", + "mms_1b_all", + "seamless_m4t", + ], + typing.Any, +] diff --git a/src/gooey/types/video_bots_page_request_citation_style.py b/src/gooey/types/video_bots_page_request_citation_style.py new file mode 100644 index 0000000..dc3630b --- /dev/null +++ b/src/gooey/types/video_bots_page_request_citation_style.py @@ -0,0 +1,25 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestCitationStyle = typing.Union[ + typing.Literal[ + "number", + "title", + "url", + "symbol", + "markdown", + "html", + "slack_mrkdwn", + "plaintext", + "number_markdown", + "number_html", + "number_slack_mrkdwn", + "number_plaintext", + "symbol_markdown", + "symbol_html", + "symbol_slack_mrkdwn", + "symbol_plaintext", + ], + typing.Any, +] diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py new file mode 100644 index 0000000..19c8972 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_embedding_model.py @@ -0,0 +1,18 @@ +# This file was auto-generated by Fern from our API Definition. 
+ +import typing + +VideoBotsPageRequestEmbeddingModel = typing.Union[ + typing.Literal[ + "openai_3_large", + "openai_3_small", + "openai_ada_2", + "e5_large_v2", + "e5_base_v2", + "multilingual_e5_base", + "multilingual_e5_large", + "gte_large", + "gte_base", + ], + typing.Any, +] diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py new file mode 100644 index 0000000..3bb98e0 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_lipsync_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any] diff --git a/src/gooey/types/video_bots_page_request_response_format_type.py b/src/gooey/types/video_bots_page_request_response_format_type.py new file mode 100644 index 0000000..25cc8f1 --- /dev/null +++ b/src/gooey/types/video_bots_page_request_response_format_type.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestResponseFormatType = typing.Union[typing.Literal["text", "json_object"], typing.Any] diff --git a/src/gooey/types/video_bots_page_request_translation_model.py b/src/gooey/types/video_bots_page_request_translation_model.py new file mode 100644 index 0000000..0373c0c --- /dev/null +++ b/src/gooey/types/video_bots_page_request_translation_model.py @@ -0,0 +1,5 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestTranslationModel = typing.Union[typing.Literal["google", "ghana_nlp"], typing.Any] diff --git a/src/gooey/types/video_bots_page_request_tts_provider.py b/src/gooey/types/video_bots_page_request_tts_provider.py new file mode 100644 index 0000000..3fc8d0a --- /dev/null +++ b/src/gooey/types/video_bots_page_request_tts_provider.py @@ -0,0 +1,7 @@ +# This file was auto-generated by Fern from our API Definition. + +import typing + +VideoBotsPageRequestTtsProvider = typing.Union[ + typing.Literal["GOOGLE_TTS", "ELEVEN_LABS", "UBERDUCK", "BARK", "AZURE_TTS", "OPEN_AI"], typing.Any +]
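
For illustration only (this snippet is not part of the generated diff): the regenerated per-request aliases above are plain typing.Union/typing.Literal types rather than shared enums, so application code passes ordinary strings. The import paths below mirror the src/gooey/types file paths added in this diff; the helper function, its name, and its defaults are hypothetical.

# Hypothetical usage sketch, assuming the package root is `gooey` as in the
# src/gooey/types layout shown above.
from gooey.types.doc_summary_page_request_response_format_type import (
    DocSummaryPageRequestResponseFormatType,
)
from gooey.types.doc_summary_page_request_selected_asr_model import (
    DocSummaryPageRequestSelectedAsrModel,
)


def doc_summary_options(
    asr_model: DocSummaryPageRequestSelectedAsrModel = "whisper_large_v3",
    response_format: DocSummaryPageRequestResponseFormatType = "json_object",
) -> dict:
    # Values are ordinary strings; each alias also admits typing.Any, so the
    # annotations mainly drive editor and type-checker suggestions rather than
    # runtime validation.
    return {
        "selected_asr_model": asr_model,
        "response_format_type": response_format,
    }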