diff --git a/pyproject.toml b/pyproject.toml
index 470f6b4..c75a245 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[tool.poetry]
name = "gooeyai"
-version = "0.0.1-beta23"
+version = "0.0.1-beta24"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index a067ff0..0266830 100644
--- a/reference.md
+++ b/reference.md
@@ -80,7 +80,7 @@ client.animate(
-
-**selected_model:** `typing.Optional[DeforumSdPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[AnimationModels]`
@@ -605,7 +605,7 @@ client.seo_people_also_ask(
-
-**embedding_model:** `typing.Optional[RelatedQnAPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -674,7 +674,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -901,7 +901,7 @@ client.seo_content(
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -1086,7 +1086,7 @@ client.web_search_llm(
-
-**embedding_model:** `typing.Optional[GoogleGptPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -1155,7 +1155,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -1753,7 +1753,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_asr_model:** `typing.Optional[SynthesizeDataRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[AsrModels]`
@@ -2121,7 +2121,7 @@ client.rag(
-
-**embedding_model:** `typing.Optional[DocSearchPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -2510,7 +2510,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**selected_asr_model:** `typing.Optional[DocSummaryRequestSelectedAsrModel]`
+**selected_asr_model:** `typing.Optional[AsrModels]`
@@ -2780,7 +2780,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[LipsyncRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncModels]`
@@ -3080,7 +3080,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[LipsyncTtsRequestSelectedModel]`
+**selected_model:** `typing.Optional[LipsyncModels]`
@@ -3408,7 +3408,7 @@ typing.List[core.File]` — See core.File for more documentation
-
-**selected_model:** `typing.Optional[SpeechRecognitionRequestSelectedModel]`
+**selected_model:** `typing.Optional[AsrModels]`
@@ -4896,7 +4896,7 @@ client.image_from_web_search(
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -5340,7 +5340,7 @@ client.embed(
-
-**selected_model:** `typing.Optional[EmbeddingsPageRequestSelectedModel]`
+**selected_model:** `typing.Optional[EmbeddingModels]`
@@ -5484,7 +5484,7 @@ client.seo_people_also_ask_doc(
-
-**embedding_model:** `typing.Optional[RelatedQnADocPageRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -5585,7 +5585,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**serp_search_location:** `typing.Optional[SerpSearchLocation]`
+**serp_search_location:** `typing.Optional[SerpSearchLocations]`
@@ -5876,7 +5876,7 @@ typing.Optional[typing.List[core.File]]` — See core.File for more documentatio
-
-**embedding_model:** `typing.Optional[CopilotCompletionRequestEmbeddingModel]`
+**embedding_model:** `typing.Optional[EmbeddingModels]`
@@ -5913,7 +5913,7 @@ Generally speaking, dense embeddings excel at understanding the context of the q
-
-**asr_model:** `typing.Optional[CopilotCompletionRequestAsrModel]` — Choose a model to transcribe incoming audio messages to text.
+**asr_model:** `typing.Optional[AsrModels]` — Choose a model to transcribe incoming audio messages to text.
@@ -5965,7 +5965,7 @@ typing.Optional[core.File]` — See core.File for more documentation
-
-**lipsync_model:** `typing.Optional[CopilotCompletionRequestLipsyncModel]`
+**lipsync_model:** `typing.Optional[LipsyncModels]`
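
A note on the reference.md hunks above: the per-endpoint request enums are collapsed into five shared aliases. A minimal sketch of how the documented parameter types now line up, relying only on names this diff shows being exported from the package root (see the `__init__.py` changes below); no endpoint is called:

```python
import typing

from gooey import (
    AnimationModels,      # replaces DeforumSdPageRequestSelectedModel
    AsrModels,            # replaces the per-endpoint ASR enums (*SelectedAsrModel, *AsrModel)
    EmbeddingModels,      # replaces the per-endpoint *EmbeddingModel enums
    LipsyncModels,        # replaces the per-endpoint lipsync enums
    SerpSearchLocations,  # replaces SerpSearchLocation
)

# Parameter names are unchanged; only their declared types move to the shared aliases.
selected_model: typing.Optional[AnimationModels] = None            # client.animate
embedding_model: typing.Optional[EmbeddingModels] = None           # seo_people_also_ask, web_search_llm, rag, etc.
selected_asr_model: typing.Optional[AsrModels] = None              # synthesize_data, doc_summary
lipsync_model: typing.Optional[LipsyncModels] = None               # copilot completion
serp_search_location: typing.Optional[SerpSearchLocations] = None  # SERP-backed endpoints
```
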
diff --git a/src/gooey/__init__.py b/src/gooey/__init__.py
index 9139402..5b2ed59 100644
--- a/src/gooey/__init__.py
+++ b/src/gooey/__init__.py
@@ -5,14 +5,15 @@
AggFunctionFunction,
AggFunctionResult,
AggFunctionResultFunction,
+ AnimationModels,
AnimationPrompt,
AsrChunk,
+ AsrModels,
AsrOutputJson,
AsrPageOutput,
AsrPageOutputOutputTextItem,
AsrPageRequest,
AsrPageRequestOutputFormat,
- AsrPageRequestSelectedModel,
AsrPageRequestTranslationModel,
AsrPageStatusResponse,
AsyncApiResponseModelV3,
@@ -54,10 +55,7 @@
ConversationEntryRole,
ConversationStart,
CreateStreamRequest,
- CreateStreamRequestAsrModel,
CreateStreamRequestCitationStyle,
- CreateStreamRequestEmbeddingModel,
- CreateStreamRequestLipsyncModel,
CreateStreamRequestOpenaiTtsModel,
CreateStreamRequestOpenaiVoiceName,
CreateStreamRequestResponseFormatType,
@@ -65,31 +63,26 @@
CreateStreamRequestTtsProvider,
CreateStreamResponse,
DeforumSdPageOutput,
- DeforumSdPageRequestSelectedModel,
DeforumSdPageStatusResponse,
DocExtractPageOutput,
DocExtractPageRequest,
DocExtractPageRequestResponseFormatType,
- DocExtractPageRequestSelectedAsrModel,
DocExtractPageStatusResponse,
DocSearchPageOutput,
DocSearchPageRequestCitationStyle,
- DocSearchPageRequestEmbeddingModel,
DocSearchPageRequestKeywordQuery,
DocSearchPageRequestResponseFormatType,
DocSearchPageStatusResponse,
DocSummaryPageOutput,
DocSummaryPageRequest,
DocSummaryPageRequestResponseFormatType,
- DocSummaryPageRequestSelectedAsrModel,
DocSummaryPageStatusResponse,
DocSummaryRequestResponseFormatType,
- DocSummaryRequestSelectedAsrModel,
EmailFaceInpaintingPageOutput,
EmailFaceInpaintingPageRequestSelectedModel,
EmailFaceInpaintingPageStatusResponse,
+ EmbeddingModels,
EmbeddingsPageOutput,
- EmbeddingsPageRequestSelectedModel,
EmbeddingsPageStatusResponse,
EvalPrompt,
FaceInpaintingPageOutput,
@@ -102,7 +95,6 @@
GenericErrorResponse,
GenericErrorResponseDetail,
GoogleGptPageOutput,
- GoogleGptPageRequestEmbeddingModel,
GoogleGptPageRequestResponseFormatType,
GoogleGptPageStatusResponse,
GoogleImageGenPageOutput,
@@ -125,21 +117,18 @@
LetterWriterPageOutput,
LetterWriterPageRequest,
LetterWriterPageStatusResponse,
+ LipsyncModels,
LipsyncPageOutput,
LipsyncPageRequest,
- LipsyncPageRequestSelectedModel,
LipsyncPageStatusResponse,
- LipsyncRequestSelectedModel,
LipsyncTtsPageOutput,
LipsyncTtsPageRequest,
LipsyncTtsPageRequestOpenaiTtsModel,
LipsyncTtsPageRequestOpenaiVoiceName,
- LipsyncTtsPageRequestSelectedModel,
LipsyncTtsPageRequestTtsProvider,
LipsyncTtsPageStatusResponse,
LipsyncTtsRequestOpenaiTtsModel,
LipsyncTtsRequestOpenaiVoiceName,
- LipsyncTtsRequestSelectedModel,
LipsyncTtsRequestTtsProvider,
LlmTools,
MessagePart,
@@ -169,12 +158,10 @@
RelatedGoogleGptResponse,
RelatedQnADocPageOutput,
RelatedQnADocPageRequestCitationStyle,
- RelatedQnADocPageRequestEmbeddingModel,
RelatedQnADocPageRequestKeywordQuery,
RelatedQnADocPageRequestResponseFormatType,
RelatedQnADocPageStatusResponse,
RelatedQnAPageOutput,
- RelatedQnAPageRequestEmbeddingModel,
RelatedQnAPageRequestResponseFormatType,
RelatedQnAPageStatusResponse,
RemixImageRequestSelectedControlnetModel,
@@ -191,11 +178,10 @@
SadTalkerSettings,
SadTalkerSettingsPreprocess,
SearchReference,
- SecuritySchemes,
SeoSummaryPageOutput,
SeoSummaryPageRequestResponseFormatType,
SeoSummaryPageStatusResponse,
- SerpSearchLocation,
+ SerpSearchLocations,
SerpSearchType,
SmartGptPageOutput,
SmartGptPageRequestResponseFormatType,
@@ -204,11 +190,9 @@
SocialLookupEmailPageRequestResponseFormatType,
SocialLookupEmailPageStatusResponse,
SpeechRecognitionRequestOutputFormat,
- SpeechRecognitionRequestSelectedModel,
SpeechRecognitionRequestTranslationModel,
StreamError,
SynthesizeDataRequestResponseFormatType,
- SynthesizeDataRequestSelectedAsrModel,
Text2AudioPageOutput,
Text2AudioPageStatusResponse,
TextToSpeechPageOutput,
@@ -230,12 +214,9 @@
VideoBotsPageOutputFinalKeywordQuery,
VideoBotsPageOutputFinalPrompt,
VideoBotsPageRequest,
- VideoBotsPageRequestAsrModel,
VideoBotsPageRequestCitationStyle,
- VideoBotsPageRequestEmbeddingModel,
VideoBotsPageRequestFunctionsItem,
VideoBotsPageRequestFunctionsItemTrigger,
- VideoBotsPageRequestLipsyncModel,
VideoBotsPageRequestOpenaiTtsModel,
VideoBotsPageRequestOpenaiVoiceName,
VideoBotsPageRequestResponseFormatType,
@@ -249,12 +230,9 @@
from . import copilot
from .client import AsyncGooey, Gooey
from .copilot import (
- CopilotCompletionRequestAsrModel,
CopilotCompletionRequestCitationStyle,
- CopilotCompletionRequestEmbeddingModel,
CopilotCompletionRequestFunctionsItem,
CopilotCompletionRequestFunctionsItemTrigger,
- CopilotCompletionRequestLipsyncModel,
CopilotCompletionRequestOpenaiTtsModel,
CopilotCompletionRequestOpenaiVoiceName,
CopilotCompletionRequestResponseFormatType,
@@ -271,14 +249,15 @@
"AggFunctionFunction",
"AggFunctionResult",
"AggFunctionResultFunction",
+ "AnimationModels",
"AnimationPrompt",
"AsrChunk",
+ "AsrModels",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
"AsrPageRequest",
"AsrPageRequestOutputFormat",
- "AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
@@ -320,12 +299,9 @@
"ConversationEntryContentItem_Text",
"ConversationEntryRole",
"ConversationStart",
- "CopilotCompletionRequestAsrModel",
"CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
"CopilotCompletionRequestResponseFormatType",
@@ -334,10 +310,7 @@
"CopilotCompletionRequestTranslationModel",
"CopilotCompletionRequestTtsProvider",
"CreateStreamRequest",
- "CreateStreamRequestAsrModel",
"CreateStreamRequestCitationStyle",
- "CreateStreamRequestEmbeddingModel",
- "CreateStreamRequestLipsyncModel",
"CreateStreamRequestOpenaiTtsModel",
"CreateStreamRequestOpenaiVoiceName",
"CreateStreamRequestResponseFormatType",
@@ -345,31 +318,26 @@
"CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
- "DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
"DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
- "DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
"DocSearchPageRequestCitationStyle",
- "DocSearchPageRequestEmbeddingModel",
"DocSearchPageRequestKeywordQuery",
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
"DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
- "DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
"DocSummaryRequestResponseFormatType",
- "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
+ "EmbeddingModels",
"EmbeddingsPageOutput",
- "EmbeddingsPageRequestSelectedModel",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
@@ -384,7 +352,6 @@
"Gooey",
"GooeyEnvironment",
"GoogleGptPageOutput",
- "GoogleGptPageRequestEmbeddingModel",
"GoogleGptPageRequestResponseFormatType",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
@@ -407,21 +374,18 @@
"LetterWriterPageOutput",
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
+ "LipsyncModels",
"LipsyncPageOutput",
"LipsyncPageRequest",
- "LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
- "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
"LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
"LipsyncTtsRequestOpenaiTtsModel",
"LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestSelectedModel",
"LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
@@ -452,12 +416,10 @@
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
"RelatedQnADocPageRequestCitationStyle",
- "RelatedQnADocPageRequestEmbeddingModel",
"RelatedQnADocPageRequestKeywordQuery",
"RelatedQnADocPageRequestResponseFormatType",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
"RemixImageRequestSelectedControlnetModel",
@@ -474,11 +436,10 @@
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
"SearchReference",
- "SecuritySchemes",
"SeoSummaryPageOutput",
"SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageStatusResponse",
- "SerpSearchLocation",
+ "SerpSearchLocations",
"SerpSearchType",
"SmartGptPageOutput",
"SmartGptPageRequestResponseFormatType",
@@ -487,11 +448,9 @@
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
"SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestSelectedModel",
"SpeechRecognitionRequestTranslationModel",
"StreamError",
"SynthesizeDataRequestResponseFormatType",
- "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -515,12 +474,9 @@
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
"VideoBotsPageRequest",
- "VideoBotsPageRequestAsrModel",
"VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
"VideoBotsPageRequestFunctionsItem",
"VideoBotsPageRequestFunctionsItemTrigger",
- "VideoBotsPageRequestLipsyncModel",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
"VideoBotsPageRequestResponseFormatType",
diff --git a/src/gooey/client.py b/src/gooey/client.py
index 6767f27..fc6fa26 100644
--- a/src/gooey/client.py
+++ b/src/gooey/client.py
@@ -9,7 +9,7 @@
from .copilot.client import CopilotClient
from .types.animation_prompt import AnimationPrompt
from .types.recipe_function import RecipeFunction
-from .types.deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
+from .types.animation_models import AnimationModels
from .types.run_settings import RunSettings
from .core.request_options import RequestOptions
from .types.deforum_sd_page_output import DeforumSdPageOutput
@@ -28,14 +28,13 @@
from .types.qr_code_request_scheduler import QrCodeRequestScheduler
from .types.qr_code_generator_page_output import QrCodeGeneratorPageOutput
from .types.large_language_models import LargeLanguageModels
-from .types.related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
+from .types.embedding_models import EmbeddingModels
from .types.related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
-from .types.serp_search_location import SerpSearchLocation
+from .types.serp_search_locations import SerpSearchLocations
from .types.serp_search_type import SerpSearchType
from .types.related_qn_a_page_output import RelatedQnAPageOutput
from .types.seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
from .types.seo_summary_page_output import SeoSummaryPageOutput
-from .types.google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
from .types.google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
from .types.google_gpt_page_output import GoogleGptPageOutput
from .types.social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
@@ -45,35 +44,31 @@
from .types.agg_function import AggFunction
from .types.bulk_eval_page_request_response_format_type import BulkEvalPageRequestResponseFormatType
from .types.bulk_eval_page_output import BulkEvalPageOutput
-from .types.synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
+from .types.asr_models import AsrModels
from .types.synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
from .types.doc_extract_page_output import DocExtractPageOutput
from .types.compare_llm_page_request_response_format_type import CompareLlmPageRequestResponseFormatType
from .types.compare_llm_page_output import CompareLlmPageOutput
from .types.doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
-from .types.doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
from .types.doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
from .types.doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
from .types.doc_search_page_output import DocSearchPageOutput
from .types.smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
from .types.smart_gpt_page_output import SmartGptPageOutput
-from .types.doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
from .types.doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
from .types.doc_summary_page_output import DocSummaryPageOutput
from .types.functions_page_output import FunctionsPageOutput
from .types.sad_talker_settings import SadTalkerSettings
-from .types.lipsync_request_selected_model import LipsyncRequestSelectedModel
+from .types.lipsync_models import LipsyncModels
from .types.lipsync_page_output import LipsyncPageOutput
from .types.lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
from .types.lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
from .types.lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
-from .types.lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
from .types.lipsync_tts_page_output import LipsyncTtsPageOutput
from .types.text_to_speech_page_request_tts_provider import TextToSpeechPageRequestTtsProvider
from .types.text_to_speech_page_request_openai_voice_name import TextToSpeechPageRequestOpenaiVoiceName
from .types.text_to_speech_page_request_openai_tts_model import TextToSpeechPageRequestOpenaiTtsModel
from .types.text_to_speech_page_output import TextToSpeechPageOutput
-from .types.speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
from .types.speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .types.speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
from .types.asr_page_output import AsrPageOutput
@@ -98,10 +93,8 @@
from .types.image_segmentation_page_output import ImageSegmentationPageOutput
from .types.upscale_request_selected_models_item import UpscaleRequestSelectedModelsItem
from .types.compare_upscaler_page_output import CompareUpscalerPageOutput
-from .types.embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .types.embeddings_page_output import EmbeddingsPageOutput
from .types.related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
-from .types.related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
from .types.related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
from .types.related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
from .types.related_qn_a_doc_page_output import RelatedQnADocPageOutput
@@ -183,7 +176,7 @@ def animate(
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
max_frames: typing.Optional[int] = OMIT,
- selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[AnimationModels] = OMIT,
animation_mode: typing.Optional[str] = OMIT,
zoom: typing.Optional[str] = OMIT,
translation_x: typing.Optional[str] = OMIT,
@@ -210,7 +203,7 @@ def animate(
max_frames : typing.Optional[int]
- selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+ selected_model : typing.Optional[AnimationModels]
animation_mode : typing.Optional[str]
@@ -549,7 +542,7 @@ def seo_people_also_ask(
max_references: typing.Optional[int] = OMIT,
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
@@ -557,7 +550,7 @@ def seo_people_also_ask(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -592,7 +585,7 @@ def seo_people_also_ask(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -612,7 +605,7 @@ def seo_people_also_ask(
response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -743,7 +736,7 @@ def seo_content(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -787,7 +780,7 @@ def seo_content(
response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -912,7 +905,7 @@ def web_search_llm(
max_references: typing.Optional[int] = OMIT,
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
@@ -920,7 +913,7 @@ def web_search_llm(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -955,7 +948,7 @@ def web_search_llm(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -975,7 +968,7 @@ def web_search_llm(
response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -1514,7 +1507,7 @@ def synthesize_data(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
sheet_url: typing.Optional[core.File] = None,
- selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
glossary_document: typing.Optional[core.File] = None,
task_instructions: typing.Optional[str] = None,
@@ -1544,7 +1537,7 @@ def synthesize_data(
sheet_url : typing.Optional[core.File]
See core.File for more documentation
- selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[AsrModels]
google_translate_target : typing.Optional[str]
@@ -1800,7 +1793,7 @@ def rag(
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
@@ -1839,7 +1832,7 @@ def rag(
doc_extract_url : typing.Optional[str]
- embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -2118,7 +2111,7 @@ def doc_summary(
merge_instructions: typing.Optional[str] = None,
selected_model: typing.Optional[LargeLanguageModels] = None,
chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
- selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
avoid_repetition: typing.Optional[bool] = None,
num_outputs: typing.Optional[int] = None,
@@ -2150,7 +2143,7 @@ def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[AsrModels]
google_translate_target : typing.Optional[str]
@@ -2367,7 +2360,7 @@ def lipsync(
face_padding_left: typing.Optional[int] = None,
face_padding_right: typing.Optional[int] = None,
sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
+ selected_model: typing.Optional[LipsyncModels] = None,
input_audio: typing.Optional[core.File] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
@@ -2395,7 +2388,7 @@ def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncModels]
input_audio : typing.Optional[core.File]
See core.File for more documentation
@@ -2518,7 +2511,7 @@ def lipsync_tts(
face_padding_left: typing.Optional[int] = None,
face_padding_right: typing.Optional[int] = None,
sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ selected_model: typing.Optional[LipsyncModels] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
@@ -2584,7 +2577,7 @@ def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncModels]
settings : typing.Optional[RunSettings]
@@ -2875,7 +2868,7 @@ def speech_recognition(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ selected_model: typing.Optional[AsrModels] = None,
language: typing.Optional[str] = None,
translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
@@ -2899,7 +2892,7 @@ def speech_recognition(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+ selected_model : typing.Optional[AsrModels]
language : typing.Optional[str]
@@ -4088,7 +4081,7 @@ def image_from_web_search(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
@@ -4116,7 +4109,7 @@ def image_from_web_search(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -4493,7 +4486,7 @@ def embed(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[EmbeddingModels] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EmbeddingsPageOutput:
@@ -4509,7 +4502,7 @@ def embed(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]
+ selected_model : typing.Optional[EmbeddingModels]
settings : typing.Optional[RunSettings]
@@ -4605,7 +4598,7 @@ def seo_people_also_ask_doc(
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
@@ -4617,7 +4610,7 @@ def seo_people_also_ask_doc(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -4648,7 +4641,7 @@ def seo_people_also_ask_doc(
doc_extract_url : typing.Optional[str]
- embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -4676,7 +4669,7 @@ def seo_people_also_ask_doc(
response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -4899,7 +4892,7 @@ async def animate(
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
max_frames: typing.Optional[int] = OMIT,
- selected_model: typing.Optional[DeforumSdPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[AnimationModels] = OMIT,
animation_mode: typing.Optional[str] = OMIT,
zoom: typing.Optional[str] = OMIT,
translation_x: typing.Optional[str] = OMIT,
@@ -4926,7 +4919,7 @@ async def animate(
max_frames : typing.Optional[int]
- selected_model : typing.Optional[DeforumSdPageRequestSelectedModel]
+ selected_model : typing.Optional[AnimationModels]
animation_mode : typing.Optional[str]
@@ -5281,7 +5274,7 @@ async def seo_people_also_ask(
max_references: typing.Optional[int] = OMIT,
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[RelatedQnAPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
@@ -5289,7 +5282,7 @@ async def seo_people_also_ask(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[RelatedQnAPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -5324,7 +5317,7 @@ async def seo_people_also_ask(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[RelatedQnAPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -5344,7 +5337,7 @@ async def seo_people_also_ask(
response_format_type : typing.Optional[RelatedQnAPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -5483,7 +5476,7 @@ async def seo_content(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[SeoSummaryPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -5527,7 +5520,7 @@ async def seo_content(
response_format_type : typing.Optional[SeoSummaryPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -5660,7 +5653,7 @@ async def web_search_llm(
max_references: typing.Optional[int] = OMIT,
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
- embedding_model: typing.Optional[GoogleGptPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
avoid_repetition: typing.Optional[bool] = OMIT,
num_outputs: typing.Optional[int] = OMIT,
@@ -5668,7 +5661,7 @@ async def web_search_llm(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[GoogleGptPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -5703,7 +5696,7 @@ async def web_search_llm(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[GoogleGptPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -5723,7 +5716,7 @@ async def web_search_llm(
response_format_type : typing.Optional[GoogleGptPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -6294,7 +6287,7 @@ async def synthesize_data(
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
sheet_url: typing.Optional[core.File] = None,
- selected_asr_model: typing.Optional[SynthesizeDataRequestSelectedAsrModel] = None,
+ selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
glossary_document: typing.Optional[core.File] = None,
task_instructions: typing.Optional[str] = None,
@@ -6324,7 +6317,7 @@ async def synthesize_data(
sheet_url : typing.Optional[core.File]
See core.File for more documentation
- selected_asr_model : typing.Optional[SynthesizeDataRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[AsrModels]
google_translate_target : typing.Optional[str]
@@ -6596,7 +6589,7 @@ async def rag(
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[DocSearchPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
@@ -6635,7 +6628,7 @@ async def rag(
doc_extract_url : typing.Optional[str]
- embedding_model : typing.Optional[DocSearchPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -6930,7 +6923,7 @@ async def doc_summary(
merge_instructions: typing.Optional[str] = None,
selected_model: typing.Optional[LargeLanguageModels] = None,
chain_type: typing.Optional[typing.Literal["map_reduce"]] = None,
- selected_asr_model: typing.Optional[DocSummaryRequestSelectedAsrModel] = None,
+ selected_asr_model: typing.Optional[AsrModels] = None,
google_translate_target: typing.Optional[str] = None,
avoid_repetition: typing.Optional[bool] = None,
num_outputs: typing.Optional[int] = None,
@@ -6962,7 +6955,7 @@ async def doc_summary(
chain_type : typing.Optional[typing.Literal["map_reduce"]]
- selected_asr_model : typing.Optional[DocSummaryRequestSelectedAsrModel]
+ selected_asr_model : typing.Optional[AsrModels]
google_translate_target : typing.Optional[str]
@@ -7195,7 +7188,7 @@ async def lipsync(
face_padding_left: typing.Optional[int] = None,
face_padding_right: typing.Optional[int] = None,
sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncRequestSelectedModel] = None,
+ selected_model: typing.Optional[LipsyncModels] = None,
input_audio: typing.Optional[core.File] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
@@ -7223,7 +7216,7 @@ async def lipsync(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncModels]
input_audio : typing.Optional[core.File]
See core.File for more documentation
@@ -7354,7 +7347,7 @@ async def lipsync_tts(
face_padding_left: typing.Optional[int] = None,
face_padding_right: typing.Optional[int] = None,
sadtalker_settings: typing.Optional[SadTalkerSettings] = None,
- selected_model: typing.Optional[LipsyncTtsRequestSelectedModel] = None,
+ selected_model: typing.Optional[LipsyncModels] = None,
settings: typing.Optional[RunSettings] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> LipsyncTtsPageOutput:
@@ -7420,7 +7413,7 @@ async def lipsync_tts(
sadtalker_settings : typing.Optional[SadTalkerSettings]
- selected_model : typing.Optional[LipsyncTtsRequestSelectedModel]
+ selected_model : typing.Optional[LipsyncModels]
settings : typing.Optional[RunSettings]
@@ -7727,7 +7720,7 @@ async def speech_recognition(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.List[RecipeFunction]] = None,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = None,
- selected_model: typing.Optional[SpeechRecognitionRequestSelectedModel] = None,
+ selected_model: typing.Optional[AsrModels] = None,
language: typing.Optional[str] = None,
translation_model: typing.Optional[SpeechRecognitionRequestTranslationModel] = None,
output_format: typing.Optional[SpeechRecognitionRequestOutputFormat] = None,
@@ -7751,7 +7744,7 @@ async def speech_recognition(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[SpeechRecognitionRequestSelectedModel]
+ selected_model : typing.Optional[AsrModels]
language : typing.Optional[str]
@@ -9004,7 +8997,7 @@ async def image_from_web_search(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
selected_model: typing.Optional[GoogleImageGenPageRequestSelectedModel] = OMIT,
negative_prompt: typing.Optional[str] = OMIT,
@@ -9032,7 +9025,7 @@ async def image_from_web_search(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
@@ -9433,7 +9426,7 @@ async def embed(
example_id: typing.Optional[str] = None,
functions: typing.Optional[typing.Sequence[RecipeFunction]] = OMIT,
variables: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- selected_model: typing.Optional[EmbeddingsPageRequestSelectedModel] = OMIT,
+ selected_model: typing.Optional[EmbeddingModels] = OMIT,
settings: typing.Optional[RunSettings] = OMIT,
request_options: typing.Optional[RequestOptions] = None,
) -> EmbeddingsPageOutput:
@@ -9449,7 +9442,7 @@ async def embed(
variables : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
Variables to be used as Jinja prompt templates and in functions as arguments
- selected_model : typing.Optional[EmbeddingsPageRequestSelectedModel]
+ selected_model : typing.Optional[EmbeddingModels]
settings : typing.Optional[RunSettings]
@@ -9553,7 +9546,7 @@ async def seo_people_also_ask_doc(
max_context_words: typing.Optional[int] = OMIT,
scroll_jump: typing.Optional[int] = OMIT,
doc_extract_url: typing.Optional[str] = OMIT,
- embedding_model: typing.Optional[RelatedQnADocPageRequestEmbeddingModel] = OMIT,
+ embedding_model: typing.Optional[EmbeddingModels] = OMIT,
dense_weight: typing.Optional[float] = OMIT,
task_instructions: typing.Optional[str] = OMIT,
query_instructions: typing.Optional[str] = OMIT,
@@ -9565,7 +9558,7 @@ async def seo_people_also_ask_doc(
max_tokens: typing.Optional[int] = OMIT,
sampling_temperature: typing.Optional[float] = OMIT,
response_format_type: typing.Optional[RelatedQnADocPageRequestResponseFormatType] = OMIT,
- serp_search_location: typing.Optional[SerpSearchLocation] = OMIT,
+ serp_search_location: typing.Optional[SerpSearchLocations] = OMIT,
scaleserp_locations: typing.Optional[typing.Sequence[str]] = OMIT,
serp_search_type: typing.Optional[SerpSearchType] = OMIT,
scaleserp_search_field: typing.Optional[str] = OMIT,
@@ -9596,7 +9589,7 @@ async def seo_people_also_ask_doc(
doc_extract_url : typing.Optional[str]
- embedding_model : typing.Optional[RelatedQnADocPageRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -9624,7 +9617,7 @@ async def seo_people_also_ask_doc(
response_format_type : typing.Optional[RelatedQnADocPageRequestResponseFormatType]
- serp_search_location : typing.Optional[SerpSearchLocation]
+ serp_search_location : typing.Optional[SerpSearchLocations]
scaleserp_locations : typing.Optional[typing.Sequence[str]]
DEPRECATED: use `serp_search_location` instead
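
Since `client.py` now routes every ASR-flavoured parameter through the single `AsrModels` alias, caller code no longer needs one annotation per endpoint. A small user-side sketch; the string literals are copied from the deleted per-endpoint unions further down, and it is assumed (not shown in this diff) that `AsrModels` accepts the same values:

```python
import typing

from gooey import AsrModels


def pick_asr_model(prefer_hindi: bool) -> typing.Optional[AsrModels]:
    # One annotation now covers synthesize_data(selected_asr_model=...),
    # doc_summary(selected_asr_model=...) and speech_recognition(selected_model=...).
    # Literals taken from the removed copilot_completion_request_asr_model.py;
    # identical membership in AsrModels is assumed.
    return "whisper_hindi_large_v2" if prefer_hindi else "whisper_large_v2"
```
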
diff --git a/src/gooey/copilot/__init__.py b/src/gooey/copilot/__init__.py
index 3234b31..d33ab85 100644
--- a/src/gooey/copilot/__init__.py
+++ b/src/gooey/copilot/__init__.py
@@ -1,12 +1,9 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
- CopilotCompletionRequestAsrModel,
CopilotCompletionRequestCitationStyle,
- CopilotCompletionRequestEmbeddingModel,
CopilotCompletionRequestFunctionsItem,
CopilotCompletionRequestFunctionsItemTrigger,
- CopilotCompletionRequestLipsyncModel,
CopilotCompletionRequestOpenaiTtsModel,
CopilotCompletionRequestOpenaiVoiceName,
CopilotCompletionRequestResponseFormatType,
@@ -17,12 +14,9 @@
)
__all__ = [
- "CopilotCompletionRequestAsrModel",
"CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
"CopilotCompletionRequestResponseFormatType",
diff --git a/src/gooey/copilot/client.py b/src/gooey/copilot/client.py
index 9dcc465..93b4f19 100644
--- a/src/gooey/copilot/client.py
+++ b/src/gooey/copilot/client.py
@@ -6,11 +6,11 @@
from .. import core
from ..types.conversation_entry import ConversationEntry
from ..types.large_language_models import LargeLanguageModels
-from .types.copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
+from ..types.embedding_models import EmbeddingModels
from .types.copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
-from .types.copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
+from ..types.asr_models import AsrModels
from .types.copilot_completion_request_translation_model import CopilotCompletionRequestTranslationModel
-from .types.copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
+from ..types.lipsync_models import LipsyncModels
from ..types.llm_tools import LlmTools
from .types.copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
from .types.copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
@@ -60,17 +60,17 @@ def completion(
max_references: typing.Optional[int] = None,
max_context_words: typing.Optional[int] = None,
scroll_jump: typing.Optional[int] = None,
- embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
+ embedding_model: typing.Optional[EmbeddingModels] = None,
dense_weight: typing.Optional[float] = None,
citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
use_url_shortener: typing.Optional[bool] = None,
- asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
+ asr_model: typing.Optional[AsrModels] = None,
asr_language: typing.Optional[str] = None,
translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
user_language: typing.Optional[str] = None,
input_glossary_document: typing.Optional[core.File] = None,
output_glossary_document: typing.Optional[core.File] = None,
- lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
+ lipsync_model: typing.Optional[LipsyncModels] = None,
tools: typing.Optional[typing.List[LlmTools]] = None,
avoid_repetition: typing.Optional[bool] = None,
num_outputs: typing.Optional[int] = None,
@@ -152,7 +152,7 @@ def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -164,7 +164,7 @@ def completion(
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
+ asr_model : typing.Optional[AsrModels]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
@@ -181,7 +181,7 @@ def completion(
output_glossary_document : typing.Optional[core.File]
See core.File for more documentation
- lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
+ lipsync_model : typing.Optional[LipsyncModels]
tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
@@ -412,17 +412,17 @@ async def completion(
max_references: typing.Optional[int] = None,
max_context_words: typing.Optional[int] = None,
scroll_jump: typing.Optional[int] = None,
- embedding_model: typing.Optional[CopilotCompletionRequestEmbeddingModel] = None,
+ embedding_model: typing.Optional[EmbeddingModels] = None,
dense_weight: typing.Optional[float] = None,
citation_style: typing.Optional[CopilotCompletionRequestCitationStyle] = None,
use_url_shortener: typing.Optional[bool] = None,
- asr_model: typing.Optional[CopilotCompletionRequestAsrModel] = None,
+ asr_model: typing.Optional[AsrModels] = None,
asr_language: typing.Optional[str] = None,
translation_model: typing.Optional[CopilotCompletionRequestTranslationModel] = None,
user_language: typing.Optional[str] = None,
input_glossary_document: typing.Optional[core.File] = None,
output_glossary_document: typing.Optional[core.File] = None,
- lipsync_model: typing.Optional[CopilotCompletionRequestLipsyncModel] = None,
+ lipsync_model: typing.Optional[LipsyncModels] = None,
tools: typing.Optional[typing.List[LlmTools]] = None,
avoid_repetition: typing.Optional[bool] = None,
num_outputs: typing.Optional[int] = None,
@@ -504,7 +504,7 @@ async def completion(
scroll_jump : typing.Optional[int]
- embedding_model : typing.Optional[CopilotCompletionRequestEmbeddingModel]
+ embedding_model : typing.Optional[EmbeddingModels]
dense_weight : typing.Optional[float]
@@ -516,7 +516,7 @@ async def completion(
use_url_shortener : typing.Optional[bool]
- asr_model : typing.Optional[CopilotCompletionRequestAsrModel]
+ asr_model : typing.Optional[AsrModels]
Choose a model to transcribe incoming audio messages to text.
asr_language : typing.Optional[str]
@@ -533,7 +533,7 @@ async def completion(
output_glossary_document : typing.Optional[core.File]
See core.File for more documentation
- lipsync_model : typing.Optional[CopilotCompletionRequestLipsyncModel]
+ lipsync_model : typing.Optional[LipsyncModels]
tools : typing.Optional[typing.List[LlmTools]]
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
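
For the copilot `completion` signatures above, the three model knobs now use the SDK-wide aliases. A hedged sketch of the keyword values; literals are copied from the deleted per-endpoint unions below (whether the shared aliases enumerate exactly the same strings is not shown here), and the `client.copilot` attribute name is assumed from the `CopilotClient` import in `client.py`:

```python
from gooey import AsrModels, EmbeddingModels, LipsyncModels

# Values copied from the deleted per-endpoint unions in this diff.
embedding_model: EmbeddingModels = "openai_3_large"  # was CopilotCompletionRequestEmbeddingModel
asr_model: AsrModels = "whisper_large_v3"            # was CopilotCompletionRequestAsrModel
lipsync_model: LipsyncModels = "Wav2Lip"             # was CopilotCompletionRequestLipsyncModel

# e.g. client.copilot.completion(<required args>, embedding_model=embedding_model,
#                                asr_model=asr_model, lipsync_model=lipsync_model)
```
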
diff --git a/src/gooey/copilot/types/__init__.py b/src/gooey/copilot/types/__init__.py
index 1cdf619..45a2775 100644
--- a/src/gooey/copilot/types/__init__.py
+++ b/src/gooey/copilot/types/__init__.py
@@ -1,11 +1,8 @@
# This file was auto-generated by Fern from our API Definition.
-from .copilot_completion_request_asr_model import CopilotCompletionRequestAsrModel
from .copilot_completion_request_citation_style import CopilotCompletionRequestCitationStyle
-from .copilot_completion_request_embedding_model import CopilotCompletionRequestEmbeddingModel
from .copilot_completion_request_functions_item import CopilotCompletionRequestFunctionsItem
from .copilot_completion_request_functions_item_trigger import CopilotCompletionRequestFunctionsItemTrigger
-from .copilot_completion_request_lipsync_model import CopilotCompletionRequestLipsyncModel
from .copilot_completion_request_openai_tts_model import CopilotCompletionRequestOpenaiTtsModel
from .copilot_completion_request_openai_voice_name import CopilotCompletionRequestOpenaiVoiceName
from .copilot_completion_request_response_format_type import CopilotCompletionRequestResponseFormatType
@@ -17,12 +14,9 @@
from .copilot_completion_request_tts_provider import CopilotCompletionRequestTtsProvider
__all__ = [
- "CopilotCompletionRequestAsrModel",
"CopilotCompletionRequestCitationStyle",
- "CopilotCompletionRequestEmbeddingModel",
"CopilotCompletionRequestFunctionsItem",
"CopilotCompletionRequestFunctionsItemTrigger",
- "CopilotCompletionRequestLipsyncModel",
"CopilotCompletionRequestOpenaiTtsModel",
"CopilotCompletionRequestOpenaiVoiceName",
"CopilotCompletionRequestResponseFormatType",
diff --git a/src/gooey/copilot/types/copilot_completion_request_asr_model.py b/src/gooey/copilot/types/copilot_completion_request_asr_model.py
deleted file mode 100644
index 65ae0f5..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py b/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
deleted file mode 100644
index 4655801..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py b/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
deleted file mode 100644
index 865bc4b..0000000
--- a/src/gooey/copilot/types/copilot_completion_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CopilotCompletionRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
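
The three deleted modules above are plain `typing.Union[typing.Literal[...], typing.Any]` aliases. The shared replacements (`asr_models.py`, `embedding_models.py`, `lipsync_models.py`, etc.) are not included in this excerpt, but presumably follow the same pattern; a hypothetical reconstruction of the smallest one:

```python
# Hypothetical sketch of src/gooey/types/lipsync_models.py, mirroring the deleted
# CopilotCompletionRequestLipsyncModel alias above; the real generated file is not shown here.
import typing

LipsyncModels = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
```
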
diff --git a/src/gooey/core/client_wrapper.py b/src/gooey/core/client_wrapper.py
index 133ff59..6fe0ebd 100644
--- a/src/gooey/core/client_wrapper.py
+++ b/src/gooey/core/client_wrapper.py
@@ -22,7 +22,7 @@ def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "gooeyai",
- "X-Fern-SDK-Version": "0.0.1-beta23",
+ "X-Fern-SDK-Version": "0.0.1-beta24",
}
headers["Authorization"] = f"Bearer {self._get_api_key()}"
return headers
diff --git a/src/gooey/types/__init__.py b/src/gooey/types/__init__.py
index e53e0ae..fab1149 100644
--- a/src/gooey/types/__init__.py
+++ b/src/gooey/types/__init__.py
@@ -4,14 +4,15 @@
from .agg_function_function import AggFunctionFunction
from .agg_function_result import AggFunctionResult
from .agg_function_result_function import AggFunctionResultFunction
+from .animation_models import AnimationModels
from .animation_prompt import AnimationPrompt
from .asr_chunk import AsrChunk
+from .asr_models import AsrModels
from .asr_output_json import AsrOutputJson
from .asr_page_output import AsrPageOutput
from .asr_page_output_output_text_item import AsrPageOutputOutputTextItem
from .asr_page_request import AsrPageRequest
from .asr_page_request_output_format import AsrPageRequestOutputFormat
-from .asr_page_request_selected_model import AsrPageRequestSelectedModel
from .asr_page_request_translation_model import AsrPageRequestTranslationModel
from .asr_page_status_response import AsrPageStatusResponse
from .async_api_response_model_v3 import AsyncApiResponseModelV3
@@ -55,10 +56,7 @@
from .conversation_entry_role import ConversationEntryRole
from .conversation_start import ConversationStart
from .create_stream_request import CreateStreamRequest
-from .create_stream_request_asr_model import CreateStreamRequestAsrModel
from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
-from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
-from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
from .create_stream_request_openai_tts_model import CreateStreamRequestOpenaiTtsModel
from .create_stream_request_openai_voice_name import CreateStreamRequestOpenaiVoiceName
from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
@@ -66,31 +64,26 @@
from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
from .create_stream_response import CreateStreamResponse
from .deforum_sd_page_output import DeforumSdPageOutput
-from .deforum_sd_page_request_selected_model import DeforumSdPageRequestSelectedModel
from .deforum_sd_page_status_response import DeforumSdPageStatusResponse
from .doc_extract_page_output import DocExtractPageOutput
from .doc_extract_page_request import DocExtractPageRequest
from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
-from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
from .doc_extract_page_status_response import DocExtractPageStatusResponse
from .doc_search_page_output import DocSearchPageOutput
from .doc_search_page_request_citation_style import DocSearchPageRequestCitationStyle
-from .doc_search_page_request_embedding_model import DocSearchPageRequestEmbeddingModel
from .doc_search_page_request_keyword_query import DocSearchPageRequestKeywordQuery
from .doc_search_page_request_response_format_type import DocSearchPageRequestResponseFormatType
from .doc_search_page_status_response import DocSearchPageStatusResponse
from .doc_summary_page_output import DocSummaryPageOutput
from .doc_summary_page_request import DocSummaryPageRequest
from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
-from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
from .doc_summary_page_status_response import DocSummaryPageStatusResponse
from .doc_summary_request_response_format_type import DocSummaryRequestResponseFormatType
-from .doc_summary_request_selected_asr_model import DocSummaryRequestSelectedAsrModel
from .email_face_inpainting_page_output import EmailFaceInpaintingPageOutput
from .email_face_inpainting_page_request_selected_model import EmailFaceInpaintingPageRequestSelectedModel
from .email_face_inpainting_page_status_response import EmailFaceInpaintingPageStatusResponse
+from .embedding_models import EmbeddingModels
from .embeddings_page_output import EmbeddingsPageOutput
-from .embeddings_page_request_selected_model import EmbeddingsPageRequestSelectedModel
from .embeddings_page_status_response import EmbeddingsPageStatusResponse
from .eval_prompt import EvalPrompt
from .face_inpainting_page_output import FaceInpaintingPageOutput
@@ -103,7 +96,6 @@
from .generic_error_response import GenericErrorResponse
from .generic_error_response_detail import GenericErrorResponseDetail
from .google_gpt_page_output import GoogleGptPageOutput
-from .google_gpt_page_request_embedding_model import GoogleGptPageRequestEmbeddingModel
from .google_gpt_page_request_response_format_type import GoogleGptPageRequestResponseFormatType
from .google_gpt_page_status_response import GoogleGptPageStatusResponse
from .google_image_gen_page_output import GoogleImageGenPageOutput
@@ -126,21 +118,18 @@
from .letter_writer_page_output import LetterWriterPageOutput
from .letter_writer_page_request import LetterWriterPageRequest
from .letter_writer_page_status_response import LetterWriterPageStatusResponse
+from .lipsync_models import LipsyncModels
from .lipsync_page_output import LipsyncPageOutput
from .lipsync_page_request import LipsyncPageRequest
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
from .lipsync_page_status_response import LipsyncPageStatusResponse
-from .lipsync_request_selected_model import LipsyncRequestSelectedModel
from .lipsync_tts_page_output import LipsyncTtsPageOutput
from .lipsync_tts_page_request import LipsyncTtsPageRequest
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
-from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
from .lipsync_tts_page_request_tts_provider import LipsyncTtsPageRequestTtsProvider
from .lipsync_tts_page_status_response import LipsyncTtsPageStatusResponse
from .lipsync_tts_request_openai_tts_model import LipsyncTtsRequestOpenaiTtsModel
from .lipsync_tts_request_openai_voice_name import LipsyncTtsRequestOpenaiVoiceName
-from .lipsync_tts_request_selected_model import LipsyncTtsRequestSelectedModel
from .lipsync_tts_request_tts_provider import LipsyncTtsRequestTtsProvider
from .llm_tools import LlmTools
from .message_part import MessagePart
@@ -174,12 +163,10 @@
from .related_google_gpt_response import RelatedGoogleGptResponse
from .related_qn_a_doc_page_output import RelatedQnADocPageOutput
from .related_qn_a_doc_page_request_citation_style import RelatedQnADocPageRequestCitationStyle
-from .related_qn_a_doc_page_request_embedding_model import RelatedQnADocPageRequestEmbeddingModel
from .related_qn_a_doc_page_request_keyword_query import RelatedQnADocPageRequestKeywordQuery
from .related_qn_a_doc_page_request_response_format_type import RelatedQnADocPageRequestResponseFormatType
from .related_qn_a_doc_page_status_response import RelatedQnADocPageStatusResponse
from .related_qn_a_page_output import RelatedQnAPageOutput
-from .related_qn_a_page_request_embedding_model import RelatedQnAPageRequestEmbeddingModel
from .related_qn_a_page_request_response_format_type import RelatedQnAPageRequestResponseFormatType
from .related_qn_a_page_status_response import RelatedQnAPageStatusResponse
from .remix_image_request_selected_controlnet_model import RemixImageRequestSelectedControlnetModel
@@ -196,11 +183,10 @@
from .sad_talker_settings import SadTalkerSettings
from .sad_talker_settings_preprocess import SadTalkerSettingsPreprocess
from .search_reference import SearchReference
-from .security_schemes import SecuritySchemes
from .seo_summary_page_output import SeoSummaryPageOutput
from .seo_summary_page_request_response_format_type import SeoSummaryPageRequestResponseFormatType
from .seo_summary_page_status_response import SeoSummaryPageStatusResponse
-from .serp_search_location import SerpSearchLocation
+from .serp_search_locations import SerpSearchLocations
from .serp_search_type import SerpSearchType
from .smart_gpt_page_output import SmartGptPageOutput
from .smart_gpt_page_request_response_format_type import SmartGptPageRequestResponseFormatType
@@ -209,11 +195,9 @@
from .social_lookup_email_page_request_response_format_type import SocialLookupEmailPageRequestResponseFormatType
from .social_lookup_email_page_status_response import SocialLookupEmailPageStatusResponse
from .speech_recognition_request_output_format import SpeechRecognitionRequestOutputFormat
-from .speech_recognition_request_selected_model import SpeechRecognitionRequestSelectedModel
from .speech_recognition_request_translation_model import SpeechRecognitionRequestTranslationModel
from .stream_error import StreamError
from .synthesize_data_request_response_format_type import SynthesizeDataRequestResponseFormatType
-from .synthesize_data_request_selected_asr_model import SynthesizeDataRequestSelectedAsrModel
from .text2audio_page_output import Text2AudioPageOutput
from .text2audio_page_status_response import Text2AudioPageStatusResponse
from .text_to_speech_page_output import TextToSpeechPageOutput
@@ -235,12 +219,9 @@
from .video_bots_page_output_final_keyword_query import VideoBotsPageOutputFinalKeywordQuery
from .video_bots_page_output_final_prompt import VideoBotsPageOutputFinalPrompt
from .video_bots_page_request import VideoBotsPageRequest
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
from .video_bots_page_request_functions_item import VideoBotsPageRequestFunctionsItem
from .video_bots_page_request_functions_item_trigger import VideoBotsPageRequestFunctionsItemTrigger
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
from .video_bots_page_request_openai_tts_model import VideoBotsPageRequestOpenaiTtsModel
from .video_bots_page_request_openai_voice_name import VideoBotsPageRequestOpenaiVoiceName
from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
@@ -255,14 +236,15 @@
"AggFunctionFunction",
"AggFunctionResult",
"AggFunctionResultFunction",
+ "AnimationModels",
"AnimationPrompt",
"AsrChunk",
+ "AsrModels",
"AsrOutputJson",
"AsrPageOutput",
"AsrPageOutputOutputTextItem",
"AsrPageRequest",
"AsrPageRequestOutputFormat",
- "AsrPageRequestSelectedModel",
"AsrPageRequestTranslationModel",
"AsrPageStatusResponse",
"AsyncApiResponseModelV3",
@@ -304,10 +286,7 @@
"ConversationEntryRole",
"ConversationStart",
"CreateStreamRequest",
- "CreateStreamRequestAsrModel",
"CreateStreamRequestCitationStyle",
- "CreateStreamRequestEmbeddingModel",
- "CreateStreamRequestLipsyncModel",
"CreateStreamRequestOpenaiTtsModel",
"CreateStreamRequestOpenaiVoiceName",
"CreateStreamRequestResponseFormatType",
@@ -315,31 +294,26 @@
"CreateStreamRequestTtsProvider",
"CreateStreamResponse",
"DeforumSdPageOutput",
- "DeforumSdPageRequestSelectedModel",
"DeforumSdPageStatusResponse",
"DocExtractPageOutput",
"DocExtractPageRequest",
"DocExtractPageRequestResponseFormatType",
- "DocExtractPageRequestSelectedAsrModel",
"DocExtractPageStatusResponse",
"DocSearchPageOutput",
"DocSearchPageRequestCitationStyle",
- "DocSearchPageRequestEmbeddingModel",
"DocSearchPageRequestKeywordQuery",
"DocSearchPageRequestResponseFormatType",
"DocSearchPageStatusResponse",
"DocSummaryPageOutput",
"DocSummaryPageRequest",
"DocSummaryPageRequestResponseFormatType",
- "DocSummaryPageRequestSelectedAsrModel",
"DocSummaryPageStatusResponse",
"DocSummaryRequestResponseFormatType",
- "DocSummaryRequestSelectedAsrModel",
"EmailFaceInpaintingPageOutput",
"EmailFaceInpaintingPageRequestSelectedModel",
"EmailFaceInpaintingPageStatusResponse",
+ "EmbeddingModels",
"EmbeddingsPageOutput",
- "EmbeddingsPageRequestSelectedModel",
"EmbeddingsPageStatusResponse",
"EvalPrompt",
"FaceInpaintingPageOutput",
@@ -352,7 +326,6 @@
"GenericErrorResponse",
"GenericErrorResponseDetail",
"GoogleGptPageOutput",
- "GoogleGptPageRequestEmbeddingModel",
"GoogleGptPageRequestResponseFormatType",
"GoogleGptPageStatusResponse",
"GoogleImageGenPageOutput",
@@ -375,21 +348,18 @@
"LetterWriterPageOutput",
"LetterWriterPageRequest",
"LetterWriterPageStatusResponse",
+ "LipsyncModels",
"LipsyncPageOutput",
"LipsyncPageRequest",
- "LipsyncPageRequestSelectedModel",
"LipsyncPageStatusResponse",
- "LipsyncRequestSelectedModel",
"LipsyncTtsPageOutput",
"LipsyncTtsPageRequest",
"LipsyncTtsPageRequestOpenaiTtsModel",
"LipsyncTtsPageRequestOpenaiVoiceName",
- "LipsyncTtsPageRequestSelectedModel",
"LipsyncTtsPageRequestTtsProvider",
"LipsyncTtsPageStatusResponse",
"LipsyncTtsRequestOpenaiTtsModel",
"LipsyncTtsRequestOpenaiVoiceName",
- "LipsyncTtsRequestSelectedModel",
"LipsyncTtsRequestTtsProvider",
"LlmTools",
"MessagePart",
@@ -419,12 +389,10 @@
"RelatedGoogleGptResponse",
"RelatedQnADocPageOutput",
"RelatedQnADocPageRequestCitationStyle",
- "RelatedQnADocPageRequestEmbeddingModel",
"RelatedQnADocPageRequestKeywordQuery",
"RelatedQnADocPageRequestResponseFormatType",
"RelatedQnADocPageStatusResponse",
"RelatedQnAPageOutput",
- "RelatedQnAPageRequestEmbeddingModel",
"RelatedQnAPageRequestResponseFormatType",
"RelatedQnAPageStatusResponse",
"RemixImageRequestSelectedControlnetModel",
@@ -441,11 +409,10 @@
"SadTalkerSettings",
"SadTalkerSettingsPreprocess",
"SearchReference",
- "SecuritySchemes",
"SeoSummaryPageOutput",
"SeoSummaryPageRequestResponseFormatType",
"SeoSummaryPageStatusResponse",
- "SerpSearchLocation",
+ "SerpSearchLocations",
"SerpSearchType",
"SmartGptPageOutput",
"SmartGptPageRequestResponseFormatType",
@@ -454,11 +421,9 @@
"SocialLookupEmailPageRequestResponseFormatType",
"SocialLookupEmailPageStatusResponse",
"SpeechRecognitionRequestOutputFormat",
- "SpeechRecognitionRequestSelectedModel",
"SpeechRecognitionRequestTranslationModel",
"StreamError",
"SynthesizeDataRequestResponseFormatType",
- "SynthesizeDataRequestSelectedAsrModel",
"Text2AudioPageOutput",
"Text2AudioPageStatusResponse",
"TextToSpeechPageOutput",
@@ -480,12 +445,9 @@
"VideoBotsPageOutputFinalKeywordQuery",
"VideoBotsPageOutputFinalPrompt",
"VideoBotsPageRequest",
- "VideoBotsPageRequestAsrModel",
"VideoBotsPageRequestCitationStyle",
- "VideoBotsPageRequestEmbeddingModel",
"VideoBotsPageRequestFunctionsItem",
"VideoBotsPageRequestFunctionsItemTrigger",
- "VideoBotsPageRequestLipsyncModel",
"VideoBotsPageRequestOpenaiTtsModel",
"VideoBotsPageRequestOpenaiVoiceName",
"VideoBotsPageRequestResponseFormatType",
diff --git a/src/gooey/types/animation_models.py b/src/gooey/types/animation_models.py
new file mode 100644
index 0000000..8ad7a84
--- /dev/null
+++ b/src/gooey/types/animation_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+AnimationModels = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any]
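For orientation, a minimal sketch of how the new consolidated alias reads from application code, assuming the package imports as `gooey` (per the src/gooey layout) and that `gooey.types` re-exports the alias as the __init__.py changes above indicate; the variable name is illustrative:

    from gooey.types import AnimationModels

    # Either listed literal type-checks; the trailing typing.Any keeps the alias
    # open to model names added server-side before the SDK is regenerated.
    model: AnimationModels = "protogen_2_2"  # or "epicdream"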
diff --git a/src/gooey/types/create_stream_request_asr_model.py b/src/gooey/types/asr_models.py
similarity index 91%
rename from src/gooey/types/create_stream_request_asr_model.py
rename to src/gooey/types/asr_models.py
index af166fa..16c222a 100644
--- a/src/gooey/types/create_stream_request_asr_model.py
+++ b/src/gooey/types/asr_models.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestAsrModel = typing.Union[
+AsrModels = typing.Union[
typing.Literal[
"whisper_large_v2",
"whisper_large_v3",
diff --git a/src/gooey/types/asr_page_request.py b/src/gooey/types/asr_page_request.py
index 1d35181..9e3c8a5 100644
--- a/src/gooey/types/asr_page_request.py
+++ b/src/gooey/types/asr_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .asr_page_request_selected_model import AsrPageRequestSelectedModel
+from .asr_models import AsrModels
from .asr_page_request_translation_model import AsrPageRequestTranslationModel
from .asr_page_request_output_format import AsrPageRequestOutputFormat
from .run_settings import RunSettings
@@ -19,7 +19,7 @@ class AsrPageRequest(UniversalBaseModel):
"""
documents: typing.List[str]
- selected_model: typing.Optional[AsrPageRequestSelectedModel] = None
+ selected_model: typing.Optional[AsrModels] = None
language: typing.Optional[str] = None
translation_model: typing.Optional[AsrPageRequestTranslationModel] = None
output_format: typing.Optional[AsrPageRequestOutputFormat] = None
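A hedged construction sketch for the updated request model, assuming `documents` is the only required field (consistent with the Optional annotations shown in this hunk) and using a placeholder URL:

    from gooey.types import AsrPageRequest

    req = AsrPageRequest(
        documents=["https://example.com/meeting.wav"],  # placeholder input
        selected_model="whisper_large_v3",              # now typed as the shared AsrModels alias
    )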
diff --git a/src/gooey/types/asr_page_request_selected_model.py b/src/gooey/types/asr_page_request_selected_model.py
deleted file mode 100644
index 4e80d3c..0000000
--- a/src/gooey/types/asr_page_request_selected_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-AsrPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/create_stream_request.py b/src/gooey/types/create_stream_request.py
index 2d4745b..f412021 100644
--- a/src/gooey/types/create_stream_request.py
+++ b/src/gooey/types/create_stream_request.py
@@ -7,11 +7,11 @@
from .recipe_function import RecipeFunction
from .conversation_entry import ConversationEntry
from .large_language_models import LargeLanguageModels
-from .create_stream_request_embedding_model import CreateStreamRequestEmbeddingModel
+from .embedding_models import EmbeddingModels
from .create_stream_request_citation_style import CreateStreamRequestCitationStyle
-from .create_stream_request_asr_model import CreateStreamRequestAsrModel
+from .asr_models import AsrModels
from .create_stream_request_translation_model import CreateStreamRequestTranslationModel
-from .create_stream_request_lipsync_model import CreateStreamRequestLipsyncModel
+from .lipsync_models import LipsyncModels
from .llm_tools import LlmTools
from .create_stream_request_response_format_type import CreateStreamRequestResponseFormatType
from .create_stream_request_tts_provider import CreateStreamRequestTtsProvider
@@ -85,7 +85,7 @@ class CreateStreamRequest(UniversalBaseModel):
max_references: typing.Optional[int] = None
max_context_words: typing.Optional[int] = None
scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[CreateStreamRequestEmbeddingModel] = None
+ embedding_model: typing.Optional[EmbeddingModels] = None
dense_weight: typing.Optional[float] = pydantic.Field(default=None)
"""
Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
@@ -94,7 +94,7 @@ class CreateStreamRequest(UniversalBaseModel):
citation_style: typing.Optional[CreateStreamRequestCitationStyle] = None
use_url_shortener: typing.Optional[bool] = None
- asr_model: typing.Optional[CreateStreamRequestAsrModel] = pydantic.Field(default=None)
+ asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None)
"""
Choose a model to transcribe incoming audio messages to text.
"""
@@ -120,7 +120,7 @@ class CreateStreamRequest(UniversalBaseModel):
Translation Glossary for LLM Language (English) -> User Language
"""
- lipsync_model: typing.Optional[CreateStreamRequestLipsyncModel] = None
+ lipsync_model: typing.Optional[LipsyncModels] = None
tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
"""
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
diff --git a/src/gooey/types/create_stream_request_lipsync_model.py b/src/gooey/types/create_stream_request_lipsync_model.py
deleted file mode 100644
index c207d45..0000000
--- a/src/gooey/types/create_stream_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-CreateStreamRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/deforum_sd_page_request_selected_model.py b/src/gooey/types/deforum_sd_page_request_selected_model.py
deleted file mode 100644
index 3af657a..0000000
--- a/src/gooey/types/deforum_sd_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DeforumSdPageRequestSelectedModel = typing.Union[typing.Literal["protogen_2_2", "epicdream"], typing.Any]
diff --git a/src/gooey/types/doc_extract_page_request.py b/src/gooey/types/doc_extract_page_request.py
index 9690c6c..6182439 100644
--- a/src/gooey/types/doc_extract_page_request.py
+++ b/src/gooey/types/doc_extract_page_request.py
@@ -4,7 +4,7 @@
import typing
from .recipe_function import RecipeFunction
import pydantic
-from .doc_extract_page_request_selected_asr_model import DocExtractPageRequestSelectedAsrModel
+from .asr_models import AsrModels
from .large_language_models import LargeLanguageModels
from .doc_extract_page_request_response_format_type import DocExtractPageRequestResponseFormatType
from .run_settings import RunSettings
@@ -20,7 +20,7 @@ class DocExtractPageRequest(UniversalBaseModel):
documents: typing.List[str]
sheet_url: typing.Optional[str] = None
- selected_asr_model: typing.Optional[DocExtractPageRequestSelectedAsrModel] = None
+ selected_asr_model: typing.Optional[AsrModels] = None
google_translate_target: typing.Optional[str] = None
glossary_document: typing.Optional[str] = None
task_instructions: typing.Optional[str] = None
diff --git a/src/gooey/types/doc_extract_page_request_selected_asr_model.py b/src/gooey/types/doc_extract_page_request_selected_asr_model.py
deleted file mode 100644
index a358400..0000000
--- a/src/gooey/types/doc_extract_page_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocExtractPageRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/doc_search_page_request_embedding_model.py b/src/gooey/types/doc_search_page_request_embedding_model.py
deleted file mode 100644
index fb35612..0000000
--- a/src/gooey/types/doc_search_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSearchPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/doc_summary_page_request.py b/src/gooey/types/doc_summary_page_request.py
index 466ddc1..9bd8770 100644
--- a/src/gooey/types/doc_summary_page_request.py
+++ b/src/gooey/types/doc_summary_page_request.py
@@ -5,7 +5,7 @@
from .recipe_function import RecipeFunction
import pydantic
from .large_language_models import LargeLanguageModels
-from .doc_summary_page_request_selected_asr_model import DocSummaryPageRequestSelectedAsrModel
+from .asr_models import AsrModels
from .doc_summary_page_request_response_format_type import DocSummaryPageRequestResponseFormatType
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -23,7 +23,7 @@ class DocSummaryPageRequest(UniversalBaseModel):
merge_instructions: typing.Optional[str] = None
selected_model: typing.Optional[LargeLanguageModels] = None
chain_type: typing.Optional[typing.Literal["map_reduce"]] = None
- selected_asr_model: typing.Optional[DocSummaryPageRequestSelectedAsrModel] = None
+ selected_asr_model: typing.Optional[AsrModels] = None
google_translate_target: typing.Optional[str] = None
avoid_repetition: typing.Optional[bool] = None
num_outputs: typing.Optional[int] = None
diff --git a/src/gooey/types/doc_summary_page_request_selected_asr_model.py b/src/gooey/types/doc_summary_page_request_selected_asr_model.py
deleted file mode 100644
index c04cc7a..0000000
--- a/src/gooey/types/doc_summary_page_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryPageRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/doc_summary_request_selected_asr_model.py b/src/gooey/types/doc_summary_request_selected_asr_model.py
deleted file mode 100644
index 8b8a338..0000000
--- a/src/gooey/types/doc_summary_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-DocSummaryRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/create_stream_request_embedding_model.py b/src/gooey/types/embedding_models.py
similarity index 87%
rename from src/gooey/types/create_stream_request_embedding_model.py
rename to src/gooey/types/embedding_models.py
index cef26bf..8007d2d 100644
--- a/src/gooey/types/create_stream_request_embedding_model.py
+++ b/src/gooey/types/embedding_models.py
@@ -2,7 +2,7 @@
import typing
-CreateStreamRequestEmbeddingModel = typing.Union[
+EmbeddingModels = typing.Union[
typing.Literal[
"openai_3_large",
"openai_3_small",
diff --git a/src/gooey/types/embeddings_page_request_selected_model.py b/src/gooey/types/embeddings_page_request_selected_model.py
deleted file mode 100644
index a03ecc8..0000000
--- a/src/gooey/types/embeddings_page_request_selected_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-EmbeddingsPageRequestSelectedModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/google_gpt_page_request_embedding_model.py b/src/gooey/types/google_gpt_page_request_embedding_model.py
deleted file mode 100644
index 66f060f..0000000
--- a/src/gooey/types/google_gpt_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-GoogleGptPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/lipsync_models.py b/src/gooey/types/lipsync_models.py
new file mode 100644
index 0000000..0ee41ee
--- /dev/null
+++ b/src/gooey/types/lipsync_models.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+LipsyncModels = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_page_request.py b/src/gooey/types/lipsync_page_request.py
index 2914a1e..5ea1d3f 100644
--- a/src/gooey/types/lipsync_page_request.py
+++ b/src/gooey/types/lipsync_page_request.py
@@ -5,7 +5,7 @@
from .recipe_function import RecipeFunction
import pydantic
from .sad_talker_settings import SadTalkerSettings
-from .lipsync_page_request_selected_model import LipsyncPageRequestSelectedModel
+from .lipsync_models import LipsyncModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -23,7 +23,7 @@ class LipsyncPageRequest(UniversalBaseModel):
face_padding_left: typing.Optional[int] = None
face_padding_right: typing.Optional[int] = None
sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncPageRequestSelectedModel] = None
+ selected_model: typing.Optional[LipsyncModels] = None
input_audio: typing.Optional[str] = None
settings: typing.Optional[RunSettings] = None
diff --git a/src/gooey/types/lipsync_page_request_selected_model.py b/src/gooey/types/lipsync_page_request_selected_model.py
deleted file mode 100644
index da68ef8..0000000
--- a/src/gooey/types/lipsync_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_request_selected_model.py b/src/gooey/types/lipsync_request_selected_model.py
deleted file mode 100644
index c5614b4..0000000
--- a/src/gooey/types/lipsync_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_page_request.py b/src/gooey/types/lipsync_tts_page_request.py
index f4f5293..f063d03 100644
--- a/src/gooey/types/lipsync_tts_page_request.py
+++ b/src/gooey/types/lipsync_tts_page_request.py
@@ -8,7 +8,7 @@
from .lipsync_tts_page_request_openai_voice_name import LipsyncTtsPageRequestOpenaiVoiceName
from .lipsync_tts_page_request_openai_tts_model import LipsyncTtsPageRequestOpenaiTtsModel
from .sad_talker_settings import SadTalkerSettings
-from .lipsync_tts_page_request_selected_model import LipsyncTtsPageRequestSelectedModel
+from .lipsync_models import LipsyncModels
from .run_settings import RunSettings
from ..core.pydantic_utilities import IS_PYDANTIC_V2
@@ -49,7 +49,7 @@ class LipsyncTtsPageRequest(UniversalBaseModel):
face_padding_left: typing.Optional[int] = None
face_padding_right: typing.Optional[int] = None
sadtalker_settings: typing.Optional[SadTalkerSettings] = None
- selected_model: typing.Optional[LipsyncTtsPageRequestSelectedModel] = None
+ selected_model: typing.Optional[LipsyncModels] = None
settings: typing.Optional[RunSettings] = None
if IS_PYDANTIC_V2:
diff --git a/src/gooey/types/lipsync_tts_page_request_selected_model.py b/src/gooey/types/lipsync_tts_page_request_selected_model.py
deleted file mode 100644
index 538058b..0000000
--- a/src/gooey/types/lipsync_tts_page_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsPageRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/lipsync_tts_request_selected_model.py b/src/gooey/types/lipsync_tts_request_selected_model.py
deleted file mode 100644
index 9ece5a9..0000000
--- a/src/gooey/types/lipsync_tts_request_selected_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-LipsyncTtsRequestSelectedModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]
diff --git a/src/gooey/types/llm_tools.py b/src/gooey/types/llm_tools.py
index 30a07de..62edec8 100644
--- a/src/gooey/types/llm_tools.py
+++ b/src/gooey/types/llm_tools.py
@@ -2,4 +2,4 @@
import typing
-LlmTools = typing.Union[typing.Literal["json_to_pdf"], typing.Any]
+LlmTools = typing.Literal["json_to_pdf"]
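Unlike the alias consolidations above, this hunk also narrows the type: the old `Union[..., typing.Any]` form let type checkers accept any value, while the new bare `Literal` does not. A small sketch of the practical effect (the rejected value is made up):

    from gooey.types import LlmTools

    tool: LlmTools = "json_to_pdf"   # still fine
    # tool: LlmTools = "not_a_tool"  # now rejected by mypy/pyright; previously allowed via typing.Any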
diff --git a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py b/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
deleted file mode 100644
index 680bbb5..0000000
--- a/src/gooey/types/related_qn_a_doc_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnADocPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/related_qn_a_page_request_embedding_model.py b/src/gooey/types/related_qn_a_page_request_embedding_model.py
deleted file mode 100644
index a591920..0000000
--- a/src/gooey/types/related_qn_a_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-RelatedQnAPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/security_schemes.py b/src/gooey/types/security_schemes.py
deleted file mode 100644
index 8dc4491..0000000
--- a/src/gooey/types/security_schemes.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SecuritySchemes = typing.Optional[typing.Any]
diff --git a/src/gooey/types/serp_search_location.py b/src/gooey/types/serp_search_locations.py
similarity index 98%
rename from src/gooey/types/serp_search_location.py
rename to src/gooey/types/serp_search_locations.py
index 9b64ad9..2d5144d 100644
--- a/src/gooey/types/serp_search_location.py
+++ b/src/gooey/types/serp_search_locations.py
@@ -2,7 +2,7 @@
import typing
-SerpSearchLocation = typing.Union[
+SerpSearchLocations = typing.Union[
typing.Literal[
"af",
"al",
diff --git a/src/gooey/types/speech_recognition_request_selected_model.py b/src/gooey/types/speech_recognition_request_selected_model.py
deleted file mode 100644
index 9d2d28f..0000000
--- a/src/gooey/types/speech_recognition_request_selected_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SpeechRecognitionRequestSelectedModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/synthesize_data_request_selected_asr_model.py b/src/gooey/types/synthesize_data_request_selected_asr_model.py
deleted file mode 100644
index 6c1bc21..0000000
--- a/src/gooey/types/synthesize_data_request_selected_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-SynthesizeDataRequestSelectedAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request.py b/src/gooey/types/video_bots_page_request.py
index 6fb8b5e..bf1cb59 100644
--- a/src/gooey/types/video_bots_page_request.py
+++ b/src/gooey/types/video_bots_page_request.py
@@ -6,11 +6,11 @@
import pydantic
from .conversation_entry import ConversationEntry
from .large_language_models import LargeLanguageModels
-from .video_bots_page_request_embedding_model import VideoBotsPageRequestEmbeddingModel
+from .embedding_models import EmbeddingModels
from .video_bots_page_request_citation_style import VideoBotsPageRequestCitationStyle
-from .video_bots_page_request_asr_model import VideoBotsPageRequestAsrModel
+from .asr_models import AsrModels
from .video_bots_page_request_translation_model import VideoBotsPageRequestTranslationModel
-from .video_bots_page_request_lipsync_model import VideoBotsPageRequestLipsyncModel
+from .lipsync_models import LipsyncModels
from .llm_tools import LlmTools
from .video_bots_page_request_response_format_type import VideoBotsPageRequestResponseFormatType
from .video_bots_page_request_tts_provider import VideoBotsPageRequestTtsProvider
@@ -52,7 +52,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
max_references: typing.Optional[int] = None
max_context_words: typing.Optional[int] = None
scroll_jump: typing.Optional[int] = None
- embedding_model: typing.Optional[VideoBotsPageRequestEmbeddingModel] = None
+ embedding_model: typing.Optional[EmbeddingModels] = None
dense_weight: typing.Optional[float] = pydantic.Field(default=None)
"""
Weightage for dense vs sparse embeddings. `0` for sparse, `1` for dense, `0.5` for equal weight.
@@ -61,7 +61,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
citation_style: typing.Optional[VideoBotsPageRequestCitationStyle] = None
use_url_shortener: typing.Optional[bool] = None
- asr_model: typing.Optional[VideoBotsPageRequestAsrModel] = pydantic.Field(default=None)
+ asr_model: typing.Optional[AsrModels] = pydantic.Field(default=None)
"""
Choose a model to transcribe incoming audio messages to text.
"""
@@ -79,7 +79,7 @@ class VideoBotsPageRequest(UniversalBaseModel):
input_glossary_document: typing.Optional[str] = None
output_glossary_document: typing.Optional[str] = None
- lipsync_model: typing.Optional[VideoBotsPageRequestLipsyncModel] = None
+ lipsync_model: typing.Optional[LipsyncModels] = None
tools: typing.Optional[typing.List[LlmTools]] = pydantic.Field(default=None)
"""
Give your copilot superpowers by giving it access to tools. Powered by [Function calling](https://platform.openai.com/docs/guides/function-calling).
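Taken together, the copilot request now leans on the shared aliases instead of its page-specific ones; a hedged annotation sketch using literals that appear elsewhere in this diff:

    from gooey.types import AsrModels, EmbeddingModels, LipsyncModels

    # Field types on VideoBotsPageRequest after this change:
    asr_model: AsrModels = "deepgram"
    embedding_model: EmbeddingModels = "openai_3_large"
    lipsync_model: LipsyncModels = "Wav2Lip"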
diff --git a/src/gooey/types/video_bots_page_request_asr_model.py b/src/gooey/types/video_bots_page_request_asr_model.py
deleted file mode 100644
index 7db13bc..0000000
--- a/src/gooey/types/video_bots_page_request_asr_model.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestAsrModel = typing.Union[
- typing.Literal[
- "whisper_large_v2",
- "whisper_large_v3",
- "whisper_hindi_large_v2",
- "whisper_telugu_large_v2",
- "nemo_english",
- "nemo_hindi",
- "vakyansh_bhojpuri",
- "gcp_v1",
- "usm",
- "deepgram",
- "azure",
- "seamless_m4t_v2",
- "mms_1b_all",
- "seamless_m4t",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_embedding_model.py b/src/gooey/types/video_bots_page_request_embedding_model.py
deleted file mode 100644
index 19c8972..0000000
--- a/src/gooey/types/video_bots_page_request_embedding_model.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestEmbeddingModel = typing.Union[
- typing.Literal[
- "openai_3_large",
- "openai_3_small",
- "openai_ada_2",
- "e5_large_v2",
- "e5_base_v2",
- "multilingual_e5_base",
- "multilingual_e5_large",
- "gte_large",
- "gte_base",
- ],
- typing.Any,
-]
diff --git a/src/gooey/types/video_bots_page_request_lipsync_model.py b/src/gooey/types/video_bots_page_request_lipsync_model.py
deleted file mode 100644
index 3bb98e0..0000000
--- a/src/gooey/types/video_bots_page_request_lipsync_model.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-VideoBotsPageRequestLipsyncModel = typing.Union[typing.Literal["Wav2Lip", "SadTalker"], typing.Any]